body (string, 26 to 98.2k chars) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (string, 1 class) | body_without_docstring (string, 20 to 98.2k chars)
---|---|---|---|---|---|---|---|
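Each row below fills these eight columns. A minimal sketch of consuming rows with this schema through the Hugging Face `datasets` library, assuming the dump backs a hosted dataset; the dataset id used here is a placeholder, not taken from this page:

# Sketch only: "org/python-functions" is a placeholder dataset id (assumption).
from datasets import load_dataset

rows = load_dataset("org/python-functions", split="train", streaming=True)
for row in rows.take(2):
    # Column names follow the header above.
    print(row["repository_name"], row["path"], row["name"], row["lang"])
    print(row["docstring"][:80])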
def drop_duplicates(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first', inplace: bool=False) -> Optional['DataFrame']:
"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy.\n\n Returns\n -------\n DataFrame\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n >>> df = ps.DataFrame(\n ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])\n >>> df\n a b\n 0 1 a\n 1 2 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates().sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates('a').sort_index()\n a b\n 0 1 a\n 1 2 a\n 4 3 d\n\n >>> df.drop_duplicates(['a', 'b']).sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep='last').sort_index()\n a b\n 0 1 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep=False).sort_index()\n a b\n 0 1 a\n 3 2 c\n 4 3 d\n "
inplace = validate_bool_kwarg(inplace, 'inplace')
(sdf, column) = self._mark_duplicates(subset, keep)
sdf = sdf.where((~ scol_for(sdf, column))).drop(column)
internal = self._internal.with_new_sdf(sdf)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) | -3,917,699,198,602,438,700 | Return DataFrame with duplicate rows removed, optionally only
considering certain columns.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy.
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
>>> df = ps.DataFrame(
... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])
>>> df
a b
0 1 a
1 2 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates().sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates('a').sort_index()
a b
0 1 a
1 2 a
4 3 d
>>> df.drop_duplicates(['a', 'b']).sort_index()
a b
0 1 a
1 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep='last').sort_index()
a b
0 1 a
2 2 a
3 2 c
4 3 d
>>> df.drop_duplicates(keep=False).sort_index()
a b
0 1 a
3 2 c
4 3 d | python/pyspark/pandas/frame.py | drop_duplicates | Flyangz/spark | python | def drop_duplicates(self, subset: Optional[Union[(Name, List[Name])]]=None, keep: Union[(bool, str)]='first', inplace: bool=False) -> Optional['DataFrame']:
"\n Return DataFrame with duplicate rows removed, optionally only\n considering certain columns.\n\n Parameters\n ----------\n subset : column label or sequence of labels, optional\n Only consider certain columns for identifying duplicates, by\n default use all of the columns.\n keep : {'first', 'last', False}, default 'first'\n Determines which duplicates (if any) to keep.\n - ``first`` : Drop duplicates except for the first occurrence.\n - ``last`` : Drop duplicates except for the last occurrence.\n - False : Drop all duplicates.\n inplace : boolean, default False\n Whether to drop duplicates in place or to return a copy.\n\n Returns\n -------\n DataFrame\n DataFrame with duplicates removed or None if ``inplace=True``.\n\n >>> df = ps.DataFrame(\n ... {'a': [1, 2, 2, 2, 3], 'b': ['a', 'a', 'a', 'c', 'd']}, columns = ['a', 'b'])\n >>> df\n a b\n 0 1 a\n 1 2 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates().sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates('a').sort_index()\n a b\n 0 1 a\n 1 2 a\n 4 3 d\n\n >>> df.drop_duplicates(['a', 'b']).sort_index()\n a b\n 0 1 a\n 1 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep='last').sort_index()\n a b\n 0 1 a\n 2 2 a\n 3 2 c\n 4 3 d\n\n >>> df.drop_duplicates(keep=False).sort_index()\n a b\n 0 1 a\n 3 2 c\n 4 3 d\n "
inplace = validate_bool_kwarg(inplace, 'inplace')
(sdf, column) = self._mark_duplicates(subset, keep)
sdf = sdf.where((~ scol_for(sdf, column))).drop(column)
internal = self._internal.with_new_sdf(sdf)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) |
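The row above delegates duplicate detection to `self._mark_duplicates`, which is not included in this dump. A minimal sketch, under the assumption that such marking can be expressed with a window function in plain PySpark, of how `keep='first'` maps onto the `where(~duplicate_flag)` step shown in the body; the column names and the `__dup` flag are illustrative, not the actual internal names:

from pyspark.sql import SparkSession, Window
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame(
    [(1, "a"), (2, "a"), (2, "a"), (2, "c"), (3, "d")], ["a", "b"]
)

# Remember the original row order so 'first' is well defined.
sdf = sdf.withColumn("__row_id", F.monotonically_increasing_id())
w = Window.partitionBy("a", "b").orderBy("__row_id")

# keep='first': everything after the first row of each (a, b) group is a duplicate.
marked = sdf.withColumn("__dup", F.row_number().over(w) > 1)

# Mirrors sdf.where(~scol_for(sdf, column)).drop(column) from the body above.
deduped = marked.where(~F.col("__dup")).drop("__dup", "__row_id")
deduped.show()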
def reindex(self, labels: Optional[Sequence[Any]]=None, index: Optional[Union[('Index', Sequence[Any])]]=None, columns: Optional[Union[(pd.Index, Sequence[Any])]]=None, axis: Optional[Axis]=None, copy: Optional[bool]=True, fill_value: Optional[Any]=None) -> 'DataFrame':
'\n Conform DataFrame to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n labels: array-like, optional\n New labels / index to conform the axis specified by ‘axis’ to.\n index, columns: array-like, optional\n New labels / index to conform to, should be specified using keywords.\n Preferably an Index object to avoid duplicating data\n axis: int or str, optional\n Axis to target. Can be either the axis name (‘index’, ‘columns’) or\n number (0, 1).\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n "compatible" value.\n\n Returns\n -------\n DataFrame with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = [\'Firefox\', \'Chrome\', \'Safari\', \'IE10\', \'Konqueror\']\n >>> df = ps.DataFrame({\n ... \'http_status\': [200, 200, 404, 404, 301],\n ... \'response_time\': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index,\n ... columns=[\'http_status\', \'response_time\'])\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= [\'Safari\', \'Iceweasel\', \'Comodo Dragon\', \'IE10\',\n ... \'Chrome\']\n >>> df.reindex(new_index).sort_index()\n http_status response_time\n Chrome 200.0 0.02\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Iceweasel NaN NaN\n Safari 404.0 0.07\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``.\n\n >>> df.reindex(new_index, fill_value=0, copy=False).sort_index()\n http_status response_time\n Chrome 200 0.02\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Iceweasel 0 0.00\n Safari 404 0.07\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=[\'http_status\', \'user_agent\']).sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n Or we can use "axis-style" keyword arguments\n\n >>> df.reindex([\'http_status\', \'user_agent\'], axis="columns").sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range(\'1/1/2010\', periods=6, freq=\'D\')\n >>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2.sort_index()\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range(\'12/29/2009\', periods=10, freq=\'D\')\n >>> df2.reindex(date_index2).sort_index()\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n '
if ((axis is not None) and ((index is not None) or (columns is not None))):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if (labels is not None):
axis = validate_axis(axis)
if (axis == 0):
index = labels
elif (axis == 1):
columns = labels
if ((index is not None) and (not is_list_like(index))):
raise TypeError(('Index must be called with a collection of some kind, %s was passed' % type(index)))
if ((columns is not None) and (not is_list_like(columns))):
raise TypeError(('Columns must be called with a collection of some kind, %s was passed' % type(columns)))
df = self
if (index is not None):
df = df._reindex_index(index, fill_value)
if (columns is not None):
df = df._reindex_columns(columns, fill_value)
if (copy and (df is self)):
return df.copy()
else:
return df | 285,927,188,171,719,780 | Conform DataFrame to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
labels: array-like, optional
New labels / index to conform the axis specified by ‘axis’ to.
index, columns: array-like, optional
New labels / index to conform to, should be specified using keywords.
Preferably an Index object to avoid duplicating data
axis: int or str, optional
Axis to target. Can be either the axis name (‘index’, ‘columns’) or
number (0, 1).
copy : bool, default True
Return a new object, even if the passed indexes are the same.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
Returns
-------
DataFrame with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = ps.DataFrame({
... 'http_status': [200, 200, 404, 404, 301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index,
... columns=['http_status', 'response_time'])
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index).sort_index()
http_status response_time
Chrome 200.0 0.02
Comodo Dragon NaN NaN
IE10 404.0 0.08
Iceweasel NaN NaN
Safari 404.0 0.07
We can fill in the missing values by passing a value to
the keyword ``fill_value``.
>>> df.reindex(new_index, fill_value=0, copy=False).sort_index()
http_status response_time
Chrome 200 0.02
Comodo Dragon 0 0.00
IE10 404 0.08
Iceweasel 0 0.00
Safari 404 0.07
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent']).sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns").sort_index()
http_status user_agent
Chrome 200 NaN
Firefox 200 NaN
IE10 404 NaN
Konqueror 301 NaN
Safari 404 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2.sort_index()
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2).sort_index()
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN | python/pyspark/pandas/frame.py | reindex | Flyangz/spark | python | def reindex(self, labels: Optional[Sequence[Any]]=None, index: Optional[Union[('Index', Sequence[Any])]]=None, columns: Optional[Union[(pd.Index, Sequence[Any])]]=None, axis: Optional[Axis]=None, copy: Optional[bool]=True, fill_value: Optional[Any]=None) -> 'DataFrame':
'\n Conform DataFrame to new index with optional filling logic, placing\n NA/NaN in locations having no value in the previous index. A new object\n is produced unless the new index is equivalent to the current one and\n ``copy=False``.\n\n Parameters\n ----------\n labels: array-like, optional\n New labels / index to conform the axis specified by ‘axis’ to.\n index, columns: array-like, optional\n New labels / index to conform to, should be specified using keywords.\n Preferably an Index object to avoid duplicating data\n axis: int or str, optional\n Axis to target. Can be either the axis name (‘index’, ‘columns’) or\n number (0, 1).\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n fill_value : scalar, default np.NaN\n Value to use for missing values. Defaults to NaN, but can be any\n "compatible" value.\n\n Returns\n -------\n DataFrame with changed index.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n\n Examples\n --------\n\n ``DataFrame.reindex`` supports two calling conventions\n\n * ``(index=index_labels, columns=column_labels, ...)``\n * ``(labels, axis={\'index\', \'columns\'}, ...)``\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Create a dataframe with some fictional data.\n\n >>> index = [\'Firefox\', \'Chrome\', \'Safari\', \'IE10\', \'Konqueror\']\n >>> df = ps.DataFrame({\n ... \'http_status\': [200, 200, 404, 404, 301],\n ... \'response_time\': [0.04, 0.02, 0.07, 0.08, 1.0]},\n ... index=index,\n ... columns=[\'http_status\', \'response_time\'])\n >>> df\n http_status response_time\n Firefox 200 0.04\n Chrome 200 0.02\n Safari 404 0.07\n IE10 404 0.08\n Konqueror 301 1.00\n\n Create a new index and reindex the dataframe. By default\n values in the new index that do not have corresponding\n records in the dataframe are assigned ``NaN``.\n\n >>> new_index= [\'Safari\', \'Iceweasel\', \'Comodo Dragon\', \'IE10\',\n ... \'Chrome\']\n >>> df.reindex(new_index).sort_index()\n http_status response_time\n Chrome 200.0 0.02\n Comodo Dragon NaN NaN\n IE10 404.0 0.08\n Iceweasel NaN NaN\n Safari 404.0 0.07\n\n We can fill in the missing values by passing a value to\n the keyword ``fill_value``.\n\n >>> df.reindex(new_index, fill_value=0, copy=False).sort_index()\n http_status response_time\n Chrome 200 0.02\n Comodo Dragon 0 0.00\n IE10 404 0.08\n Iceweasel 0 0.00\n Safari 404 0.07\n\n We can also reindex the columns.\n\n >>> df.reindex(columns=[\'http_status\', \'user_agent\']).sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n Or we can use "axis-style" keyword arguments\n\n >>> df.reindex([\'http_status\', \'user_agent\'], axis="columns").sort_index()\n http_status user_agent\n Chrome 200 NaN\n Firefox 200 NaN\n IE10 404 NaN\n Konqueror 301 NaN\n Safari 404 NaN\n\n To further illustrate the filling functionality in\n ``reindex``, we will create a dataframe with a\n monotonically increasing index (for example, a sequence\n of dates).\n\n >>> date_index = pd.date_range(\'1/1/2010\', periods=6, freq=\'D\')\n >>> df2 = ps.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},\n ... index=date_index)\n >>> df2.sort_index()\n prices\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n\n Suppose we decide to expand the dataframe to cover a wider\n date range.\n\n >>> date_index2 = pd.date_range(\'12/29/2009\', periods=10, freq=\'D\')\n >>> df2.reindex(date_index2).sort_index()\n prices\n 2009-12-29 NaN\n 2009-12-30 NaN\n 2009-12-31 NaN\n 2010-01-01 100.0\n 2010-01-02 101.0\n 2010-01-03 NaN\n 2010-01-04 100.0\n 2010-01-05 89.0\n 2010-01-06 88.0\n 2010-01-07 NaN\n '
if ((axis is not None) and ((index is not None) or (columns is not None))):
raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'.")
if (labels is not None):
axis = validate_axis(axis)
if (axis == 0):
index = labels
elif (axis == 1):
columns = labels
if ((index is not None) and (not is_list_like(index))):
raise TypeError(('Index must be called with a collection of some kind, %s was passed' % type(index)))
if ((columns is not None) and (not is_list_like(columns))):
raise TypeError(('Columns must be called with a collection of some kind, %s was passed' % type(columns)))
df = self
if (index is not None):
df = df._reindex_index(index, fill_value)
if (columns is not None):
df = df._reindex_columns(columns, fill_value)
if (copy and (df is self)):
return df.copy()
else:
return df |
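A compact usage sketch of the two calling conventions documented above, combining row and column reindexing with `fill_value`; the frame contents are illustrative:

import pyspark.pandas as ps

psdf = ps.DataFrame(
    {"http_status": [200, 404], "response_time": [0.04, 0.07]},
    index=["Firefox", "Safari"],
)

# Keyword convention: new rows and columns in one call; gaps filled with 0.
out = psdf.reindex(
    index=["Safari", "Chrome"],
    columns=["http_status", "user_agent"],
    fill_value=0,
).sort_index()

# Axis-style convention: the same column selection via labels plus axis.
out2 = psdf.reindex(["http_status", "user_agent"], axis="columns").sort_index()

Note from the body above that when nothing changes and `copy=True`, a copy of `self` is returned, while `copy=False` may hand back the same object.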
def reindex_like(self, other: 'DataFrame', copy: bool=True) -> 'DataFrame':
"\n Return a DataFrame with matching indices as other object.\n\n Conform the object to the same index on all axes. Places NA/NaN in locations\n having no value in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : DataFrame\n Its row and column indices are used to define the new indices\n of this object.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n\n Returns\n -------\n DataFrame\n DataFrame with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n\n >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = ps.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN None\n 2014-02-15 35.1 NaN medium\n "
if isinstance(other, DataFrame):
return self.reindex(index=other.index, columns=other.columns, copy=copy)
else:
raise TypeError('other must be a pandas-on-Spark DataFrame') | 7,742,307,885,276,616,000 | Return a DataFrame with matching indices as other object.
Conform the object to the same index on all axes. Places NA/NaN in locations
having no value in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : DataFrame
Its row and column indices are used to define the new indices
of this object.
copy : bool, default True
Return a new object, even if the passed indexes are the same.
Returns
-------
DataFrame
DataFrame with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit',
... 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = ps.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN None
2014-02-15 35.1 NaN medium | python/pyspark/pandas/frame.py | reindex_like | Flyangz/spark | python | def reindex_like(self, other: 'DataFrame', copy: bool=True) -> 'DataFrame':
"\n Return a DataFrame with matching indices as other object.\n\n Conform the object to the same index on all axes. Places NA/NaN in locations\n having no value in the previous index. A new object is produced unless the\n new index is equivalent to the current one and copy=False.\n\n Parameters\n ----------\n other : DataFrame\n Its row and column indices are used to define the new indices\n of this object.\n copy : bool, default True\n Return a new object, even if the passed indexes are the same.\n\n Returns\n -------\n DataFrame\n DataFrame with changed indices on each axis.\n\n See Also\n --------\n DataFrame.set_index : Set row labels.\n DataFrame.reset_index : Remove row labels or move them to new columns.\n DataFrame.reindex : Change to new indices or expand indices.\n\n Notes\n -----\n Same as calling\n ``.reindex(index=other.index, columns=other.columns,...)``.\n\n Examples\n --------\n\n >>> df1 = ps.DataFrame([[24.3, 75.7, 'high'],\n ... [31, 87.8, 'high'],\n ... [22, 71.6, 'medium'],\n ... [35, 95, 'medium']],\n ... columns=['temp_celsius', 'temp_fahrenheit',\n ... 'windspeed'],\n ... index=pd.date_range(start='2014-02-12',\n ... end='2014-02-15', freq='D'))\n >>> df1\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 24.3 75.7 high\n 2014-02-13 31.0 87.8 high\n 2014-02-14 22.0 71.6 medium\n 2014-02-15 35.0 95.0 medium\n\n >>> df2 = ps.DataFrame([[28, 'low'],\n ... [30, 'low'],\n ... [35.1, 'medium']],\n ... columns=['temp_celsius', 'windspeed'],\n ... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',\n ... '2014-02-15']))\n >>> df2\n temp_celsius windspeed\n 2014-02-12 28.0 low\n 2014-02-13 30.0 low\n 2014-02-15 35.1 medium\n\n >>> df2.reindex_like(df1).sort_index() # doctest: +NORMALIZE_WHITESPACE\n temp_celsius temp_fahrenheit windspeed\n 2014-02-12 28.0 NaN low\n 2014-02-13 30.0 NaN low\n 2014-02-14 NaN NaN None\n 2014-02-15 35.1 NaN medium\n "
if isinstance(other, DataFrame):
return self.reindex(index=other.index, columns=other.columns, copy=copy)
else:
raise TypeError('other must be a pandas-on-Spark DataFrame') |
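The Notes section above states that `reindex_like` is the same as calling `reindex(index=other.index, columns=other.columns, ...)`; a short sketch of that equivalence with illustrative data:

import pyspark.pandas as ps

df_template = ps.DataFrame({"x": [1, 2, 3]}, index=["a", "b", "c"])
df_data = ps.DataFrame({"x": [10.0, 30.0], "y": [0.1, 0.3]}, index=["a", "c"])

# Align df_data to df_template's row and column labels: row "b" appears as NaN,
# and column "y" is dropped because it is absent from the template.
aligned = df_data.reindex_like(df_template).sort_index()

# The documented equivalent spelled out explicitly.
aligned_explicit = df_data.reindex(
    index=df_template.index, columns=df_template.columns
).sort_index()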
def melt(self, id_vars: Optional[Union[(Name, List[Name])]]=None, value_vars: Optional[Union[(Name, List[Name])]]=None, var_name: Optional[Union[(str, List[str])]]=None, value_name: str='value') -> 'DataFrame':
'\n Unpivot a DataFrame from wide format to long format, optionally\n leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one\n or more columns are identifier variables (`id_vars`), while all other\n columns, considered measured variables (`value_vars`), are "unpivoted" to\n the row axis, leaving just two non-identifier columns, \'variable\' and\n \'value\'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar, default \'variable\'\n Name to use for the \'variable\' column. If None it uses `frame.columns.name` or\n ‘variable’.\n value_name : scalar, default \'value\'\n Name to use for the \'value\' column.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': {0: \'a\', 1: \'b\', 2: \'c\'},\n ... \'B\': {0: 1, 1: 3, 2: 5},\n ... \'C\': {0: 2, 1: 4, 2: 6}},\n ... columns=[\'A\', \'B\', \'C\'])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> ps.melt(df)\n variable value\n 0 A a\n 1 B 1\n 2 C 2\n 3 A b\n 4 B 3\n 5 C 4\n 6 A c\n 7 B 5\n 8 C 6\n\n >>> df.melt(id_vars=\'A\')\n A variable value\n 0 a B 1\n 1 a C 2\n 2 b B 3\n 3 b C 4\n 4 c B 5\n 5 c C 6\n\n >>> df.melt(value_vars=\'A\')\n variable value\n 0 A a\n 1 A b\n 2 A c\n\n >>> ps.melt(df, id_vars=[\'A\', \'B\'])\n A B variable value\n 0 a 1 C 2\n 1 b 3 C 4\n 2 c 5 C 6\n\n >>> df.melt(id_vars=[\'A\'], value_vars=[\'C\'])\n A variable value\n 0 a C 2\n 1 b C 4\n 2 c C 6\n\n The names of \'variable\' and \'value\' columns can be customized:\n\n >>> ps.melt(df, id_vars=[\'A\'], value_vars=[\'B\'],\n ... var_name=\'myVarname\', value_name=\'myValname\')\n A myVarname myValname\n 0 a B 1\n 1 b B 3\n 2 c B 5\n '
column_labels = self._internal.column_labels
if (id_vars is None):
id_vars = []
else:
if isinstance(id_vars, tuple):
if (self._internal.column_labels_level == 1):
id_vars = [(idv if is_name_like_tuple(idv) else (idv,)) for idv in id_vars]
else:
raise ValueError('id_vars must be a list of tuples when columns are a MultiIndex')
elif is_name_like_value(id_vars):
id_vars = [(id_vars,)]
else:
id_vars = [(idv if is_name_like_tuple(idv) else (idv,)) for idv in id_vars]
non_existence_col = [idv for idv in id_vars if (idv not in column_labels)]
if (len(non_existence_col) != 0):
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col) if (nec not in raveled_column_labels)]
if (len(missing) != 0):
raise KeyError("The following 'id_vars' are not present in the DataFrame: {}".format(missing))
else:
raise KeyError('None of {} are in the {}'.format(non_existence_col, column_labels))
if (value_vars is None):
value_vars = []
else:
if isinstance(value_vars, tuple):
if (self._internal.column_labels_level == 1):
value_vars = [(valv if is_name_like_tuple(valv) else (valv,)) for valv in value_vars]
else:
raise ValueError('value_vars must be a list of tuples when columns are a MultiIndex')
elif is_name_like_value(value_vars):
value_vars = [(value_vars,)]
else:
value_vars = [(valv if is_name_like_tuple(valv) else (valv,)) for valv in value_vars]
non_existence_col = [valv for valv in value_vars if (valv not in column_labels)]
if (len(non_existence_col) != 0):
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col) if (nec not in raveled_column_labels)]
if (len(missing) != 0):
raise KeyError("The following 'value_vars' are not present in the DataFrame: {}".format(missing))
else:
raise KeyError('None of {} are in the {}'.format(non_existence_col, column_labels))
if (len(value_vars) == 0):
value_vars = column_labels
column_labels = [label for label in column_labels if (label not in id_vars)]
sdf = self._internal.spark_frame
if (var_name is None):
if ((self._internal.column_labels_level == 1) and (self._internal.column_label_names[0] is None)):
var_name = ['variable']
else:
var_name = [(name_like_string(name) if (name is not None) else 'variable_{}'.format(i)) for (i, name) in enumerate(self._internal.column_label_names)]
elif isinstance(var_name, str):
var_name = [var_name]
pairs = F.explode(F.array(*[F.struct(*[SF.lit(c).alias(name) for (c, name) in zip(label, var_name)], *[self._internal.spark_column_for(label).alias(value_name)]) for label in column_labels if (label in value_vars)]))
columns = (([self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars] + [F.col(('pairs.`%s`' % name)) for name in var_name]) + [F.col(('pairs.`%s`' % value_name))])
exploded_df = sdf.withColumn('pairs', pairs).select(columns)
return DataFrame(InternalFrame(spark_frame=exploded_df, index_spark_columns=None, column_labels=(([(label if (len(label) == 1) else (name_like_string(label),)) for label in id_vars] + [(name,) for name in var_name]) + [(value_name,)]))) | -6,052,788,158,713,160,000 | Unpivot a DataFrame from wide format to long format, optionally
leaving identifier variables set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
Parameters
----------
frame : DataFrame
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar, default 'variable'
Name to use for the 'variable' column. If None it uses `frame.columns.name` or
‘variable’.
value_name : scalar, default 'value'
Name to use for the 'value' column.
Returns
-------
DataFrame
Unpivoted DataFrame.
Examples
--------
>>> df = ps.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}},
... columns=['A', 'B', 'C'])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> ps.melt(df)
variable value
0 A a
1 B 1
2 C 2
3 A b
4 B 3
5 C 4
6 A c
7 B 5
8 C 6
>>> df.melt(id_vars='A')
A variable value
0 a B 1
1 a C 2
2 b B 3
3 b C 4
4 c B 5
5 c C 6
>>> df.melt(value_vars='A')
variable value
0 A a
1 A b
2 A c
>>> ps.melt(df, id_vars=['A', 'B'])
A B variable value
0 a 1 C 2
1 b 3 C 4
2 c 5 C 6
>>> df.melt(id_vars=['A'], value_vars=['C'])
A variable value
0 a C 2
1 b C 4
2 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> ps.melt(df, id_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5 | python/pyspark/pandas/frame.py | melt | Flyangz/spark | python | def melt(self, id_vars: Optional[Union[(Name, List[Name])]]=None, value_vars: Optional[Union[(Name, List[Name])]]=None, var_name: Optional[Union[(str, List[str])]]=None, value_name: str='value') -> 'DataFrame':
'\n Unpivot a DataFrame from wide format to long format, optionally\n leaving identifier variables set.\n\n This function is useful to massage a DataFrame into a format where one\n or more columns are identifier variables (`id_vars`), while all other\n columns, considered measured variables (`value_vars`), are "unpivoted" to\n the row axis, leaving just two non-identifier columns, \'variable\' and\n \'value\'.\n\n Parameters\n ----------\n frame : DataFrame\n id_vars : tuple, list, or ndarray, optional\n Column(s) to use as identifier variables.\n value_vars : tuple, list, or ndarray, optional\n Column(s) to unpivot. If not specified, uses all columns that\n are not set as `id_vars`.\n var_name : scalar, default \'variable\'\n Name to use for the \'variable\' column. If None it uses `frame.columns.name` or\n ‘variable’.\n value_name : scalar, default \'value\'\n Name to use for the \'value\' column.\n\n Returns\n -------\n DataFrame\n Unpivoted DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': {0: \'a\', 1: \'b\', 2: \'c\'},\n ... \'B\': {0: 1, 1: 3, 2: 5},\n ... \'C\': {0: 2, 1: 4, 2: 6}},\n ... columns=[\'A\', \'B\', \'C\'])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> ps.melt(df)\n variable value\n 0 A a\n 1 B 1\n 2 C 2\n 3 A b\n 4 B 3\n 5 C 4\n 6 A c\n 7 B 5\n 8 C 6\n\n >>> df.melt(id_vars=\'A\')\n A variable value\n 0 a B 1\n 1 a C 2\n 2 b B 3\n 3 b C 4\n 4 c B 5\n 5 c C 6\n\n >>> df.melt(value_vars=\'A\')\n variable value\n 0 A a\n 1 A b\n 2 A c\n\n >>> ps.melt(df, id_vars=[\'A\', \'B\'])\n A B variable value\n 0 a 1 C 2\n 1 b 3 C 4\n 2 c 5 C 6\n\n >>> df.melt(id_vars=[\'A\'], value_vars=[\'C\'])\n A variable value\n 0 a C 2\n 1 b C 4\n 2 c C 6\n\n The names of \'variable\' and \'value\' columns can be customized:\n\n >>> ps.melt(df, id_vars=[\'A\'], value_vars=[\'B\'],\n ... var_name=\'myVarname\', value_name=\'myValname\')\n A myVarname myValname\n 0 a B 1\n 1 b B 3\n 2 c B 5\n '
column_labels = self._internal.column_labels
if (id_vars is None):
id_vars = []
else:
if isinstance(id_vars, tuple):
if (self._internal.column_labels_level == 1):
id_vars = [(idv if is_name_like_tuple(idv) else (idv,)) for idv in id_vars]
else:
raise ValueError('id_vars must be a list of tuples when columns are a MultiIndex')
elif is_name_like_value(id_vars):
id_vars = [(id_vars,)]
else:
id_vars = [(idv if is_name_like_tuple(idv) else (idv,)) for idv in id_vars]
non_existence_col = [idv for idv in id_vars if (idv not in column_labels)]
if (len(non_existence_col) != 0):
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col) if (nec not in raveled_column_labels)]
if (len(missing) != 0):
raise KeyError("The following 'id_vars' are not present in the DataFrame: {}".format(missing))
else:
raise KeyError('None of {} are in the {}'.format(non_existence_col, column_labels))
if (value_vars is None):
value_vars = []
else:
if isinstance(value_vars, tuple):
if (self._internal.column_labels_level == 1):
value_vars = [(valv if is_name_like_tuple(valv) else (valv,)) for valv in value_vars]
else:
raise ValueError('value_vars must be a list of tuples when columns are a MultiIndex')
elif is_name_like_value(value_vars):
value_vars = [(value_vars,)]
else:
value_vars = [(valv if is_name_like_tuple(valv) else (valv,)) for valv in value_vars]
non_existence_col = [valv for valv in value_vars if (valv not in column_labels)]
if (len(non_existence_col) != 0):
raveled_column_labels = np.ravel(column_labels)
missing = [nec for nec in np.ravel(non_existence_col) if (nec not in raveled_column_labels)]
if (len(missing) != 0):
raise KeyError("The following 'value_vars' are not present in the DataFrame: {}".format(missing))
else:
raise KeyError('None of {} are in the {}'.format(non_existence_col, column_labels))
if (len(value_vars) == 0):
value_vars = column_labels
column_labels = [label for label in column_labels if (label not in id_vars)]
sdf = self._internal.spark_frame
if (var_name is None):
if ((self._internal.column_labels_level == 1) and (self._internal.column_label_names[0] is None)):
var_name = ['variable']
else:
var_name = [(name_like_string(name) if (name is not None) else 'variable_{}'.format(i)) for (i, name) in enumerate(self._internal.column_label_names)]
elif isinstance(var_name, str):
var_name = [var_name]
pairs = F.explode(F.array(*[F.struct(*[SF.lit(c).alias(name) for (c, name) in zip(label, var_name)], *[self._internal.spark_column_for(label).alias(value_name)]) for label in column_labels if (label in value_vars)]))
columns = (([self._internal.spark_column_for(label).alias(name_like_string(label)) for label in id_vars] + [F.col(('pairs.`%s`' % name)) for name in var_name]) + [F.col(('pairs.`%s`' % value_name))])
exploded_df = sdf.withColumn('pairs', pairs).select(columns)
return DataFrame(InternalFrame(spark_frame=exploded_df, index_spark_columns=None, column_labels=(([(label if (len(label) == 1) else (name_like_string(label),)) for label in id_vars] + [(name,) for name in var_name]) + [(value_name,)]))) |
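The heart of the body above is `F.explode(F.array(*[F.struct(...)]))`: one struct per unpivoted column, exploded into rows. A stripped-down sketch of that pattern in plain PySpark, outside the pandas-on-Spark internals; the column names are illustrative:

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.getOrCreate()
sdf = spark.createDataFrame([("a", 1, 2), ("b", 3, 4)], ["A", "B", "C"])

# One struct per value column: (variable, value); explode turns each struct into a row.
pairs = F.explode(
    F.array(
        *[
            F.struct(F.lit(c).alias("variable"), F.col(c).alias("value"))
            for c in ["B", "C"]
        ]
    )
)

# Keep the id column "A" and unpack the exploded struct, like the select() in the body.
melted = sdf.withColumn("pairs", pairs).select(
    "A", F.col("pairs.variable"), F.col("pairs.value")
)
melted.show()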
def stack(self) -> DataFrameOrSeries:
"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n The new index levels are sorted.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack().sort_index()\n cat height 1\n weight 0\n dog height 3\n weight 2\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack().sort_index()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n "
from pyspark.pandas.series import first_series
if (len(self._internal.column_labels) == 0):
return DataFrame(self._internal.copy(column_label_names=self._internal.column_label_names[:(- 1)]).with_filter(SF.lit(False)))
column_labels: Dict[(Label, Dict[(Any, Column)])] = defaultdict(dict)
index_values = set()
should_returns_series = False
for label in self._internal.column_labels:
new_label = label[:(- 1)]
if (len(new_label) == 0):
new_label = None
should_returns_series = True
value = label[(- 1)]
scol = self._internal.spark_column_for(label)
column_labels[new_label][value] = scol
index_values.add(value)
column_labels = dict(sorted(column_labels.items(), key=(lambda x: x[0])))
index_name = self._internal.column_label_names[(- 1)]
column_label_names = self._internal.column_label_names[:(- 1)]
if (len(column_label_names) == 0):
column_label_names = [None]
index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)
data_columns = [name_like_string(label) for label in column_labels]
structs = [F.struct(*[SF.lit(value).alias(index_column)], *[(column_labels[label][value] if (value in column_labels[label]) else SF.lit(None)).alias(name) for (label, name) in zip(column_labels, data_columns)]).alias(value) for value in index_values]
pairs = F.explode(F.array(*structs))
sdf = self._internal.spark_frame.withColumn('pairs', pairs)
sdf = sdf.select(((self._internal.index_spark_columns + [sdf['pairs'][index_column].alias(index_column)]) + [sdf['pairs'][name].alias(name) for name in data_columns]))
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in (self._internal.index_spark_column_names + [index_column])], index_names=(self._internal.index_names + [index_name]), index_fields=(self._internal.index_fields + [None]), column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names)
psdf: DataFrame = DataFrame(internal)
if should_returns_series:
return first_series(psdf)
else:
return psdf | 9,052,999,775,344,987,000 | Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack().sort_index()
cat height 1
weight 0
dog height 3
weight 2
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack().sort_index()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN | python/pyspark/pandas/frame.py | stack | Flyangz/spark | python | def stack(self) -> DataFrameOrSeries:
"\n Stack the prescribed level(s) from columns to index.\n\n Return a reshaped DataFrame or Series having a multi-level\n index with one or more new inner-most levels compared to the current\n DataFrame. The new inner-most levels are created by pivoting the\n columns of the current dataframe:\n\n - if the columns have a single level, the output is a Series;\n - if the columns have multiple levels, the new index\n level(s) is (are) taken from the prescribed level(s) and\n the output is a DataFrame.\n\n The new index levels are sorted.\n\n Returns\n -------\n DataFrame or Series\n Stacked dataframe or series.\n\n See Also\n --------\n DataFrame.unstack : Unstack prescribed level(s) from index axis\n onto column axis.\n DataFrame.pivot : Reshape dataframe from long format to wide\n format.\n DataFrame.pivot_table : Create a spreadsheet-style pivot table\n as a DataFrame.\n\n Notes\n -----\n The function is named by analogy with a collection of books\n being reorganized from being side by side on a horizontal\n position (the columns of the dataframe) to being stacked\n vertically on top of each other (in the index of the\n dataframe).\n\n Examples\n --------\n **Single level columns**\n\n >>> df_single_level_cols = ps.DataFrame([[0, 1], [2, 3]],\n ... index=['cat', 'dog'],\n ... columns=['weight', 'height'])\n\n Stacking a dataframe with a single level column axis returns a Series:\n\n >>> df_single_level_cols\n weight height\n cat 0 1\n dog 2 3\n >>> df_single_level_cols.stack().sort_index()\n cat height 1\n weight 0\n dog height 3\n weight 2\n dtype: int64\n\n **Multi level columns: simple case**\n\n >>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('weight', 'pounds')])\n >>> df_multi_level_cols1 = ps.DataFrame([[1, 2], [2, 4]],\n ... index=['cat', 'dog'],\n ... columns=multicol1)\n\n Stacking a dataframe with a multi-level column axis:\n\n >>> df_multi_level_cols1 # doctest: +NORMALIZE_WHITESPACE\n weight\n kg pounds\n cat 1 2\n dog 2 4\n >>> df_multi_level_cols1.stack().sort_index()\n weight\n cat kg 1\n pounds 2\n dog kg 2\n pounds 4\n\n **Missing values**\n\n >>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),\n ... ('height', 'm')])\n >>> df_multi_level_cols2 = ps.DataFrame([[1.0, 2.0], [3.0, 4.0]],\n ... index=['cat', 'dog'],\n ... columns=multicol2)\n\n It is common to have missing values when stacking a dataframe\n with multi-level columns, as the stacked dataframe typically\n has more values than the original dataframe. Missing values\n are filled with NaNs:\n\n >>> df_multi_level_cols2\n weight height\n kg m\n cat 1.0 2.0\n dog 3.0 4.0\n >>> df_multi_level_cols2.stack().sort_index() # doctest: +SKIP\n height weight\n cat kg NaN 1.0\n m 2.0 NaN\n dog kg NaN 3.0\n m 4.0 NaN\n "
from pyspark.pandas.series import first_series
if (len(self._internal.column_labels) == 0):
return DataFrame(self._internal.copy(column_label_names=self._internal.column_label_names[:(- 1)]).with_filter(SF.lit(False)))
column_labels: Dict[(Label, Dict[(Any, Column)])] = defaultdict(dict)
index_values = set()
should_returns_series = False
for label in self._internal.column_labels:
new_label = label[:(- 1)]
if (len(new_label) == 0):
new_label = None
should_returns_series = True
value = label[(- 1)]
scol = self._internal.spark_column_for(label)
column_labels[new_label][value] = scol
index_values.add(value)
column_labels = dict(sorted(column_labels.items(), key=(lambda x: x[0])))
index_name = self._internal.column_label_names[(- 1)]
column_label_names = self._internal.column_label_names[:(- 1)]
if (len(column_label_names) == 0):
column_label_names = [None]
index_column = SPARK_INDEX_NAME_FORMAT(self._internal.index_level)
data_columns = [name_like_string(label) for label in column_labels]
structs = [F.struct(*[SF.lit(value).alias(index_column)], *[(column_labels[label][value] if (value in column_labels[label]) else SF.lit(None)).alias(name) for (label, name) in zip(column_labels, data_columns)]).alias(value) for value in index_values]
pairs = F.explode(F.array(*structs))
sdf = self._internal.spark_frame.withColumn('pairs', pairs)
sdf = sdf.select(((self._internal.index_spark_columns + [sdf['pairs'][index_column].alias(index_column)]) + [sdf['pairs'][name].alias(name) for name in data_columns]))
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, col) for col in (self._internal.index_spark_column_names + [index_column])], index_names=(self._internal.index_names + [index_name]), index_fields=(self._internal.index_fields + [None]), column_labels=list(column_labels), data_spark_columns=[scol_for(sdf, col) for col in data_columns], column_label_names=column_label_names)
psdf: DataFrame = DataFrame(internal)
if should_returns_series:
return first_series(psdf)
else:
return psdf |
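A short usage sketch of the single-level case described above, together with the inverse operation mentioned in the See Also section, assuming `Series.unstack` is available as in recent pandas-on-Spark releases:

import pyspark.pandas as ps

psdf = ps.DataFrame(
    [[0, 1], [2, 3]], index=["cat", "dog"], columns=["weight", "height"]
)

# Single-level columns: stack() pivots them into the inner index level of a Series.
stacked = psdf.stack()

# unstack() pivots that inner level back out, recovering a wide frame.
roundtrip = stacked.unstack().sort_index()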
def unstack(self) -> DataFrameOrSeries:
'\n Pivot the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series.\n\n .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and\n it could cause a serious performance degradation since Spark partitions it row based.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).\n\n Examples\n --------\n >>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},\n ... "B": {"0": "1", "1": "3", "2": "5"},\n ... "C": {"0": "2", "1": "4", "2": "6"}},\n ... columns=["A", "B", "C"])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> df.unstack().sort_index()\n A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n C 0 2\n 1 4\n 2 6\n dtype: object\n\n >>> df.columns = pd.MultiIndex.from_tuples([(\'X\', \'A\'), (\'X\', \'B\'), (\'Y\', \'C\')])\n >>> df.unstack().sort_index()\n X A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n Y C 0 2\n 1 4\n 2 6\n dtype: object\n\n For MultiIndex case:\n\n >>> df = ps.DataFrame({"A": ["a", "b", "c"],\n ... "B": [1, 3, 5],\n ... "C": [2, 4, 6]},\n ... columns=["A", "B", "C"])\n >>> df = df.set_index(\'A\', append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A a b c a b c\n 0 1.0 NaN NaN 2.0 NaN NaN\n 1 NaN 3.0 NaN NaN 4.0 NaN\n 2 NaN NaN 5.0 NaN NaN 6.0\n '
from pyspark.pandas.series import first_series
if (self._internal.index_level > 1):
with option_context('compute.default_index_type', 'distributed'):
df = self.reset_index()
index = df._internal.column_labels[:(self._internal.index_level - 1)]
columns = df.columns[(self._internal.index_level - 1)]
df = df.pivot_table(index=index, columns=columns, values=self._internal.column_labels, aggfunc='first')
internal = df._internal.copy(index_names=self._internal.index_names[:(- 1)], index_fields=df._internal.index_fields[:(self._internal.index_level - 1)], column_label_names=(df._internal.column_label_names[:(- 1)] + [(None if (self._internal.index_names[(- 1)] is None) else df._internal.column_label_names[(- 1)])]))
return DataFrame(internal)
column_labels = self._internal.column_labels
ser_name = SPARK_DEFAULT_SERIES_NAME
sdf = self._internal.spark_frame
new_index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)]
new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, []))
pairs = F.explode(F.array(*[F.struct(*[SF.lit(c).alias(name) for (c, name) in zip(idx, new_index_columns)], *[self._internal.spark_column_for(idx).alias(ser_name)]) for idx in column_labels]))
columns = ([F.col(('pairs.%s' % name)) for name in new_index_columns[:self._internal.column_labels_level]] + [F.col(('pairs.%s' % ser_name))])
new_index_len = len(new_index_columns)
existing_index_columns = []
for (i, (index_name, index_field)) in enumerate(zip(self._internal.index_names, self._internal.index_fields)):
name = SPARK_INDEX_NAME_FORMAT((i + new_index_len))
new_index_map.append((name, index_name, index_field.copy(name=name)))
existing_index_columns.append(self._internal.index_spark_columns[i].alias(name))
exploded_df = sdf.withColumn('pairs', pairs).select((existing_index_columns + columns))
(index_spark_column_names, index_names, index_fields) = zip(*new_index_map)
return first_series(DataFrame(InternalFrame(exploded_df, index_spark_columns=[scol_for(exploded_df, col) for col in index_spark_column_names], index_names=list(index_names), index_fields=list(index_fields), column_labels=[None]))) | 2,893,301,910,422,294,500 | Pivot the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series.
.. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and
it could cause a serious performance degradation since Spark partitions it row based.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).
Examples
--------
>>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},
... "B": {"0": "1", "1": "3", "2": "5"},
... "C": {"0": "2", "1": "4", "2": "6"}},
... columns=["A", "B", "C"])
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index()
A 0 a
1 b
2 c
B 0 1
1 3
2 5
C 0 2
1 4
2 6
dtype: object
>>> df.columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')])
>>> df.unstack().sort_index()
X A 0 a
1 b
2 c
B 0 1
1 3
2 5
Y C 0 2
1 4
2 6
dtype: object
For MultiIndex case:
>>> df = ps.DataFrame({"A": ["a", "b", "c"],
... "B": [1, 3, 5],
... "C": [2, 4, 6]},
... columns=["A", "B", "C"])
>>> df = df.set_index('A', append=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
B C
A
0 a 1 2
1 b 3 4
2 c 5 6
>>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE
B C
A a b c a b c
0 1.0 NaN NaN 2.0 NaN NaN
1 NaN 3.0 NaN NaN 4.0 NaN
2 NaN NaN 5.0 NaN NaN 6.0 | python/pyspark/pandas/frame.py | unstack | Flyangz/spark | python | def unstack(self) -> DataFrameOrSeries:
'\n Pivot the (necessarily hierarchical) index labels.\n\n Returns a DataFrame having a new level of column labels whose inner-most level\n consists of the pivoted index labels.\n\n If the index is not a MultiIndex, the output will be a Series.\n\n .. note:: If the index is a MultiIndex, the output DataFrame could be very wide, and\n it could cause a serious performance degradation since Spark partitions it row based.\n\n Returns\n -------\n Series or DataFrame\n\n See Also\n --------\n DataFrame.pivot : Pivot a table based on column values.\n DataFrame.stack : Pivot a level of the column labels (inverse operation from unstack).\n\n Examples\n --------\n >>> df = ps.DataFrame({"A": {"0": "a", "1": "b", "2": "c"},\n ... "B": {"0": "1", "1": "3", "2": "5"},\n ... "C": {"0": "2", "1": "4", "2": "6"}},\n ... columns=["A", "B", "C"])\n >>> df\n A B C\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n\n >>> df.unstack().sort_index()\n A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n C 0 2\n 1 4\n 2 6\n dtype: object\n\n >>> df.columns = pd.MultiIndex.from_tuples([(\'X\', \'A\'), (\'X\', \'B\'), (\'Y\', \'C\')])\n >>> df.unstack().sort_index()\n X A 0 a\n 1 b\n 2 c\n B 0 1\n 1 3\n 2 5\n Y C 0 2\n 1 4\n 2 6\n dtype: object\n\n For MultiIndex case:\n\n >>> df = ps.DataFrame({"A": ["a", "b", "c"],\n ... "B": [1, 3, 5],\n ... "C": [2, 4, 6]},\n ... columns=["A", "B", "C"])\n >>> df = df.set_index(\'A\', append=True)\n >>> df # doctest: +NORMALIZE_WHITESPACE\n B C\n A\n 0 a 1 2\n 1 b 3 4\n 2 c 5 6\n >>> df.unstack().sort_index() # doctest: +NORMALIZE_WHITESPACE\n B C\n A a b c a b c\n 0 1.0 NaN NaN 2.0 NaN NaN\n 1 NaN 3.0 NaN NaN 4.0 NaN\n 2 NaN NaN 5.0 NaN NaN 6.0\n '
from pyspark.pandas.series import first_series
if (self._internal.index_level > 1):
with option_context('compute.default_index_type', 'distributed'):
df = self.reset_index()
index = df._internal.column_labels[:(self._internal.index_level - 1)]
columns = df.columns[(self._internal.index_level - 1)]
df = df.pivot_table(index=index, columns=columns, values=self._internal.column_labels, aggfunc='first')
internal = df._internal.copy(index_names=self._internal.index_names[:(- 1)], index_fields=df._internal.index_fields[:(self._internal.index_level - 1)], column_label_names=(df._internal.column_label_names[:(- 1)] + [(None if (self._internal.index_names[(- 1)] is None) else df._internal.column_label_names[(- 1)])]))
return DataFrame(internal)
column_labels = self._internal.column_labels
ser_name = SPARK_DEFAULT_SERIES_NAME
sdf = self._internal.spark_frame
new_index_columns = [SPARK_INDEX_NAME_FORMAT(i) for i in range(self._internal.column_labels_level)]
new_index_map = list(zip_longest(new_index_columns, self._internal.column_label_names, []))
pairs = F.explode(F.array(*[F.struct(*[SF.lit(c).alias(name) for (c, name) in zip(idx, new_index_columns)], *[self._internal.spark_column_for(idx).alias(ser_name)]) for idx in column_labels]))
columns = ([F.col(('pairs.%s' % name)) for name in new_index_columns[:self._internal.column_labels_level]] + [F.col(('pairs.%s' % ser_name))])
new_index_len = len(new_index_columns)
existing_index_columns = []
for (i, (index_name, index_field)) in enumerate(zip(self._internal.index_names, self._internal.index_fields)):
name = SPARK_INDEX_NAME_FORMAT((i + new_index_len))
new_index_map.append((name, index_name, index_field.copy(name=name)))
existing_index_columns.append(self._internal.index_spark_columns[i].alias(name))
exploded_df = sdf.withColumn('pairs', pairs).select((existing_index_columns + columns))
(index_spark_column_names, index_names, index_fields) = zip(*new_index_map)
return first_series(DataFrame(InternalFrame(exploded_df, index_spark_columns=[scol_for(exploded_df, col) for col in index_spark_column_names], index_names=list(index_names), index_fields=list(index_fields), column_labels=[None]))) |
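A minimal usage sketch of the behaviour described in the docstring above, assuming pyspark.pandas is importable and a Spark session is available; the frame contents are illustrative only.

import pyspark.pandas as ps

# With a MultiIndex, the inner-most index level is pivoted into a new
# inner-most column level; with a plain index, unstack() returns a Series.
psdf = ps.DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]})
psdf = psdf.set_index("A", append=True)
wide = psdf.unstack()
print(wide.sort_index())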
def all(self, axis: Axis=0, bool_only: Optional[bool]=None) -> 'Series':
"\n Return whether all elements are True.\n\n Returns True unless there is at least one element within a series that is\n False or equivalent (e.g. zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [True, True, True],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 0],\n ... 'col4': [1, 2, 3],\n ... 'col5': [True, True, None],\n ... 'col6': [True, False, None]},\n ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.all()\n col1 True\n col2 False\n col3 False\n col4 True\n col5 True\n col6 False\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.all(bool_only=True)\n col1 True\n col2 False\n dtype: bool\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if (len(column_labels) == 0):
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.min(F.coalesce(scol.cast('boolean'), SF.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
return self._result_aggregated(column_labels, applied) | -349,392,930,906,440,600 | Return whether all elements are True.
Returns True unless there is at least one element within a series that is
False or equivalent (e.g. zero or empty)
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [True, True, True],
... 'col2': [True, False, False],
... 'col3': [0, 0, 0],
... 'col4': [1, 2, 3],
... 'col5': [True, True, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
col3 False
col4 True
col5 True
col6 False
dtype: bool
Include only boolean columns when set `bool_only=True`.
>>> df.all(bool_only=True)
col1 True
col2 False
dtype: bool | python/pyspark/pandas/frame.py | all | Flyangz/spark | python | def all(self, axis: Axis=0, bool_only: Optional[bool]=None) -> 'Series':
"\n Return whether all elements are True.\n\n Returns True unless there is at least one element within a series that is\n False or equivalent (e.g. zero or empty)\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [True, True, True],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 0],\n ... 'col4': [1, 2, 3],\n ... 'col5': [True, True, None],\n ... 'col6': [True, False, None]},\n ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.all()\n col1 True\n col2 False\n col3 False\n col4 True\n col5 True\n col6 False\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.all(bool_only=True)\n col1 True\n col2 False\n dtype: bool\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if (len(column_labels) == 0):
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
all_col = F.min(F.coalesce(scol.cast('boolean'), SF.lit(True)))
applied.append(F.when(all_col.isNull(), True).otherwise(all_col))
return self._result_aggregated(column_labels, applied) |
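A hedged illustration of all() with and without bool_only; the column names and values are made up but mirror the doctests above.

import pyspark.pandas as ps

psdf = ps.DataFrame({
    "col1": [True, True, True],
    "col2": [True, False, False],
    "col3": [0, 0, 0],
})
print(psdf.all())                # each column reduced to a single boolean
print(psdf.all(bool_only=True))  # restrict the reduction to boolean dtypes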
def any(self, axis: Axis=0, bool_only: Optional[bool]=None) -> 'Series':
"\n Return whether any element is True.\n\n Returns False unless there is at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [False, False, False],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 1],\n ... 'col4': [0, 1, 2],\n ... 'col5': [False, False, None],\n ... 'col6': [True, False, None]},\n ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.any()\n col1 False\n col2 True\n col3 True\n col4 True\n col5 False\n col6 True\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.any(bool_only=True)\n col1 False\n col2 True\n dtype: bool\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if (len(column_labels) == 0):
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
any_col = F.max(F.coalesce(scol.cast('boolean'), SF.lit(False)))
applied.append(F.when(any_col.isNull(), False).otherwise(any_col))
return self._result_aggregated(column_labels, applied) | 5,382,438,178,177,989,000 | Return whether any element is True.
Returns False unless there is at least one element within a series that is
True or equivalent (e.g. non-zero or non-empty).
Parameters
----------
axis : {0 or 'index'}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data.
Returns
-------
Series
Examples
--------
Create a dataframe from a dictionary.
>>> df = ps.DataFrame({
... 'col1': [False, False, False],
... 'col2': [True, False, False],
... 'col3': [0, 0, 1],
... 'col4': [0, 1, 2],
... 'col5': [False, False, None],
... 'col6': [True, False, None]},
... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])
Default behaviour checks whether any value in each column returns True.
>>> df.any()
col1 False
col2 True
col3 True
col4 True
col5 False
col6 True
dtype: bool
Include only boolean columns when set `bool_only=True`.
>>> df.any(bool_only=True)
col1 False
col2 True
dtype: bool | python/pyspark/pandas/frame.py | any | Flyangz/spark | python | def any(self, axis: Axis=0, bool_only: Optional[bool]=None) -> 'Series':
"\n Return whether any element is True.\n\n Returns False unless there is at least one element within a series that is\n True or equivalent (e.g. non-zero or non-empty).\n\n Parameters\n ----------\n axis : {0 or 'index'}, default 0\n Indicate which axis or axes should be reduced.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n\n bool_only : bool, default None\n Include only boolean columns. If None, will attempt to use everything,\n then use only boolean data.\n\n Returns\n -------\n Series\n\n Examples\n --------\n Create a dataframe from a dictionary.\n\n >>> df = ps.DataFrame({\n ... 'col1': [False, False, False],\n ... 'col2': [True, False, False],\n ... 'col3': [0, 0, 1],\n ... 'col4': [0, 1, 2],\n ... 'col5': [False, False, None],\n ... 'col6': [True, False, None]},\n ... columns=['col1', 'col2', 'col3', 'col4', 'col5', 'col6'])\n\n Default behaviour checks if column-wise values all return True.\n\n >>> df.any()\n col1 False\n col2 True\n col3 True\n col4 True\n col5 False\n col6 True\n dtype: bool\n\n Include only boolean columns when set `bool_only=True`.\n\n >>> df.any(bool_only=True)\n col1 False\n col2 True\n dtype: bool\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
column_labels = self._internal.column_labels
if bool_only:
column_labels = self._bool_column_labels(column_labels)
if (len(column_labels) == 0):
return ps.Series([], dtype=bool)
applied = []
for label in column_labels:
scol = self._internal.spark_column_for(label)
any_col = F.max(F.coalesce(scol.cast('boolean'), SF.lit(False)))
applied.append(F.when(any_col.isNull(), False).otherwise(any_col))
return self._result_aggregated(column_labels, applied) |
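A matching sketch for any(); as above, the data is illustrative and mirrors the doctests.

import pyspark.pandas as ps

psdf = ps.DataFrame({
    "col1": [False, False, False],
    "col2": [True, False, False],
    "col3": [0, 0, 1],
})
print(psdf.any())                # True if any value in the column is truthy
print(psdf.any(bool_only=True))  # only boolean columns are considered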
def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]:
'\n Filter column labels of boolean columns (without None).\n '
bool_column_labels = []
for label in column_labels:
psser = self._psser_for(label)
if is_bool_dtype(psser):
bool_column_labels.append(label)
return bool_column_labels | -4,105,215,105,612,054,000 | Filter column labels of boolean columns (without None). | python/pyspark/pandas/frame.py | _bool_column_labels | Flyangz/spark | python | def _bool_column_labels(self, column_labels: List[Label]) -> List[Label]:
'\n \n '
bool_column_labels = []
for label in column_labels:
psser = self._psser_for(label)
if is_bool_dtype(psser):
bool_column_labels.append(label)
return bool_column_labels |
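The helper above is internal; a rough public-API equivalent of the same selection, using is_bool_dtype just as the helper itself does (the frame is illustrative).

import pyspark.pandas as ps
from pandas.api.types import is_bool_dtype

psdf = ps.DataFrame({"flag": [True, False], "count": [1, 2]})
# Keep only the columns whose dtype is boolean, mirroring bool_only=True.
bool_cols = [col for col in psdf.columns if is_bool_dtype(psdf[col])]
print(psdf[bool_cols])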
def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> 'Series':
'\n Given aggregated Spark columns and respective column labels from the original\n pandas-on-Spark DataFrame, construct the result Series.\n '
from pyspark.pandas.series import first_series
cols = []
result_scol_name = 'value'
for (label, applied_col) in zip(column_labels, scols):
cols.append(F.struct(*[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for (i, col) in enumerate(label)], *[applied_col.alias(result_scol_name)]))
sdf = self._internal.spark_frame.select(F.array(*cols).alias('arrays')).select(F.explode(F.col('arrays')))
sdf = sdf.selectExpr('col.*')
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level)], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, result_scol_name)])
return first_series(DataFrame(internal)) | -2,983,645,101,199,888,000 | Given aggregated Spark columns and respective column labels from the original
pandas-on-Spark DataFrame, construct the result Series. | python/pyspark/pandas/frame.py | _result_aggregated | Flyangz/spark | python | def _result_aggregated(self, column_labels: List[Label], scols: List[Column]) -> 'Series':
'\n Given aggregated Spark columns and respective column labels from the original\n pandas-on-Spark DataFrame, construct the result Series.\n '
from pyspark.pandas.series import first_series
cols = []
result_scol_name = 'value'
for (label, applied_col) in zip(column_labels, scols):
cols.append(F.struct(*[SF.lit(col).alias(SPARK_INDEX_NAME_FORMAT(i)) for (i, col) in enumerate(label)], *[applied_col.alias(result_scol_name)]))
sdf = self._internal.spark_frame.select(F.array(*cols).alias('arrays')).select(F.explode(F.col('arrays')))
sdf = sdf.selectExpr('col.*')
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_INDEX_NAME_FORMAT(i)) for i in range(self._internal.column_labels_level)], index_names=self._internal.column_label_names, column_labels=[None], data_spark_columns=[scol_for(sdf, result_scol_name)])
return first_series(DataFrame(internal)) |
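A standalone PySpark sketch of the struct/array/explode pattern that _result_aggregated builds on, turning one row of per-column aggregates into one row per column label; the column and label names here are illustrative, not the internal ones.

from pyspark.sql import SparkSession, functions as F

spark = SparkSession.builder.getOrCreate()
# One row of aggregated values, one value per original column.
aggregated = spark.range(1).select(F.lit(True).alias("col1"), F.lit(False).alias("col2"))
# Wrap each aggregate in a struct carrying its label, collect the structs
# into an array, explode the array, and flatten the struct fields.
exploded = aggregated.select(
    F.explode(F.array(
        F.struct(F.lit("col1").alias("label"), F.col("col1").alias("value")),
        F.struct(F.lit("col2").alias("label"), F.col("col2").alias("value")),
    )).alias("col")
).selectExpr("col.*")
exploded.show()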
def rank(self, method: str='average', ascending: bool=True, numeric_only: Optional[bool]=None) -> 'DataFrame':
"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n .. note:: the current implementation of rank uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n\n Returns\n -------\n ranks : same type as caller\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 4\n 1 2 3\n 2 2 2\n 3 3 1\n\n >>> df.rank().sort_index()\n A B\n 0 1.0 4.0\n 1 2.5 3.0\n 2 2.5 2.0\n 3 4.0 1.0\n\n If method is set to 'min', it use lowest rank in group.\n\n >>> df.rank(method='min').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 4.0 1.0\n\n If method is set to 'max', it use highest rank in group.\n\n >>> df.rank(method='max').sort_index()\n A B\n 0 1.0 4.0\n 1 3.0 3.0\n 2 3.0 2.0\n 3 4.0 1.0\n\n If method is set to 'dense', it leaves no gaps in group.\n\n >>> df.rank(method='dense').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 3.0 1.0\n\n If numeric_only is set to 'True', rank only numeric columns.\n\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B'])\n >>> df\n A B\n 0 1 a\n 1 2 b\n 2 2 d\n 3 3 c\n >>> df.rank(numeric_only=True)\n A\n 0 1.0\n 1 2.5\n 2 2.5\n 3 4.0\n "
if numeric_only:
numeric_col_names = []
for label in self._internal.column_labels:
psser = self._psser_for(label)
if isinstance(psser.spark.data_type, (NumericType, BooleanType)):
numeric_col_names.append(psser.name)
psdf = (self[numeric_col_names] if numeric_only else self)
return psdf._apply_series_op((lambda psser: psser._rank(method=method, ascending=ascending)), should_resolve=True) | 2,881,934,767,336,696,000 | Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values.
.. note:: the current implementation of rank uses Spark's Window without
specifying partition specification. This moves all the data into a
single partition on a single machine and could cause serious
performance degradation. Avoid this method on very large datasets.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
Returns
-------
ranks : same type as caller
Examples
--------
>>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])
>>> df
A B
0 1 4
1 2 3
2 2 2
3 3 1
>>> df.rank().sort_index()
A B
0 1.0 4.0
1 2.5 3.0
2 2.5 2.0
3 4.0 1.0
If method is set to 'min', it uses the lowest rank in the group.
>>> df.rank(method='min').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 4.0 1.0
If method is set to 'max', it uses the highest rank in the group.
>>> df.rank(method='max').sort_index()
A B
0 1.0 4.0
1 3.0 3.0
2 3.0 2.0
3 4.0 1.0
If method is set to 'dense', it leaves no gaps in the ranking.
>>> df.rank(method='dense').sort_index()
A B
0 1.0 4.0
1 2.0 3.0
2 2.0 2.0
3 3.0 1.0
If numeric_only is set to True, only numeric columns are ranked.
>>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B'])
>>> df
A B
0 1 a
1 2 b
2 2 d
3 3 c
>>> df.rank(numeric_only=True)
A
0 1.0
1 2.5
2 2.5
3 4.0 | python/pyspark/pandas/frame.py | rank | Flyangz/spark | python | def rank(self, method: str='average', ascending: bool=True, numeric_only: Optional[bool]=None) -> 'DataFrame':
"\n Compute numerical data ranks (1 through n) along axis. Equal values are\n assigned a rank that is the average of the ranks of those values.\n\n .. note:: the current implementation of rank uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n method : {'average', 'min', 'max', 'first', 'dense'}\n * average: average rank of group\n * min: lowest rank in group\n * max: highest rank in group\n * first: ranks assigned in order they appear in the array\n * dense: like 'min', but rank always increases by 1 between groups\n ascending : boolean, default True\n False for ranks by high (1) to low (N)\n numeric_only : bool, optional\n For DataFrame objects, rank only numeric columns if set to True.\n\n Returns\n -------\n ranks : same type as caller\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': [4, 3, 2, 1]}, columns=['A', 'B'])\n >>> df\n A B\n 0 1 4\n 1 2 3\n 2 2 2\n 3 3 1\n\n >>> df.rank().sort_index()\n A B\n 0 1.0 4.0\n 1 2.5 3.0\n 2 2.5 2.0\n 3 4.0 1.0\n\n If method is set to 'min', it use lowest rank in group.\n\n >>> df.rank(method='min').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 4.0 1.0\n\n If method is set to 'max', it use highest rank in group.\n\n >>> df.rank(method='max').sort_index()\n A B\n 0 1.0 4.0\n 1 3.0 3.0\n 2 3.0 2.0\n 3 4.0 1.0\n\n If method is set to 'dense', it leaves no gaps in group.\n\n >>> df.rank(method='dense').sort_index()\n A B\n 0 1.0 4.0\n 1 2.0 3.0\n 2 2.0 2.0\n 3 3.0 1.0\n\n If numeric_only is set to 'True', rank only numeric columns.\n\n >>> df = ps.DataFrame({'A': [1, 2, 2, 3], 'B': ['a', 'b', 'd', 'c']}, columns= ['A', 'B'])\n >>> df\n A B\n 0 1 a\n 1 2 b\n 2 2 d\n 3 3 c\n >>> df.rank(numeric_only=True)\n A\n 0 1.0\n 1 2.5\n 2 2.5\n 3 4.0\n "
if numeric_only:
numeric_col_names = []
for label in self._internal.column_labels:
psser = self._psser_for(label)
if isinstance(psser.spark.data_type, (NumericType, BooleanType)):
numeric_col_names.append(psser.name)
psdf = (self[numeric_col_names] if numeric_only else self)
return psdf._apply_series_op((lambda psser: psser._rank(method=method, ascending=ascending)), should_resolve=True) |
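A small example of the tie-handling options; the data mirrors the doctests above.

import pyspark.pandas as ps

psdf = ps.DataFrame({"A": [1, 2, 2, 3], "B": [4, 3, 2, 1]})
print(psdf.rank().sort_index())                 # ties get the average rank
print(psdf.rank(method="dense").sort_index())   # ties share a rank, no gaps
print(psdf.rank(ascending=False).sort_index())  # rank from high to low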
def filter(self, items: Optional[Sequence[Any]]=None, like: Optional[str]=None, regex: Optional[str]=None, axis: Optional[Axis]=None) -> 'DataFrame':
'\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : string\n Keep labels from axis for which "like in label == True".\n regex : string (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n \'index\' for Series, \'columns\' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=[\'mouse\', \'rabbit\'],\n ... columns=[\'one\', \'two\', \'three\'])\n\n >>> # select columns by name\n >>> df.filter(items=[\'one\', \'three\'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex=\'e$\', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing \'bbi\'\n >>> df.filter(like=\'bbi\', axis=0)\n one two three\n rabbit 4 5 6\n\n For a Series,\n\n >>> # select rows by name\n >>> df.one.filter(items=[\'rabbit\'])\n rabbit 4\n Name: one, dtype: int64\n\n >>> # select rows by regular expression\n >>> df.one.filter(regex=\'e$\')\n mouse 1\n Name: one, dtype: int64\n\n >>> # select rows containing \'bbi\'\n >>> df.one.filter(like=\'bbi\')\n rabbit 4\n Name: one, dtype: int64\n '
if (sum(((x is not None) for x in (items, like, regex))) > 1):
raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive')
axis = validate_axis(axis, none_axis=1)
index_scols = self._internal.index_spark_columns
if (items is not None):
if is_list_like(items):
items = list(items)
else:
raise ValueError('items should be a list-like object.')
if (axis == 0):
if (len(index_scols) == 1):
if (len(items) <= ps.get_option('compute.isin_limit')):
col = index_scols[0].isin([SF.lit(item) for item in items])
return DataFrame(self._internal.with_filter(col))
else:
item_sdf_col = verify_temp_column_name(self._internal.spark_frame, '__item__')
item_sdf = default_session().createDataFrame(pd.DataFrame({item_sdf_col: items}))
joined_sdf = self._internal.spark_frame.join(other=F.broadcast(item_sdf), on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)), how='semi')
return DataFrame(self._internal.with_new_sdf(joined_sdf))
else:
col = None
for item in items:
if (not isinstance(item, tuple)):
raise TypeError('Unsupported type {}'.format(type(item).__name__))
if (not item):
raise ValueError('The item should not be empty.')
midx_col = None
for (i, element) in enumerate(item):
if (midx_col is None):
midx_col = (index_scols[i] == SF.lit(element))
else:
midx_col = (midx_col & (index_scols[i] == SF.lit(element)))
if (col is None):
col = midx_col
else:
col = (col | midx_col)
return DataFrame(self._internal.with_filter(col))
else:
return self[items]
elif (like is not None):
if (axis == 0):
col = None
for index_scol in index_scols:
if (col is None):
col = index_scol.contains(like)
else:
col = (col | index_scol.contains(like))
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
output_labels = [label for label in column_labels if any(((like in i) for i in label))]
return self[output_labels]
elif (regex is not None):
if (axis == 0):
col = None
for index_scol in index_scols:
if (col is None):
col = index_scol.rlike(regex)
else:
col = (col | index_scol.rlike(regex))
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
matcher = re.compile(regex)
output_labels = [label for label in column_labels if any(((matcher.search(i) is not None) for i in label))]
return self[output_labels]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`') | 8,439,502,228,821,004,000 | Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : string
Keep labels from axis for which "like in label == True".
regex : string (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
For a Series,
>>> # select rows by name
>>> df.one.filter(items=['rabbit'])
rabbit 4
Name: one, dtype: int64
>>> # select rows by regular expression
>>> df.one.filter(regex='e$')
mouse 1
Name: one, dtype: int64
>>> # select rows containing 'bbi'
>>> df.one.filter(like='bbi')
rabbit 4
Name: one, dtype: int64 | python/pyspark/pandas/frame.py | filter | Flyangz/spark | python | def filter(self, items: Optional[Sequence[Any]]=None, like: Optional[str]=None, regex: Optional[str]=None, axis: Optional[Axis]=None) -> 'DataFrame':
'\n Subset rows or columns of dataframe according to labels in\n the specified index.\n\n Note that this routine does not filter a dataframe on its\n contents. The filter is applied to the labels of the index.\n\n Parameters\n ----------\n items : list-like\n Keep labels from axis which are in items.\n like : string\n Keep labels from axis for which "like in label == True".\n regex : string (regular expression)\n Keep labels from axis for which re.search(regex, label) == True.\n axis : int or string axis name\n The axis to filter on. By default this is the info axis,\n \'index\' for Series, \'columns\' for DataFrame.\n\n Returns\n -------\n same type as input object\n\n See Also\n --------\n DataFrame.loc\n\n Notes\n -----\n The ``items``, ``like``, and ``regex`` parameters are\n enforced to be mutually exclusive.\n\n ``axis`` defaults to the info axis that is used when indexing\n with ``[]``.\n\n Examples\n --------\n >>> df = ps.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),\n ... index=[\'mouse\', \'rabbit\'],\n ... columns=[\'one\', \'two\', \'three\'])\n\n >>> # select columns by name\n >>> df.filter(items=[\'one\', \'three\'])\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select columns by regular expression\n >>> df.filter(regex=\'e$\', axis=1)\n one three\n mouse 1 3\n rabbit 4 6\n\n >>> # select rows containing \'bbi\'\n >>> df.filter(like=\'bbi\', axis=0)\n one two three\n rabbit 4 5 6\n\n For a Series,\n\n >>> # select rows by name\n >>> df.one.filter(items=[\'rabbit\'])\n rabbit 4\n Name: one, dtype: int64\n\n >>> # select rows by regular expression\n >>> df.one.filter(regex=\'e$\')\n mouse 1\n Name: one, dtype: int64\n\n >>> # select rows containing \'bbi\'\n >>> df.one.filter(like=\'bbi\')\n rabbit 4\n Name: one, dtype: int64\n '
if (sum(((x is not None) for x in (items, like, regex))) > 1):
raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive')
axis = validate_axis(axis, none_axis=1)
index_scols = self._internal.index_spark_columns
if (items is not None):
if is_list_like(items):
items = list(items)
else:
raise ValueError('items should be a list-like object.')
if (axis == 0):
if (len(index_scols) == 1):
if (len(items) <= ps.get_option('compute.isin_limit')):
col = index_scols[0].isin([SF.lit(item) for item in items])
return DataFrame(self._internal.with_filter(col))
else:
item_sdf_col = verify_temp_column_name(self._internal.spark_frame, '__item__')
item_sdf = default_session().createDataFrame(pd.DataFrame({item_sdf_col: items}))
joined_sdf = self._internal.spark_frame.join(other=F.broadcast(item_sdf), on=(index_scols[0] == scol_for(item_sdf, item_sdf_col)), how='semi')
return DataFrame(self._internal.with_new_sdf(joined_sdf))
else:
col = None
for item in items:
if (not isinstance(item, tuple)):
raise TypeError('Unsupported type {}'.format(type(item).__name__))
if (not item):
raise ValueError('The item should not be empty.')
midx_col = None
for (i, element) in enumerate(item):
if (midx_col is None):
midx_col = (index_scols[i] == SF.lit(element))
else:
midx_col = (midx_col & (index_scols[i] == SF.lit(element)))
if (col is None):
col = midx_col
else:
col = (col | midx_col)
return DataFrame(self._internal.with_filter(col))
else:
return self[items]
elif (like is not None):
if (axis == 0):
col = None
for index_scol in index_scols:
if (col is None):
col = index_scol.contains(like)
else:
col = (col | index_scol.contains(like))
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
output_labels = [label for label in column_labels if any(((like in i) for i in label))]
return self[output_labels]
elif (regex is not None):
if (axis == 0):
col = None
for index_scol in index_scols:
if (col is None):
col = index_scol.rlike(regex)
else:
col = (col | index_scol.rlike(regex))
return DataFrame(self._internal.with_filter(col))
else:
column_labels = self._internal.column_labels
matcher = re.compile(regex)
output_labels = [label for label in column_labels if any(((matcher.search(i) is not None) for i in label))]
return self[output_labels]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`') |
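A short usage sketch of the three mutually exclusive selectors; the labels are taken from the doctests above.

import numpy as np
import pyspark.pandas as ps

psdf = ps.DataFrame(np.array([[1, 2, 3], [4, 5, 6]]),
                    index=["mouse", "rabbit"],
                    columns=["one", "two", "three"])
print(psdf.filter(items=["one", "three"]))  # keep the listed columns
print(psdf.filter(regex="e$", axis=1))      # columns whose label ends in 'e'
print(psdf.filter(like="bbi", axis=0))      # rows whose label contains 'bbi'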
def rename(self, mapper: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, index: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, columns: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, axis: Axis='index', inplace: bool=False, level: Optional[int]=None, errors: str='ignore') -> Optional['DataFrame']:
'\n Alter axes labels.\n Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series\n will be left as-is. Extra labels listed don’t throw an error.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or functions transformations to apply to that axis’ values.\n Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`\n and `columns`.\n index : dict-like or function\n Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").\n columns : dict-like or function\n Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").\n axis : int or str, default \'index\'\n Axis to target with mapper. Can be either the axis name (\'index\', \'columns\') or\n number (0, 1).\n inplace : bool, default False\n Whether to return a new DataFrame.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified level.\n errors : {\'ignore\', \'raise}, default \'ignore\'\n If \'raise\', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`\n contains labels that are not present in the Index being transformed. If \'ignore\',\n existing keys will be renamed and extra keys will be ignored.\n\n Returns\n -------\n DataFrame with the renamed axis labels.\n\n Raises\n ------\n `KeyError`\n If any of the labels is not found in the selected axis and "errors=\'raise\'".\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n >>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> def str_lower(s) -> str:\n ... return str.lower(s)\n >>> psdf1.rename(str_lower, axis=\'columns\') # doctest: +NORMALIZE_WHITESPACE\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> def mul10(x) -> int:\n ... return x * 10\n >>> psdf1.rename(mul10, axis=\'index\') # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> idx = pd.MultiIndex.from_tuples([(\'X\', \'A\'), (\'X\', \'B\'), (\'Y\', \'C\'), (\'Y\', \'D\')])\n >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)\n >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE\n x y\n A B C D\n 0 1 2 3 4\n 1 5 6 7 8\n\n >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list(\'ab\'))\n >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE\n a b\n x a 1 2\n b 3 4\n y c 5 6\n d 7 8\n '
def gen_mapper_fn(mapper: Union[(Dict, Callable[([Any], Any)])]) -> Tuple[(Callable[([Any], Any)], Dtype, DataType)]:
if isinstance(mapper, dict):
mapper_dict = mapper
type_set = set(map((lambda x: type(x)), mapper_dict.values()))
if (len(type_set) > 1):
raise ValueError('Mapper dict should have the same value type.')
(dtype, spark_return_type) = pandas_on_spark_type(list(type_set)[0])
def mapper_fn(x: Any) -> Any:
if (x in mapper_dict):
return mapper_dict[x]
else:
if (errors == 'raise'):
raise KeyError('Index include value which is not in the `mapper`')
return x
elif callable(mapper):
mapper_callable = cast(Callable, mapper)
return_type = cast(ScalarType, infer_return_type(mapper))
dtype = return_type.dtype
spark_return_type = return_type.spark_type
def mapper_fn(x: Any) -> Any:
return mapper_callable(x)
else:
raise ValueError('`mapper` or `index` or `columns` should be either dict-like or function type.')
return (mapper_fn, dtype, spark_return_type)
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
inplace = validate_bool_kwarg(inplace, 'inplace')
if mapper:
axis = validate_axis(axis)
if (axis == 0):
(index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype) = gen_mapper_fn(mapper)
elif (axis == 1):
(columns_mapper_fn, _, _) = gen_mapper_fn(mapper)
else:
if index:
(index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype) = gen_mapper_fn(index)
if columns:
(columns_mapper_fn, _, _) = gen_mapper_fn(columns)
if ((not index) and (not columns)):
raise ValueError('Either `index` or `columns` should be provided.')
psdf = self.copy()
if index_mapper_fn:
index_columns = psdf._internal.index_spark_column_names
num_indices = len(index_columns)
if level:
if ((level < 0) or (level >= num_indices)):
raise ValueError('level should be an integer between [0, num_indices)')
@pandas_udf(returnType=index_mapper_ret_stype)
def index_mapper_udf(s: pd.Series) -> pd.Series:
return s.map(index_mapper_fn)
index_spark_columns = psdf._internal.index_spark_columns.copy()
index_fields = psdf._internal.index_fields.copy()
if (level is None):
for i in range(num_indices):
index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias(index_columns[i])
index_fields[i] = index_fields[i].copy(dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True)
else:
index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(index_columns[level])
index_fields[level] = index_fields[level].copy(dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True)
psdf = DataFrame(psdf._internal.copy(index_spark_columns=index_spark_columns, index_fields=index_fields))
if columns_mapper_fn:
if level:
if ((level < 0) or (level >= psdf._internal.column_labels_level)):
raise ValueError('level should be an integer between [0, column_labels_level)')
def gen_new_column_labels_entry(column_labels_entry: Label) -> Label:
if (level is None):
return tuple(map(columns_mapper_fn, column_labels_entry))
else:
entry_list = list(column_labels_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))
new_data_pssers = [psdf._psser_for(old_label).rename(new_label) for (old_label, new_label) in zip(psdf._internal.column_labels, new_column_labels)]
psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers))
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf | 4,436,174,056,561,670,000 | Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series
will be left as-is. Extra labels listed don’t throw an error.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to that axis’ values.
Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`
and `columns`.
index : dict-like or function
Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").
columns : dict-like or function
Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").
axis : int or str, default 'index'
Axis to target with mapper. Can be either the axis name ('index', 'columns') or
number (0, 1).
inplace : bool, default False
Whether to modify the DataFrame in place rather than returning a new one.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`
contains labels that are not present in the Index being transformed. If 'ignore',
existing keys will be renamed and extra keys will be ignored.
Returns
-------
DataFrame with the renamed axis labels.
Raises
------
`KeyError`
If any of the labels is not found in the selected axis and "errors='raise'".
Examples
--------
>>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE
a c
0 1 4
1 2 5
2 3 6
>>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> def str_lower(s) -> str:
... return str.lower(s)
>>> psdf1.rename(str_lower, axis='columns') # doctest: +NORMALIZE_WHITESPACE
a b
0 1 4
1 2 5
2 3 6
>>> def mul10(x) -> int:
... return x * 10
>>> psdf1.rename(mul10, axis='index') # doctest: +NORMALIZE_WHITESPACE
A B
0 1 4
10 2 5
20 3 6
>>> idx = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Y', 'D')])
>>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)
>>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE
x y
A B C D
0 1 2 3 4
1 5 6 7 8
>>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list('ab'))
>>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE
a b
x a 1 2
b 3 4
y c 5 6
d 7 8 | python/pyspark/pandas/frame.py | rename | Flyangz/spark | python | def rename(self, mapper: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, index: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, columns: Optional[Union[(Dict, Callable[([Any], Any)])]]=None, axis: Axis='index', inplace: bool=False, level: Optional[int]=None, errors: str='ignore') -> Optional['DataFrame']:
'\n Alter axes labels.\n Function / dict values must be unique (1-to-1). Labels not contained in a dict / Series\n will be left as-is. Extra labels listed don’t throw an error.\n\n Parameters\n ----------\n mapper : dict-like or function\n Dict-like or functions transformations to apply to that axis’ values.\n Use either `mapper` and `axis` to specify the axis to target with `mapper`, or `index`\n and `columns`.\n index : dict-like or function\n Alternative to specifying axis ("mapper, axis=0" is equivalent to "index=mapper").\n columns : dict-like or function\n Alternative to specifying axis ("mapper, axis=1" is equivalent to "columns=mapper").\n axis : int or str, default \'index\'\n Axis to target with mapper. Can be either the axis name (\'index\', \'columns\') or\n number (0, 1).\n inplace : bool, default False\n Whether to return a new DataFrame.\n level : int or level name, default None\n In case of a MultiIndex, only rename labels in the specified level.\n errors : {\'ignore\', \'raise}, default \'ignore\'\n If \'raise\', raise a `KeyError` when a dict-like `mapper`, `index`, or `columns`\n contains labels that are not present in the Index being transformed. If \'ignore\',\n existing keys will be renamed and extra keys will be ignored.\n\n Returns\n -------\n DataFrame with the renamed axis labels.\n\n Raises\n ------\n `KeyError`\n If any of the labels is not found in the selected axis and "errors=\'raise\'".\n\n Examples\n --------\n >>> psdf1 = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n >>> psdf1.rename(columns={"A": "a", "B": "c"}) # doctest: +NORMALIZE_WHITESPACE\n a c\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf1.rename(index={1: 10, 2: 20}) # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> def str_lower(s) -> str:\n ... return str.lower(s)\n >>> psdf1.rename(str_lower, axis=\'columns\') # doctest: +NORMALIZE_WHITESPACE\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> def mul10(x) -> int:\n ... return x * 10\n >>> psdf1.rename(mul10, axis=\'index\') # doctest: +NORMALIZE_WHITESPACE\n A B\n 0 1 4\n 10 2 5\n 20 3 6\n\n >>> idx = pd.MultiIndex.from_tuples([(\'X\', \'A\'), (\'X\', \'B\'), (\'Y\', \'C\'), (\'Y\', \'D\')])\n >>> psdf2 = ps.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=idx)\n >>> psdf2.rename(columns=str_lower, level=0) # doctest: +NORMALIZE_WHITESPACE\n x y\n A B C D\n 0 1 2 3 4\n 1 5 6 7 8\n\n >>> psdf3 = ps.DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=idx, columns=list(\'ab\'))\n >>> psdf3.rename(index=str_lower) # doctest: +NORMALIZE_WHITESPACE\n a b\n x a 1 2\n b 3 4\n y c 5 6\n d 7 8\n '
def gen_mapper_fn(mapper: Union[(Dict, Callable[([Any], Any)])]) -> Tuple[(Callable[([Any], Any)], Dtype, DataType)]:
if isinstance(mapper, dict):
mapper_dict = mapper
type_set = set(map((lambda x: type(x)), mapper_dict.values()))
if (len(type_set) > 1):
raise ValueError('Mapper dict should have the same value type.')
(dtype, spark_return_type) = pandas_on_spark_type(list(type_set)[0])
def mapper_fn(x: Any) -> Any:
if (x in mapper_dict):
return mapper_dict[x]
else:
if (errors == 'raise'):
raise KeyError('Index include value which is not in the `mapper`')
return x
elif callable(mapper):
mapper_callable = cast(Callable, mapper)
return_type = cast(ScalarType, infer_return_type(mapper))
dtype = return_type.dtype
spark_return_type = return_type.spark_type
def mapper_fn(x: Any) -> Any:
return mapper_callable(x)
else:
raise ValueError('`mapper` or `index` or `columns` should be either dict-like or function type.')
return (mapper_fn, dtype, spark_return_type)
index_mapper_fn = None
index_mapper_ret_stype = None
columns_mapper_fn = None
inplace = validate_bool_kwarg(inplace, 'inplace')
if mapper:
axis = validate_axis(axis)
if (axis == 0):
(index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype) = gen_mapper_fn(mapper)
elif (axis == 1):
(columns_mapper_fn, _, _) = gen_mapper_fn(mapper)
else:
if index:
(index_mapper_fn, index_mapper_ret_dtype, index_mapper_ret_stype) = gen_mapper_fn(index)
if columns:
(columns_mapper_fn, _, _) = gen_mapper_fn(columns)
if ((not index) and (not columns)):
raise ValueError('Either `index` or `columns` should be provided.')
psdf = self.copy()
if index_mapper_fn:
index_columns = psdf._internal.index_spark_column_names
num_indices = len(index_columns)
if level:
if ((level < 0) or (level >= num_indices)):
raise ValueError('level should be an integer between [0, num_indices)')
@pandas_udf(returnType=index_mapper_ret_stype)
def index_mapper_udf(s: pd.Series) -> pd.Series:
return s.map(index_mapper_fn)
index_spark_columns = psdf._internal.index_spark_columns.copy()
index_fields = psdf._internal.index_fields.copy()
if (level is None):
for i in range(num_indices):
index_spark_columns[i] = index_mapper_udf(index_spark_columns[i]).alias(index_columns[i])
index_fields[i] = index_fields[i].copy(dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True)
else:
index_spark_columns[level] = index_mapper_udf(index_spark_columns[level]).alias(index_columns[level])
index_fields[level] = index_fields[level].copy(dtype=index_mapper_ret_dtype, spark_type=index_mapper_ret_stype, nullable=True)
psdf = DataFrame(psdf._internal.copy(index_spark_columns=index_spark_columns, index_fields=index_fields))
if columns_mapper_fn:
if level:
if ((level < 0) or (level >= psdf._internal.column_labels_level)):
raise ValueError('level should be an integer between [0, column_labels_level)')
def gen_new_column_labels_entry(column_labels_entry: Label) -> Label:
if (level is None):
return tuple(map(columns_mapper_fn, column_labels_entry))
else:
entry_list = list(column_labels_entry)
entry_list[level] = columns_mapper_fn(entry_list[level])
return tuple(entry_list)
new_column_labels = list(map(gen_new_column_labels_entry, psdf._internal.column_labels))
new_data_pssers = [psdf._psser_for(old_label).rename(new_label) for (old_label, new_label) in zip(psdf._internal.column_labels, new_column_labels)]
psdf = DataFrame(psdf._internal.with_new_columns(new_data_pssers))
if inplace:
self._update_internal_frame(psdf._internal)
return None
else:
return psdf |
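A hedged sketch of the dict and callable mappers; note that, as in the doctests, a callable mapper needs a return-type annotation so the Spark return type can be inferred.

import pyspark.pandas as ps

psdf = ps.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
print(psdf.rename(columns={"A": "a", "B": "c"}))  # dict mapper on columns
print(psdf.rename(index={1: 10, 2: 20}))          # dict mapper on the index

def str_lower(s) -> str:  # annotated so the return type can be inferred
    return str.lower(s)

print(psdf.rename(str_lower, axis="columns"))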
def rename_axis(self, mapper: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, index: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, columns: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, axis: Optional[Axis]=0, inplace: Optional[bool]=False) -> Optional['DataFrame']:
'\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis\' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to rename.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new DataFrame.\n\n Returns\n -------\n DataFrame, or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n\n The second calling convention will modify the names of the\n corresponding index specified by axis.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n >>> df = ps.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... index=["dog", "cat", "monkey"],\n ... columns=["num_legs", "num_arms"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df = df.rename_axis("animal").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df = df.rename_axis("limbs", axis="columns").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n limbs num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> index = pd.MultiIndex.from_product([[\'mammal\'],\n ... [\'dog\', \'cat\', \'monkey\']],\n ... names=[\'type\', \'name\'])\n >>> df = ps.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... index=index,\n ... columns=["num_legs", "num_arms"])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={\'type\': \'class\'}).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n class name\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n TYPE NAME\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n '
def gen_names(v: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])], curnames: List[Name]) -> List[Label]:
newnames: List[Name]
if is_scalar(v):
newnames = [cast(Name, v)]
elif (is_list_like(v) and (not is_dict_like(v))):
newnames = list(cast(Sequence[Name], v))
elif is_dict_like(v):
v_dict = cast(Dict[(Name, Name)], v)
newnames = [(v_dict[name] if (name in v_dict) else name) for name in curnames]
elif callable(v):
v_callable = cast(Callable[([Name], Name)], v)
newnames = [v_callable(name) for name in curnames]
else:
raise ValueError('`mapper` or `index` or `columns` should be either dict-like or function type.')
if (len(newnames) != len(curnames)):
raise ValueError('Length of new names must be {}, got {}'.format(len(curnames), len(newnames)))
return [(name if is_name_like_tuple(name) else (name,)) for name in newnames]
if ((mapper is not None) and ((index is not None) or (columns is not None))):
raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.")
if (mapper is not None):
axis = validate_axis(axis)
if (axis == 0):
index = mapper
elif (axis == 1):
columns = mapper
column_label_names = (gen_names(columns, self.columns.names) if (columns is not None) else self._internal.column_label_names)
index_names = (gen_names(index, self.index.names) if (index is not None) else self._internal.index_names)
internal = self._internal.copy(index_names=index_names, column_label_names=column_label_names)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) | -2,829,426,125,369,859,600 | Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
A scalar, list-like, dict-like or function transformation to
apply to the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or function transformation to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
inplace : bool, default False
Modifies the object directly, instead of creating a new DataFrame.
Returns
-------
DataFrame, or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
The second calling convention will modify the names of the
corresponding index specified by axis.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
>>> df = ps.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=["dog", "cat", "monkey"],
... columns=["num_legs", "num_arms"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns").sort_index()
>>> df # doctest: +NORMALIZE_WHITESPACE
limbs num_legs num_arms
animal
cat 4 0
dog 4 0
monkey 2 2
**MultiIndex**
>>> index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df = ps.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... index=index,
... columns=["num_legs", "num_arms"])
>>> df # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'}).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
class name
mammal cat 4 0
dog 4 0
monkey 2 2
>>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE
num_legs num_arms
TYPE NAME
mammal cat 4 0
dog 4 0
monkey 2 2 | python/pyspark/pandas/frame.py | rename_axis | Flyangz/spark | python | def rename_axis(self, mapper: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, index: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, columns: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])]=None, axis: Optional[Axis]=0, inplace: Optional[bool]=False) -> Optional['DataFrame']:
'\n Set the name of the axis for the index or columns.\n\n Parameters\n ----------\n mapper : scalar, list-like, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to the axis name attribute.\n index, columns : scalar, list-like, dict-like or function, optional\n A scalar, list-like, dict-like or functions transformations to\n apply to that axis\' values.\n\n Use either ``mapper`` and ``axis`` to\n specify the axis to target with ``mapper``, or ``index``\n and/or ``columns``.\n axis : {0 or \'index\', 1 or \'columns\'}, default 0\n The axis to rename.\n inplace : bool, default False\n Modifies the object directly, instead of creating a new DataFrame.\n\n Returns\n -------\n DataFrame, or None if `inplace` is True.\n\n See Also\n --------\n Series.rename : Alter Series index labels or name.\n DataFrame.rename : Alter DataFrame index labels or name.\n Index.rename : Set new names on index.\n\n Notes\n -----\n ``DataFrame.rename_axis`` supports two calling conventions\n\n * ``(index=index_mapper, columns=columns_mapper, ...)``\n * ``(mapper, axis={\'index\', \'columns\'}, ...)``\n\n The first calling convention will only modify the names of\n the index and/or the names of the Index object that is the columns.\n\n The second calling convention will modify the names of the\n corresponding index specified by axis.\n\n We *highly* recommend using keyword arguments to clarify your\n intent.\n\n Examples\n --------\n >>> df = ps.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... index=["dog", "cat", "monkey"],\n ... columns=["num_legs", "num_arms"])\n >>> df\n num_legs num_arms\n dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df = df.rename_axis("animal").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df = df.rename_axis("limbs", axis="columns").sort_index()\n >>> df # doctest: +NORMALIZE_WHITESPACE\n limbs num_legs num_arms\n animal\n cat 4 0\n dog 4 0\n monkey 2 2\n\n **MultiIndex**\n\n >>> index = pd.MultiIndex.from_product([[\'mammal\'],\n ... [\'dog\', \'cat\', \'monkey\']],\n ... names=[\'type\', \'name\'])\n >>> df = ps.DataFrame({"num_legs": [4, 4, 2],\n ... "num_arms": [0, 0, 2]},\n ... index=index,\n ... columns=["num_legs", "num_arms"])\n >>> df # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n type name\n mammal dog 4 0\n cat 4 0\n monkey 2 2\n\n >>> df.rename_axis(index={\'type\': \'class\'}).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n class name\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n\n >>> df.rename_axis(index=str.upper).sort_index() # doctest: +NORMALIZE_WHITESPACE\n num_legs num_arms\n TYPE NAME\n mammal cat 4 0\n dog 4 0\n monkey 2 2\n '
def gen_names(v: Union[(Any, Sequence[Any], Dict[(Name, Any)], Callable[([Name], Any)])], curnames: List[Name]) -> List[Label]:
newnames: List[Name]
if is_scalar(v):
newnames = [cast(Name, v)]
elif (is_list_like(v) and (not is_dict_like(v))):
newnames = list(cast(Sequence[Name], v))
elif is_dict_like(v):
v_dict = cast(Dict[(Name, Name)], v)
newnames = [(v_dict[name] if (name in v_dict) else name) for name in curnames]
elif callable(v):
v_callable = cast(Callable[([Name], Name)], v)
newnames = [v_callable(name) for name in curnames]
else:
raise ValueError('`mapper` or `index` or `columns` should be either dict-like or function type.')
if (len(newnames) != len(curnames)):
raise ValueError('Length of new names must be {}, got {}'.format(len(curnames), len(newnames)))
return [(name if is_name_like_tuple(name) else (name,)) for name in newnames]
if ((mapper is not None) and ((index is not None) or (columns is not None))):
raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'.")
if (mapper is not None):
axis = validate_axis(axis)
if (axis == 0):
index = mapper
elif (axis == 1):
columns = mapper
column_label_names = (gen_names(columns, self.columns.names) if (columns is not None) else self._internal.column_label_names)
index_names = (gen_names(index, self.index.names) if (index is not None) else self._internal.index_names)
internal = self._internal.copy(index_names=index_names, column_label_names=column_label_names)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) |
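A minimal usage sketch for rename_axis, added for illustration and not part of the original record; it assumes pyspark with the pandas API on Spark is installed and importable as `ps`, and the data values are made up:

import pyspark.pandas as ps

# Small frame with a named row index, then rename that axis.
df = ps.DataFrame({"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]},
                  index=["dog", "cat", "monkey"])
renamed = df.rename_axis("animal")            # set the index name
upper = renamed.rename_axis(index=str.upper)  # apply a function to the index name
print(upper.sort_index())                     # sort_index() gives deterministic output order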
def keys(self) -> pd.Index:
"\n Return alias for columns.\n\n Returns\n -------\n Index\n Columns of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n\n >>> df.keys()\n Index(['max_speed', 'shield'], dtype='object')\n "
return self.columns | 6,675,430,877,286,866,000 | Return alias for columns.
Returns
-------
Index
Columns of the DataFrame.
Examples
--------
>>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],
... index=['cobra', 'viper', 'sidewinder'],
... columns=['max_speed', 'shield'])
>>> df
max_speed shield
cobra 1 2
viper 4 5
sidewinder 7 8
>>> df.keys()
Index(['max_speed', 'shield'], dtype='object') | python/pyspark/pandas/frame.py | keys | Flyangz/spark | python | def keys(self) -> pd.Index:
"\n Return alias for columns.\n\n Returns\n -------\n Index\n Columns of the DataFrame.\n\n Examples\n --------\n >>> df = ps.DataFrame([[1, 2], [4, 5], [7, 8]],\n ... index=['cobra', 'viper', 'sidewinder'],\n ... columns=['max_speed', 'shield'])\n >>> df\n max_speed shield\n cobra 1 2\n viper 4 5\n sidewinder 7 8\n\n >>> df.keys()\n Index(['max_speed', 'shield'], dtype='object')\n "
return self.columns |
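A short illustrative sketch for keys(), not part of the original record; it assumes `pyspark.pandas` is importable as `ps`:

import pyspark.pandas as ps

df = ps.DataFrame({"max_speed": [1, 4, 7], "shield": [2, 5, 8]})
# keys() is simply an alias for the column Index, identical to df.columns.
print(df.keys())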
def pct_change(self, periods: int=1) -> 'DataFrame':
"\n Percentage change between the current and a prior element.\n\n .. note:: the current implementation of this API uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n Percentage change in French franc, Deutsche Mark, and Italian lira\n from 1980-01-01 to 1980-03-01.\n\n >>> df = ps.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n You can set periods to shift for forming percent change\n\n >>> df.pct_change(2)\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 NaN NaN NaN\n 1980-03-01 0.067912 0.073814 0.06883\n "
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween((- periods), (- periods))
def op(psser: ps.Series) -> Column:
prev_row = F.lag(psser.spark.column, periods).over(window)
return ((psser.spark.column - prev_row) / prev_row).alias(psser._internal.data_spark_column_names[0])
return self._apply_series_op(op, should_resolve=True) | 919,436,271,991,181,000 | Percentage change between the current and a prior element.
.. note:: the current implementation of this API uses Spark's Window without
specifying a partition specification. This moves all data into a single
partition on a single machine and could cause serious performance
degradation. Avoid this method with very large datasets.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
Returns
-------
DataFrame
Examples
--------
Percentage change in French franc, Deutsche Mark, and Italian lira
from 1980-01-01 to 1980-03-01.
>>> df = ps.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
You can set periods to shift for forming percent change
>>> df.pct_change(2)
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 NaN NaN NaN
1980-03-01 0.067912 0.073814 0.06883 | python/pyspark/pandas/frame.py | pct_change | Flyangz/spark | python | def pct_change(self, periods: int=1) -> 'DataFrame':
"\n Percentage change between the current and a prior element.\n\n .. note:: the current implementation of this API uses Spark's Window without\n specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n periods : int, default 1\n Periods to shift for forming percent change.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n Percentage change in French franc, Deutsche Mark, and Italian lira\n from 1980-01-01 to 1980-03-01.\n\n >>> df = ps.DataFrame({\n ... 'FR': [4.0405, 4.0963, 4.3149],\n ... 'GR': [1.7246, 1.7482, 1.8519],\n ... 'IT': [804.74, 810.01, 860.13]},\n ... index=['1980-01-01', '1980-02-01', '1980-03-01'])\n >>> df\n FR GR IT\n 1980-01-01 4.0405 1.7246 804.74\n 1980-02-01 4.0963 1.7482 810.01\n 1980-03-01 4.3149 1.8519 860.13\n\n >>> df.pct_change()\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 0.013810 0.013684 0.006549\n 1980-03-01 0.053365 0.059318 0.061876\n\n You can set periods to shift for forming percent change\n\n >>> df.pct_change(2)\n FR GR IT\n 1980-01-01 NaN NaN NaN\n 1980-02-01 NaN NaN NaN\n 1980-03-01 0.067912 0.073814 0.06883\n "
window = Window.orderBy(NATURAL_ORDER_COLUMN_NAME).rowsBetween((- periods), (- periods))
def op(psser: ps.Series) -> Column:
prev_row = F.lag(psser.spark.column, periods).over(window)
return ((psser.spark.column - prev_row) / prev_row).alias(psser._internal.data_spark_column_names[0])
return self._apply_series_op(op, should_resolve=True) |
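An illustrative pct_change sketch, not part of the original record; it assumes `pyspark.pandas` is importable as `ps` and uses made-up values:

import pyspark.pandas as ps

df = ps.DataFrame({"FR": [4.0405, 4.0963, 4.3149],
                   "GR": [1.7246, 1.7482, 1.8519]},
                  index=["1980-01-01", "1980-02-01", "1980-03-01"])
# pct_change uses an unpartitioned window, so all rows end up in one
# partition; keep the input small.
print(df.pct_change())            # change relative to the previous row
print(df.pct_change(periods=2))   # change relative to two rows back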
def idxmax(self, axis: Axis=0) -> 'Series':
"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with maximum value using `to_pandas()`\n because we suppose the number of rows with max values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmax\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a 2\n b 0\n c 2\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a x 2\n b y 0\n c z 2\n dtype: int64\n "
max_cols = map((lambda scol: F.max(scol)), self._internal.data_spark_columns)
sdf_max = self._internal.spark_frame.select(*max_cols).head()
conds = ((scol == max_val) for (scol, max_val) in zip(self._internal.data_spark_columns, sdf_max))
cond = reduce((lambda x, y: (x | y)), conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax())) | 5,427,617,348,550,696,000 | Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the maximum value using `to_pandas()`
because the number of rows holding the maximum is usually small.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmax
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a 2
b 0
c 2
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmax()
a x 2
b y 0
c z 2
dtype: int64 | python/pyspark/pandas/frame.py | idxmax | Flyangz/spark | python | def idxmax(self, axis: Axis=0) -> 'Series':
"\n Return index of first occurrence of maximum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with maximum value using `to_pandas()`\n because we suppose the number of rows with max values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmax\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a 2\n b 0\n c 2\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmax()\n a x 2\n b y 0\n c z 2\n dtype: int64\n "
max_cols = map((lambda scol: F.max(scol)), self._internal.data_spark_columns)
sdf_max = self._internal.spark_frame.select(*max_cols).head()
conds = ((scol == max_val) for (scol, max_val) in zip(self._internal.data_spark_columns, sdf_max))
cond = reduce((lambda x, y: (x | y)), conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmax())) |
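A minimal idxmax sketch for illustration only (assuming `pyspark.pandas` is importable as `ps`; the values are made up):

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3, 2],
                     "b": [4.0, 2.0, 3.0, 1.0],
                     "c": [300, 200, 400, 200]})
# For each column, return the index label of the first occurrence of the maximum.
print(psdf.idxmax())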
def idxmin(self, axis: Axis=0) -> 'Series':
"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with minimum value using `to_pandas()`\n because we suppose the number of rows with min values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmin\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a 0\n b 3\n c 1\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a x 0\n b y 3\n c z 1\n dtype: int64\n "
min_cols = map((lambda scol: F.min(scol)), self._internal.data_spark_columns)
sdf_min = self._internal.spark_frame.select(*min_cols).head()
conds = ((scol == min_val) for (scol, min_val) in zip(self._internal.data_spark_columns, sdf_min))
cond = reduce((lambda x, y: (x | y)), conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin())) | 3,556,289,599,252,744,000 | Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
.. note:: This API collects all rows with the minimum value using `to_pandas()`
because the number of rows holding the minimum is usually small.
Parameters
----------
axis : 0 or 'index'
Can only be set to 0 at the moment.
Returns
-------
Series
See Also
--------
Series.idxmin
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf
a b c
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a 0
b 3
c 1
dtype: int64
For Multi-column Index
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],
... 'b': [4.0, 2.0, 3.0, 1.0],
... 'c': [300, 200, 400, 200]})
>>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
>>> psdf
a b c
x y z
0 1 4.0 300
1 2 2.0 200
2 3 3.0 400
3 2 1.0 200
>>> psdf.idxmin()
a x 0
b y 3
c z 1
dtype: int64 | python/pyspark/pandas/frame.py | idxmin | Flyangz/spark | python | def idxmin(self, axis: Axis=0) -> 'Series':
"\n Return index of first occurrence of minimum over requested axis.\n NA/null values are excluded.\n\n .. note:: This API collect all rows with minimum value using `to_pandas()`\n because we suppose the number of rows with min values are usually small in general.\n\n Parameters\n ----------\n axis : 0 or 'index'\n Can only be set to 0 at the moment.\n\n Returns\n -------\n Series\n\n See Also\n --------\n Series.idxmin\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf\n a b c\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a 0\n b 3\n c 1\n dtype: int64\n\n For Multi-column Index\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 2],\n ... 'b': [4.0, 2.0, 3.0, 1.0],\n ... 'c': [300, 200, 400, 200]})\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n 0 1 4.0 300\n 1 2 2.0 200\n 2 3 3.0 400\n 3 2 1.0 200\n\n >>> psdf.idxmin()\n a x 0\n b y 3\n c z 1\n dtype: int64\n "
min_cols = map((lambda scol: F.min(scol)), self._internal.data_spark_columns)
sdf_min = self._internal.spark_frame.select(*min_cols).head()
conds = ((scol == min_val) for (scol, min_val) in zip(self._internal.data_spark_columns, sdf_min))
cond = reduce((lambda x, y: (x | y)), conds)
psdf: DataFrame = DataFrame(self._internal.with_filter(cond))
return cast(ps.Series, ps.from_pandas(psdf._to_internal_pandas().idxmin())) |
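The same kind of sketch for idxmin (illustrative, assuming `pyspark.pandas` as `ps`); it mirrors the idxmax example above:

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3, 2],
                     "b": [4.0, 2.0, 3.0, 1.0],
                     "c": [300, 200, 400, 200]})
# For each column, return the index label of the first occurrence of the minimum.
print(psdf.idxmin())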
def info(self, verbose: Optional[bool]=None, buf: Optional[IO[str]]=None, max_cols: Optional[int]=None, null_counts: Optional[bool]=None) -> None:
'\n Print a concise summary of a DataFrame.\n\n This method prints information about a DataFrame including\n the index dtype and column dtypes, non-null values and memory usage.\n\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used.\n null_counts : bool, optional\n Whether to show the non-null counts.\n\n Returns\n -------\n None\n This method prints a summary of a DataFrame and returns None.\n\n See Also\n --------\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n\n Examples\n --------\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = [\'alpha\', \'beta\', \'gamma\', \'delta\', \'epsilon\']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = ps.DataFrame(\n ... {"int_col": int_values, "text_col": text_values, "float_col": float_values},\n ... columns=[\'int_col\', \'text_col\', \'float_col\'])\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True) # doctest: +SKIP\n <class \'pyspark.pandas.frame.DataFrame\'>\n Index: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False) # doctest: +SKIP\n <class \'pyspark.pandas.frame.DataFrame\'>\n Index: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\'%s/info.txt\' % path, "w",\n ... encoding="utf-8") as f:\n ... _ = f.write(s)\n >>> with open(\'%s/info.txt\' % path) as f:\n ... f.readlines() # doctest: +SKIP\n ["<class \'pyspark.pandas.frame.DataFrame\'>\\n",\n \'Index: 5 entries, 0 to 4\\n\',\n \'Data columns (total 3 columns):\\n\',\n \' # Column Non-Null Count Dtype \\n\',\n \'--- ------ -------------- ----- \\n\',\n \' 0 int_col 5 non-null int64 \\n\',\n \' 1 text_col 5 non-null object \\n\',\n \' 2 float_col 5 non-null float64\\n\',\n \'dtypes: float64(1), int64(1), object(1)\']\n '
with pd.option_context('display.max_info_columns', sys.maxsize, 'display.max_info_rows', sys.maxsize):
try:
object.__setattr__(self, '_data', self)
count_func = self.count
self.count = (lambda : count_func()._to_pandas())
return pd.DataFrame.info(self, verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts)
finally:
del self._data
self.count = count_func | -6,592,994,989,138,733,000 | Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used.
null_counts : bool, optional
Whether to show the non-null counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = ps.DataFrame(
... {"int_col": int_values, "text_col": text_values, "float_col": float_values},
... columns=['int_col', 'text_col', 'float_col'])
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True) # doctest: +SKIP
<class 'pyspark.pandas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
Prints a summary of the column count and dtypes but not per-column information:
>>> df.info(verbose=False) # doctest: +SKIP
<class 'pyspark.pandas.frame.DataFrame'>
Index: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
Pipe the output of DataFrame.info to a buffer instead of sys.stdout, get the
buffer content, and write it to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open('%s/info.txt' % path, "w",
... encoding="utf-8") as f:
... _ = f.write(s)
>>> with open('%s/info.txt' % path) as f:
... f.readlines() # doctest: +SKIP
["<class 'pyspark.pandas.frame.DataFrame'>\n",
'Index: 5 entries, 0 to 4\n',
'Data columns (total 3 columns):\n',
' # Column Non-Null Count Dtype \n',
'--- ------ -------------- ----- \n',
' 0 int_col 5 non-null int64 \n',
' 1 text_col 5 non-null object \n',
' 2 float_col 5 non-null float64\n',
'dtypes: float64(1), int64(1), object(1)'] | python/pyspark/pandas/frame.py | info | Flyangz/spark | python | def info(self, verbose: Optional[bool]=None, buf: Optional[IO[str]]=None, max_cols: Optional[int]=None, null_counts: Optional[bool]=None) -> None:
'\n Print a concise summary of a DataFrame.\n\n This method prints information about a DataFrame including\n the index dtype and column dtypes, non-null values and memory usage.\n\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used.\n null_counts : bool, optional\n Whether to show the non-null counts.\n\n Returns\n -------\n None\n This method prints a summary of a DataFrame and returns None.\n\n See Also\n --------\n DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n\n Examples\n --------\n >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = [\'alpha\', \'beta\', \'gamma\', \'delta\', \'epsilon\']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = ps.DataFrame(\n ... {"int_col": int_values, "text_col": text_values, "float_col": float_values},\n ... columns=[\'int_col\', \'text_col\', \'float_col\'])\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True) # doctest: +SKIP\n <class \'pyspark.pandas.frame.DataFrame\'>\n Index: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False) # doctest: +SKIP\n <class \'pyspark.pandas.frame.DataFrame\'>\n Index: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open(\'%s/info.txt\' % path, "w",\n ... encoding="utf-8") as f:\n ... _ = f.write(s)\n >>> with open(\'%s/info.txt\' % path) as f:\n ... f.readlines() # doctest: +SKIP\n ["<class \'pyspark.pandas.frame.DataFrame\'>\\n",\n \'Index: 5 entries, 0 to 4\\n\',\n \'Data columns (total 3 columns):\\n\',\n \' # Column Non-Null Count Dtype \\n\',\n \'--- ------ -------------- ----- \\n\',\n \' 0 int_col 5 non-null int64 \\n\',\n \' 1 text_col 5 non-null object \\n\',\n \' 2 float_col 5 non-null float64\\n\',\n \'dtypes: float64(1), int64(1), object(1)\']\n '
with pd.option_context('display.max_info_columns', sys.maxsize, 'display.max_info_rows', sys.maxsize):
try:
object.__setattr__(self, '_data', self)
count_func = self.count
self.count = (lambda : count_func()._to_pandas())
return pd.DataFrame.info(self, verbose=verbose, buf=buf, max_cols=max_cols, memory_usage=False, null_counts=null_counts)
finally:
del self._data
self.count = count_func |
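A small info() sketch for illustration, not part of the original record; it assumes `pyspark.pandas` is importable as `ps` and uses made-up column data:

import io
import pyspark.pandas as ps

df = ps.DataFrame({"int_col": [1, 2, 3], "text_col": ["a", "b", "c"]})
df.info(verbose=True)        # per-column summary printed to sys.stdout

buffer = io.StringIO()       # capture the summary instead of printing it
df.info(buf=buffer)
summary = buffer.getvalue()  # plain string, can be written to a file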
def quantile(self, q: Union[(float, Iterable[float])]=0.5, axis: Axis=0, numeric_only: bool=True, accuracy: int=10000) -> DataFrameOrSeries:
"\n Return value at the given quantile.\n\n .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile\n based upon approximate percentile computation because computing quantile across a\n large dataset is extremely expensive.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n axis : int or str, default 0 or 'index'\n Can only be set to 0 at the moment.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be computed as well.\n Can only be set to True at the moment.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n Series or DataFrame\n If q is an array, a DataFrame will be returned where the\n index is q, the columns are the columns of self, and the values are the quantiles.\n If q is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})\n >>> psdf\n a b\n 0 1 6\n 1 2 7\n 2 3 8\n 3 4 9\n 4 5 0\n\n >>> psdf.quantile(.5)\n a 3.0\n b 7.0\n Name: 0.5, dtype: float64\n\n >>> psdf.quantile([.25, .5, .75])\n a b\n 0.25 2.0 6.0\n 0.50 3.0 7.0\n 0.75 4.0 8.0\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
if (not isinstance(accuracy, int)):
raise TypeError(('accuracy must be an integer; however, got [%s]' % type(accuracy).__name__))
qq: Union[(float, List[float])] = (list(q) if isinstance(q, Iterable) else q)
for v in (qq if isinstance(qq, list) else [qq]):
if (not isinstance(v, float)):
raise TypeError(('q must be a float or an array of floats; however, [%s] found.' % type(v)))
if ((v < 0.0) or (v > 1.0)):
raise ValueError('percentiles should all be in the interval [0, 1].')
def quantile(psser: 'Series') -> Column:
spark_type = psser.spark.data_type
spark_column = psser.spark.column
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy)
else:
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if isinstance(qq, list):
percentile_cols: List[Column] = []
percentile_col_names: List[str] = []
column_labels: List[Label] = []
for (label, column) in zip(self._internal.column_labels, self._internal.data_spark_column_names):
psser = self._psser_for(label)
is_numeric_or_boolean = isinstance(psser.spark.data_type, (NumericType, BooleanType))
keep_column = ((not numeric_only) or is_numeric_or_boolean)
if keep_column:
percentile_col = quantile(psser)
percentile_cols.append(percentile_col.alias(column))
percentile_col_names.append(column)
column_labels.append(label)
if (len(percentile_cols) == 0):
return DataFrame(index=qq)
sdf = self._internal.spark_frame.select(percentile_cols)
cols_dict: Dict[(str, List[Column])] = {}
for column in percentile_col_names:
cols_dict[column] = list()
for i in range(len(qq)):
cols_dict[column].append(scol_for(sdf, column)[i].alias(column))
internal_index_column = SPARK_DEFAULT_INDEX_NAME
cols = []
for (i, col) in enumerate(zip(*cols_dict.values())):
cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col))
sdf = sdf.select(F.array(*cols).alias('arrays'))
sdf = sdf.select(F.explode(F.col('arrays'))).selectExpr('col.*')
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, internal_index_column)], column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names])
return DataFrame(internal)
else:
return self._reduce_for_stat_function(quantile, name='quantile', numeric_only=numeric_only).rename(qq) | -3,218,161,924,381,842,000 | Return value at the given quantile.
.. note:: Unlike pandas, the quantile in pandas-on-Spark is an approximated quantile
based on approximate percentile computation, because computing quantiles across a
large dataset is extremely expensive.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
axis : int or str, default 0 or 'index'
Can only be set to 0 at the moment.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be computed as well.
Can only be set to True at the moment.
accuracy : int, optional
Default accuracy of approximation. Larger value means better accuracy.
The relative error can be deduced by 1.0 / accuracy.
Returns
-------
Series or DataFrame
If q is an array, a DataFrame will be returned where the
index is q, the columns are the columns of self, and the values are the quantiles.
If q is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})
>>> psdf
a b
0 1 6
1 2 7
2 3 8
3 4 9
4 5 0
>>> psdf.quantile(.5)
a 3.0
b 7.0
Name: 0.5, dtype: float64
>>> psdf.quantile([.25, .5, .75])
a b
0.25 2.0 6.0
0.50 3.0 7.0
0.75 4.0 8.0 | python/pyspark/pandas/frame.py | quantile | Flyangz/spark | python | def quantile(self, q: Union[(float, Iterable[float])]=0.5, axis: Axis=0, numeric_only: bool=True, accuracy: int=10000) -> DataFrameOrSeries:
"\n Return value at the given quantile.\n\n .. note:: Unlike pandas', the quantile in pandas-on-Spark is an approximated quantile\n based upon approximate percentile computation because computing quantile across a\n large dataset is extremely expensive.\n\n Parameters\n ----------\n q : float or array-like, default 0.5 (50% quantile)\n 0 <= q <= 1, the quantile(s) to compute.\n axis : int or str, default 0 or 'index'\n Can only be set to 0 at the moment.\n numeric_only : bool, default True\n If False, the quantile of datetime and timedelta data will be computed as well.\n Can only be set to True at the moment.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n Series or DataFrame\n If q is an array, a DataFrame will be returned where the\n index is q, the columns are the columns of self, and the values are the quantiles.\n If q is a float, a Series will be returned where the\n index is the columns of self and the values are the quantiles.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, 4, 5], 'b': [6, 7, 8, 9, 0]})\n >>> psdf\n a b\n 0 1 6\n 1 2 7\n 2 3 8\n 3 4 9\n 4 5 0\n\n >>> psdf.quantile(.5)\n a 3.0\n b 7.0\n Name: 0.5, dtype: float64\n\n >>> psdf.quantile([.25, .5, .75])\n a b\n 0.25 2.0 6.0\n 0.50 3.0 7.0\n 0.75 4.0 8.0\n "
axis = validate_axis(axis)
if (axis != 0):
raise NotImplementedError('axis should be either 0 or "index" currently.')
if (not isinstance(accuracy, int)):
raise TypeError(('accuracy must be an integer; however, got [%s]' % type(accuracy).__name__))
qq: Union[(float, List[float])] = (list(q) if isinstance(q, Iterable) else q)
for v in (qq if isinstance(qq, list) else [qq]):
if (not isinstance(v, float)):
raise TypeError(('q must be a float or an array of floats; however, [%s] found.' % type(v)))
if ((v < 0.0) or (v > 1.0)):
raise ValueError('percentiles should all be in the interval [0, 1].')
def quantile(psser: 'Series') -> Column:
spark_type = psser.spark.data_type
spark_column = psser.spark.column
if isinstance(spark_type, (BooleanType, NumericType)):
return F.percentile_approx(spark_column.cast(DoubleType()), qq, accuracy)
else:
raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString()))
if isinstance(qq, list):
percentile_cols: List[Column] = []
percentile_col_names: List[str] = []
column_labels: List[Label] = []
for (label, column) in zip(self._internal.column_labels, self._internal.data_spark_column_names):
psser = self._psser_for(label)
is_numeric_or_boolean = isinstance(psser.spark.data_type, (NumericType, BooleanType))
keep_column = ((not numeric_only) or is_numeric_or_boolean)
if keep_column:
percentile_col = quantile(psser)
percentile_cols.append(percentile_col.alias(column))
percentile_col_names.append(column)
column_labels.append(label)
if (len(percentile_cols) == 0):
return DataFrame(index=qq)
sdf = self._internal.spark_frame.select(percentile_cols)
cols_dict: Dict[(str, List[Column])] = {}
for column in percentile_col_names:
cols_dict[column] = list()
for i in range(len(qq)):
cols_dict[column].append(scol_for(sdf, column)[i].alias(column))
internal_index_column = SPARK_DEFAULT_INDEX_NAME
cols = []
for (i, col) in enumerate(zip(*cols_dict.values())):
cols.append(F.struct(SF.lit(qq[i]).alias(internal_index_column), *col))
sdf = sdf.select(F.array(*cols).alias('arrays'))
sdf = sdf.select(F.explode(F.col('arrays'))).selectExpr('col.*')
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, internal_index_column)], column_labels=column_labels, data_spark_columns=[scol_for(sdf, col) for col in percentile_col_names])
return DataFrame(internal)
else:
return self._reduce_for_stat_function(quantile, name='quantile', numeric_only=numeric_only).rename(qq) |
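A minimal quantile sketch (illustrative only; assumes `pyspark.pandas` is importable as `ps`, values are made up):

import pyspark.pandas as ps

psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5], "b": [6, 7, 8, 9, 0]})
# A single float returns a Series; a list of floats returns a DataFrame.
print(psdf.quantile(0.5))
# Larger accuracy means a better approximation; relative error is about 1.0 / accuracy.
print(psdf.quantile([0.25, 0.5, 0.75], accuracy=100000))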
def query(self, expr: str, inplace: bool=False) -> Optional['DataFrame']:
"\n Query the columns of a DataFrame with a boolean expression.\n\n .. note:: Internal columns that starting with a '__' prefix are able to access, however,\n they are not supposed to be accessed.\n\n .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the\n pandas specific syntax such as `@` is not supported. If you want the pandas syntax,\n you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should\n be aware that `query_func` will be executed at different nodes in a distributed manner.\n So, for example, to use `@` syntax, make sure the variable is serialized by, for\n example, putting it within the closure as below.\n\n >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})\n >>> def query_func(pdf):\n ... num = 1995\n ... return pdf.query('A > @num')\n >>> df.pandas_on_spark.apply_batch(query_func)\n A B\n 1996 1996 1996\n 1997 1997 1997\n 1998 1998 1998\n 1999 1999 1999\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to column names that contain spaces by surrounding\n them in backticks.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n\n Returns\n -------\n DataFrame\n DataFrame resulting from the provided query expression.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n "
if isinstance(self.columns, pd.MultiIndex):
raise TypeError("Doesn't support for MultiIndex columns")
if (not isinstance(expr, str)):
raise TypeError('expr must be a string to be evaluated, {} given'.format(type(expr).__name__))
inplace = validate_bool_kwarg(inplace, 'inplace')
data_columns = [label[0] for label in self._internal.column_labels]
sdf = self._internal.spark_frame.select((self._internal.index_spark_columns + [scol.alias(col) for (scol, col) in zip(self._internal.data_spark_columns, data_columns)])).filter(expr)
internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) | 4,015,551,663,124,263,400 | Query the columns of a DataFrame with a boolean expression.
.. note:: Internal columns that start with a '__' prefix can be accessed; however,
they are not supposed to be accessed.
.. note:: This API delegates to Spark SQL, so the syntax follows Spark SQL. Therefore,
pandas-specific syntax such as `@` is not supported. If you want the pandas syntax,
you can work around it with :meth:`DataFrame.pandas_on_spark.apply_batch`, but be
aware that `query_func` will be executed on different nodes in a distributed manner.
So, for example, to use the `@` syntax, make sure the variable is serialized by, for
example, putting it within the closure as below.
>>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})
>>> def query_func(pdf):
... num = 1995
... return pdf.query('A > @num')
>>> df.pandas_on_spark.apply_batch(query_func)
A B
1996 1996 1996
1997 1997 1997
1998 1998 1998
1999 1999 1999
Parameters
----------
expr : str
The query string to evaluate.
You can refer to column names that contain spaces by surrounding
them in backticks.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
Examples
--------
>>> df = ps.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10 | python/pyspark/pandas/frame.py | query | Flyangz/spark | python | def query(self, expr: str, inplace: bool=False) -> Optional['DataFrame']:
"\n Query the columns of a DataFrame with a boolean expression.\n\n .. note:: Internal columns that starting with a '__' prefix are able to access, however,\n they are not supposed to be accessed.\n\n .. note:: This API delegates to Spark SQL so the syntax follows Spark SQL. Therefore, the\n pandas specific syntax such as `@` is not supported. If you want the pandas syntax,\n you can work around with :meth:`DataFrame.pandas_on_spark.apply_batch`, but you should\n be aware that `query_func` will be executed at different nodes in a distributed manner.\n So, for example, to use `@` syntax, make sure the variable is serialized by, for\n example, putting it within the closure as below.\n\n >>> df = ps.DataFrame({'A': range(2000), 'B': range(2000)})\n >>> def query_func(pdf):\n ... num = 1995\n ... return pdf.query('A > @num')\n >>> df.pandas_on_spark.apply_batch(query_func)\n A B\n 1996 1996 1996\n 1997 1997 1997\n 1998 1998 1998\n 1999 1999 1999\n\n Parameters\n ----------\n expr : str\n The query string to evaluate.\n\n You can refer to column names that contain spaces by surrounding\n them in backticks.\n\n For example, if one of your columns is called ``a a`` and you want\n to sum it with ``b``, your query should be ```a a` + b``.\n\n inplace : bool\n Whether the query should modify the data in place or return\n a modified copy.\n\n Returns\n -------\n DataFrame\n DataFrame resulting from the provided query expression.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6),\n ... 'B': range(10, 0, -2),\n ... 'C C': range(10, 5, -1)})\n >>> df\n A B C C\n 0 1 10 10\n 1 2 8 9\n 2 3 6 8\n 3 4 4 7\n 4 5 2 6\n\n >>> df.query('A > B')\n A B C C\n 4 5 2 6\n\n The previous expression is equivalent to\n\n >>> df[df.A > df.B]\n A B C C\n 4 5 2 6\n\n For columns with spaces in their name, you can use backtick quoting.\n\n >>> df.query('B == `C C`')\n A B C C\n 0 1 10 10\n\n The previous expression is equivalent to\n\n >>> df[df.B == df['C C']]\n A B C C\n 0 1 10 10\n "
if isinstance(self.columns, pd.MultiIndex):
raise TypeError("Doesn't support for MultiIndex columns")
if (not isinstance(expr, str)):
raise TypeError('expr must be a string to be evaluated, {} given'.format(type(expr).__name__))
inplace = validate_bool_kwarg(inplace, 'inplace')
data_columns = [label[0] for label in self._internal.column_labels]
sdf = self._internal.spark_frame.select((self._internal.index_spark_columns + [scol.alias(col) for (scol, col) in zip(self._internal.data_spark_columns, data_columns)])).filter(expr)
internal = self._internal.with_new_sdf(sdf, data_columns=data_columns)
if inplace:
self._update_internal_frame(internal)
return None
else:
return DataFrame(internal) |
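An illustrative query sketch, not part of the original record; it assumes `pyspark.pandas` is importable as `ps`:

import pyspark.pandas as ps

df = ps.DataFrame({"A": range(1, 6), "B": range(10, 0, -2), "C C": range(10, 5, -1)})
print(df.query("A > B"))         # Spark SQL syntax; pandas-style `@variable` is not supported
print(df.query("B == `C C`"))    # backticks for column names containing spaces
df.query("A > 2", inplace=True)  # filter the frame in place instead of returning a copy
print(df)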
def take(self, indices: List[int], axis: Axis=0, **kwargs: Any) -> 'DataFrame':
"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3]).sort_index()\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2]).sort_index()\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n "
axis = validate_axis(axis)
if ((not is_list_like(indices)) or isinstance(indices, (dict, set))):
raise TypeError('`indices` must be a list-like except dict or set')
if (axis == 0):
return cast(DataFrame, self.iloc[indices, :])
else:
return cast(DataFrame, self.iloc[:, indices]) | 8,431,998,034,869,836,000 | Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = ps.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3]).sort_index()
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2]).sort_index()
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5 | python/pyspark/pandas/frame.py | take | Flyangz/spark | python | def take(self, indices: List[int], axis: Axis=0, **kwargs: Any) -> 'DataFrame':
"\n Return the elements in the given *positional* indices along an axis.\n\n This means that we are not indexing according to actual values in\n the index attribute of the object. We are indexing according to the\n actual position of the element in the object.\n\n Parameters\n ----------\n indices : array-like\n An array of ints indicating which positions to take.\n axis : {0 or 'index', 1 or 'columns', None}, default 0\n The axis on which to select elements. ``0`` means that we are\n selecting rows, ``1`` means that we are selecting columns.\n **kwargs\n For compatibility with :meth:`numpy.take`. Has no effect on the\n output.\n\n Returns\n -------\n taken : same type as caller\n An array-like containing the elements taken from the object.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by labels.\n DataFrame.iloc : Select a subset of a DataFrame by positions.\n numpy.take : Take elements from an array along an axis.\n\n Examples\n --------\n >>> df = ps.DataFrame([('falcon', 'bird', 389.0),\n ... ('parrot', 'bird', 24.0),\n ... ('lion', 'mammal', 80.5),\n ... ('monkey', 'mammal', np.nan)],\n ... columns=['name', 'class', 'max_speed'],\n ... index=[0, 2, 3, 1])\n >>> df\n name class max_speed\n 0 falcon bird 389.0\n 2 parrot bird 24.0\n 3 lion mammal 80.5\n 1 monkey mammal NaN\n\n Take elements at positions 0 and 3 along the axis 0 (default).\n\n Note how the actual indices selected (0 and 1) do not correspond to\n our selected indices 0 and 3. That's because we are selecting the 0th\n and 3rd rows, not rows whose indices equal 0 and 3.\n\n >>> df.take([0, 3]).sort_index()\n name class max_speed\n 0 falcon bird 389.0\n 1 monkey mammal NaN\n\n Take elements at indices 1 and 2 along the axis 1 (column selection).\n\n >>> df.take([1, 2], axis=1)\n class max_speed\n 0 bird 389.0\n 2 bird 24.0\n 3 mammal 80.5\n 1 mammal NaN\n\n We may take elements using negative integers for positive indices,\n starting from the end of the object, just like with Python lists.\n\n >>> df.take([-1, -2]).sort_index()\n name class max_speed\n 1 monkey mammal NaN\n 3 lion mammal 80.5\n "
axis = validate_axis(axis)
if ((not is_list_like(indices)) or isinstance(indices, (dict, set))):
raise TypeError('`indices` must be a list-like except dict or set')
if (axis == 0):
return cast(DataFrame, self.iloc[indices, :])
else:
return cast(DataFrame, self.iloc[:, indices]) |
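A small take() sketch for illustration (assuming `pyspark.pandas` as `ps`; the animal data is made up):

import pyspark.pandas as ps

df = ps.DataFrame({"name": ["falcon", "parrot", "lion"],
                   "max_speed": [389.0, 24.0, 80.5]},
                  index=[0, 2, 3])
print(df.take([0, 2]).sort_index())  # positions 0 and 2 (labels 0 and 3), not labels 0 and 2
print(df.take([1], axis=1))          # positional column selection
print(df.take([-1]).sort_index())    # negative positions count from the end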
def eval(self, expr: str, inplace: bool=False) -> Optional[DataFrameOrSeries]:
"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n\n Returns\n -------\n The result of the evaluation.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n "
from pyspark.pandas.series import first_series
if isinstance(self.columns, pd.MultiIndex):
raise TypeError('`eval` is not supported for multi-index columns')
inplace = validate_bool_kwarg(inplace, 'inplace')
should_return_series = False
series_name = None
should_return_scalar = False
def eval_func(pdf):
nonlocal should_return_series
nonlocal series_name
nonlocal should_return_scalar
result_inner = pdf.eval(expr, inplace=inplace)
if inplace:
result_inner = pdf
if isinstance(result_inner, pd.Series):
should_return_series = True
series_name = result_inner.name
result_inner = result_inner.to_frame()
elif is_scalar(result_inner):
should_return_scalar = True
result_inner = pd.Series(result_inner).to_frame()
return result_inner
result = self.pandas_on_spark.apply_batch(eval_func)
if inplace:
self._update_internal_frame(result._internal, requires_same_anchor=False)
return None
elif should_return_series:
return first_series(result).rename(series_name)
elif should_return_scalar:
return first_series(result)[0]
else:
return result | -2,884,725,735,896,062,000 | Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
Returns
-------
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Examples
--------
>>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7 | python/pyspark/pandas/frame.py | eval | Flyangz/spark | python | def eval(self, expr: str, inplace: bool=False) -> Optional[DataFrameOrSeries]:
"\n Evaluate a string describing operations on DataFrame columns.\n\n Operates on columns only, not specific rows or elements. This allows\n `eval` to run arbitrary code, which can make you vulnerable to code\n injection if you pass user input to this function.\n\n Parameters\n ----------\n expr : str\n The expression string to evaluate.\n inplace : bool, default False\n If the expression contains an assignment, whether to perform the\n operation inplace and mutate the existing DataFrame. Otherwise,\n a new DataFrame is returned.\n\n Returns\n -------\n The result of the evaluation.\n\n See Also\n --------\n DataFrame.query : Evaluates a boolean expression to query the columns\n of a frame.\n DataFrame.assign : Can evaluate an expression or function to create new\n values for a column.\n eval : Evaluate a Python expression as a string using various\n backends.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n >>> df.eval('A + B')\n 0 11\n 1 10\n 2 9\n 3 8\n 4 7\n dtype: int64\n\n Assignment is allowed though by default the original DataFrame is not\n modified.\n\n >>> df.eval('C = A + B')\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n >>> df\n A B\n 0 1 10\n 1 2 8\n 2 3 6\n 3 4 4\n 4 5 2\n\n Use ``inplace=True`` to modify the original DataFrame.\n\n >>> df.eval('C = A + B', inplace=True)\n >>> df\n A B C\n 0 1 10 11\n 1 2 8 10\n 2 3 6 9\n 3 4 4 8\n 4 5 2 7\n "
from pyspark.pandas.series import first_series
if isinstance(self.columns, pd.MultiIndex):
raise TypeError('`eval` is not supported for multi-index columns')
inplace = validate_bool_kwarg(inplace, 'inplace')
should_return_series = False
series_name = None
should_return_scalar = False
def eval_func(pdf):
nonlocal should_return_series
nonlocal series_name
nonlocal should_return_scalar
result_inner = pdf.eval(expr, inplace=inplace)
if inplace:
result_inner = pdf
if isinstance(result_inner, pd.Series):
should_return_series = True
series_name = result_inner.name
result_inner = result_inner.to_frame()
elif is_scalar(result_inner):
should_return_scalar = True
result_inner = pd.Series(result_inner).to_frame()
return result_inner
result = self.pandas_on_spark.apply_batch(eval_func)
if inplace:
self._update_internal_frame(result._internal, requires_same_anchor=False)
return None
elif should_return_series:
return first_series(result).rename(series_name)
elif should_return_scalar:
return first_series(result)[0]
else:
return result |
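A minimal eval sketch, illustrative only and assuming `pyspark.pandas` is importable as `ps`:

import pyspark.pandas as ps

df = ps.DataFrame({"A": range(1, 6), "B": range(10, 0, -2)})
print(df.eval("A + B"))             # expression without assignment returns a Series
df.eval("C = A + B", inplace=True)  # assignment with inplace=True mutates df
print(df)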
def explode(self, column: Name) -> 'DataFrame':
"\n Transform each element of a list-like to a row, replicating index values.\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 [] 1\n 2 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1.0 1\n 0 2.0 1\n 0 3.0 1\n 1 NaN 1\n 2 3.0 1\n 2 4.0 1\n "
from pyspark.pandas.series import Series
if (not is_name_like_value(column)):
raise TypeError('column must be a scalar')
psdf: DataFrame = DataFrame(self._internal.resolved_copy)
psser = psdf[column]
if (not isinstance(psser, Series)):
raise ValueError(('The column %s is not unique. For a multi-index, the label must be a tuple with elements corresponding to each level.' % name_like_string(column)))
if (not isinstance(psser.spark.data_type, ArrayType)):
return self.copy()
sdf = psdf._internal.spark_frame.withColumn(psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column))
data_fields = psdf._internal.data_fields.copy()
idx = psdf._internal.column_labels.index(psser._column_label)
field = data_fields[idx]
spark_type = cast(ArrayType, field.spark_type).elementType
dtype = spark_type_to_pandas_dtype(spark_type)
data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True)
internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)
return DataFrame(internal) | 7,501,693,200,103,724,000 | Transform each element of a list-like to a row, replicating index values.
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Examples
--------
>>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 [] 1
2 [3, 4] 1
>>> df.explode('A')
A B
0 1.0 1
0 2.0 1
0 3.0 1
1 NaN 1
2 3.0 1
2 4.0 1 | python/pyspark/pandas/frame.py | explode | Flyangz/spark | python | def explode(self, column: Name) -> 'DataFrame':
"\n Transform each element of a list-like to a row, replicating index values.\n\n Parameters\n ----------\n column : str or tuple\n Column to explode.\n\n Returns\n -------\n DataFrame\n Exploded lists to rows of the subset columns;\n index will be duplicated for these rows.\n\n See Also\n --------\n DataFrame.unstack : Pivot a level of the (necessarily hierarchical)\n index labels.\n DataFrame.melt : Unpivot a DataFrame from wide format to long format.\n\n Examples\n --------\n >>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})\n >>> df\n A B\n 0 [1, 2, 3] 1\n 1 [] 1\n 2 [3, 4] 1\n\n >>> df.explode('A')\n A B\n 0 1.0 1\n 0 2.0 1\n 0 3.0 1\n 1 NaN 1\n 2 3.0 1\n 2 4.0 1\n "
from pyspark.pandas.series import Series
if (not is_name_like_value(column)):
raise TypeError('column must be a scalar')
psdf: DataFrame = DataFrame(self._internal.resolved_copy)
psser = psdf[column]
if (not isinstance(psser, Series)):
raise ValueError(('The column %s is not unique. For a multi-index, the label must be a tuple with elements corresponding to each level.' % name_like_string(column)))
if (not isinstance(psser.spark.data_type, ArrayType)):
return self.copy()
sdf = psdf._internal.spark_frame.withColumn(psser._internal.data_spark_column_names[0], F.explode_outer(psser.spark.column))
data_fields = psdf._internal.data_fields.copy()
idx = psdf._internal.column_labels.index(psser._column_label)
field = data_fields[idx]
spark_type = cast(ArrayType, field.spark_type).elementType
dtype = spark_type_to_pandas_dtype(spark_type)
data_fields[idx] = field.copy(dtype=dtype, spark_type=spark_type, nullable=True)
internal = psdf._internal.with_new_sdf(sdf, data_fields=data_fields)
return DataFrame(internal) |
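Editorial note, not part of the source record: as the body above shows, calling `explode` on a column whose Spark type is not ArrayType simply returns a copy of the frame. A hedged sketch:
>>> df = ps.DataFrame({'A': [[1, 2, 3], [], [3, 4]], 'B': 1})
>>> df.explode('B').shape   # 'B' is not an array column, so the frame comes back unchanged
(3, 2)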
def mad(self, axis: Axis=0) -> 'Series':
"\n Return the mean absolute deviation of values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n >>> df.mad()\n a 0.666667\n b 0.066667\n dtype: float64\n\n >>> df.mad(axis=1)\n 0 0.45\n 1 0.90\n 2 1.35\n 3 NaN\n dtype: float64\n "
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if (axis == 0):
def get_spark_column(psdf: DataFrame, label: Label) -> Column:
scol = psdf._internal.spark_column_for(label)
col_type = psdf._internal.spark_type_for(label)
if isinstance(col_type, BooleanType):
scol = scol.cast('integer')
return scol
new_column_labels: List[Label] = []
for label in self._internal.column_labels:
dtype = self._psser_for(label).spark.data_type
if isinstance(dtype, (NumericType, BooleanType)):
new_column_labels.append(label)
new_columns = [F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in new_column_labels]
mean_data = self._internal.spark_frame.select(*new_columns).first()
new_columns = [F.avg(F.abs((get_spark_column(self, label) - mean_data[name_like_string(label)]))).alias(name_like_string(label)) for label in new_column_labels]
sdf = self._internal.spark_frame.select(*[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns)
with ps.option_context('compute.max_rows', 1):
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names)
return first_series(DataFrame(internal).transpose())
else:
@pandas_udf(returnType=DoubleType())
def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
return pd.concat(cols, axis=1).mad(axis=1)
internal = self._internal.copy(column_labels=[None], data_spark_columns=[calculate_columns_axis(*self._internal.data_spark_columns).alias(SPARK_DEFAULT_SERIES_NAME)], data_fields=[None], column_label_names=None)
return first_series(DataFrame(internal)) | 5,261,953,540,311,855,000 | Return the mean absolute deviation of values.
Parameters
----------
axis : {index (0), columns (1)}
Axis for the function to be applied on.
Examples
--------
>>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},
... columns=['a', 'b'])
>>> df.mad()
a 0.666667
b 0.066667
dtype: float64
>>> df.mad(axis=1)
0 0.45
1 0.90
2 1.35
3 NaN
dtype: float64 | python/pyspark/pandas/frame.py | mad | Flyangz/spark | python | def mad(self, axis: Axis=0) -> 'Series':
"\n Return the mean absolute deviation of values.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n\n Examples\n --------\n >>> df = ps.DataFrame({'a': [1, 2, 3, np.nan], 'b': [0.1, 0.2, 0.3, np.nan]},\n ... columns=['a', 'b'])\n\n >>> df.mad()\n a 0.666667\n b 0.066667\n dtype: float64\n\n >>> df.mad(axis=1)\n 0 0.45\n 1 0.90\n 2 1.35\n 3 NaN\n dtype: float64\n "
from pyspark.pandas.series import first_series
axis = validate_axis(axis)
if (axis == 0):
def get_spark_column(psdf: DataFrame, label: Label) -> Column:
scol = psdf._internal.spark_column_for(label)
col_type = psdf._internal.spark_type_for(label)
if isinstance(col_type, BooleanType):
scol = scol.cast('integer')
return scol
new_column_labels: List[Label] = []
for label in self._internal.column_labels:
dtype = self._psser_for(label).spark.data_type
if isinstance(dtype, (NumericType, BooleanType)):
new_column_labels.append(label)
new_columns = [F.avg(get_spark_column(self, label)).alias(name_like_string(label)) for label in new_column_labels]
mean_data = self._internal.spark_frame.select(*new_columns).first()
new_columns = [F.avg(F.abs((get_spark_column(self, label) - mean_data[name_like_string(label)]))).alias(name_like_string(label)) for label in new_column_labels]
sdf = self._internal.spark_frame.select(*[SF.lit(None).cast(StringType()).alias(SPARK_DEFAULT_INDEX_NAME)], *new_columns)
with ps.option_context('compute.max_rows', 1):
internal = InternalFrame(spark_frame=sdf, index_spark_columns=[scol_for(sdf, SPARK_DEFAULT_INDEX_NAME)], column_labels=new_column_labels, column_label_names=self._internal.column_label_names)
return first_series(DataFrame(internal).transpose())
else:
@pandas_udf(returnType=DoubleType())
def calculate_columns_axis(*cols: pd.Series) -> pd.Series:
return pd.concat(cols, axis=1).mad(axis=1)
internal = self._internal.copy(column_labels=[None], data_spark_columns=[calculate_columns_axis(*self._internal.data_spark_columns).alias(SPARK_DEFAULT_SERIES_NAME)], data_fields=[None], column_label_names=None)
return first_series(DataFrame(internal)) |
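Editorial note, not part of the source record: the body above casts BooleanType columns to integers before averaging, so `mad` also works on boolean data. A hedged worked example: the mean of [1, 0, 1, 0] is 0.5 and every absolute deviation is 0.5, so the result is 0.5.
>>> ps.DataFrame({'flag': [True, False, True, False]}).mad()
flag    0.5
dtype: float64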
def tail(self, n: int=5) -> 'DataFrame':
"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail() # doctest: +SKIP\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3) # doctest: +SKIP\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3) # doctest: +SKIP\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n "
if (not isinstance(n, int)):
raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__))
if (n < 0):
n = (len(self) + n)
if (n <= 0):
return ps.DataFrame(self._internal.with_filter(SF.lit(False)))
sdf = self._internal.resolved_copy.spark_frame
rows = sdf.tail(n)
new_sdf = default_session().createDataFrame(rows, sdf.schema)
return DataFrame(self._internal.with_new_sdf(new_sdf)) | -381,023,855,042,304,900 | Return the last `n` rows.
This function returns the last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
For negative values of `n`, this function returns all rows except
the first `n` rows, equivalent to ``df[n:]``.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail() # doctest: +SKIP
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3) # doctest: +SKIP
animal
6 shark
7 whale
8 zebra
For negative values of `n`
>>> df.tail(-3) # doctest: +SKIP
animal
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra | python/pyspark/pandas/frame.py | tail | Flyangz/spark | python | def tail(self, n: int=5) -> 'DataFrame':
"\n Return the last `n` rows.\n\n This function returns last `n` rows from the object based on\n position. It is useful for quickly verifying data, for example,\n after sorting or appending rows.\n\n For negative values of `n`, this function returns all rows except\n the first `n` rows, equivalent to ``df[n:]``.\n\n Parameters\n ----------\n n : int, default 5\n Number of rows to select.\n\n Returns\n -------\n type of caller\n The last `n` rows of the caller object.\n\n See Also\n --------\n DataFrame.head : The first `n` rows of the caller object.\n\n Examples\n --------\n >>> df = ps.DataFrame({'animal': ['alligator', 'bee', 'falcon', 'lion',\n ... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})\n >>> df\n animal\n 0 alligator\n 1 bee\n 2 falcon\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last 5 lines\n\n >>> df.tail() # doctest: +SKIP\n animal\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n\n Viewing the last `n` lines (three in this case)\n\n >>> df.tail(3) # doctest: +SKIP\n animal\n 6 shark\n 7 whale\n 8 zebra\n\n For negative values of `n`\n\n >>> df.tail(-3) # doctest: +SKIP\n animal\n 3 lion\n 4 monkey\n 5 parrot\n 6 shark\n 7 whale\n 8 zebra\n "
if (not isinstance(n, int)):
raise TypeError("bad operand type for unary -: '{}'".format(type(n).__name__))
if (n < 0):
n = (len(self) + n)
if (n <= 0):
return ps.DataFrame(self._internal.with_filter(SF.lit(False)))
sdf = self._internal.resolved_copy.spark_frame
rows = sdf.tail(n)
new_sdf = default_session().createDataFrame(rows, sdf.schema)
return DataFrame(self._internal.with_new_sdf(new_sdf)) |
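Editorial note, not part of the source record: per the body above, a negative `n` whose magnitude reaches the frame length yields an empty result. A hedged sketch using the nine-row `animal` frame from the docstring:
>>> len(df.tail(-9))   # n becomes 9 + (-9) = 0, so an empty frame is returned
0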
def align(self, other: DataFrameOrSeries, join: str='outer', axis: Optional[Axis]=None, copy: bool=True) -> Tuple[('DataFrame', DataFrameOrSeries)]:
'\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{\'outer\', \'inner\', \'left\', \'right\'}}, default \'outer\'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n\n Returns\n -------\n (left, right) : (DataFrame, type of other)\n Aligned objects.\n\n Examples\n --------\n >>> ps.set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])\n >>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])\n\n Align both axis:\n\n >>> aligned_l, aligned_r = df1.align(df2)\n >>> aligned_l.sort_index()\n a b c\n 10 1.0 a NaN\n 11 NaN None NaN\n 12 NaN None NaN\n 20 2.0 b NaN\n 30 3.0 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4.0 NaN d\n 11 5.0 NaN e\n 12 6.0 NaN f\n 20 NaN NaN None\n 30 NaN NaN None\n\n Align only axis=0 (index):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n a c\n 10 4.0 d\n 11 5.0 e\n 12 6.0 f\n 20 NaN None\n 30 NaN None\n\n Align only axis=1 (column):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=1)\n >>> aligned_l.sort_index()\n a b c\n 10 1 a NaN\n 20 2 b NaN\n 30 3 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4 NaN d\n 11 5 NaN e\n 12 6 NaN f\n\n Align with the join type "inner":\n\n >>> aligned_l, aligned_r = df1.align(df2, join="inner")\n >>> aligned_l.sort_index()\n a\n 10 1\n >>> aligned_r.sort_index()\n a\n 10 4\n\n Align with a Series:\n\n >>> s = ps.Series([7, 8, 9], index=[10, 11, 12])\n >>> aligned_l, aligned_r = df1.align(s, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n\n >>> ps.reset_option("compute.ops_on_diff_frames")\n '
from pyspark.pandas.series import Series, first_series
if (not isinstance(other, (DataFrame, Series))):
raise TypeError('unsupported type: {}'.format(type(other).__name__))
how = validate_how(join)
axis = validate_axis(axis, None)
right_is_series = isinstance(other, Series)
if right_is_series:
if (axis is None):
raise ValueError('Must specify axis=0 or 1')
elif (axis != 0):
raise NotImplementedError('align currently only works for axis=0 when right is Series')
left = self
right = other
if (((axis is None) or (axis == 0)) and (not same_anchor(left, right))):
combined = combine_frames(left, right, how=how)
left = combined['this']
right = combined['that']
if right_is_series:
right = first_series(cast(DataFrame[Any], right)).rename(other.name)
if (((axis is None) or (axis == 1)) and (left._internal.column_labels != right._internal.column_labels)):
if (left._internal.column_labels_level != right._internal.column_labels_level):
raise ValueError('cannot join with no overlapping index names')
left = left.copy()
right = right.copy()
if (how == 'full'):
column_labels = sorted(list((set(left._internal.column_labels) | set(right._internal.column_labels))))
elif (how == 'inner'):
column_labels = sorted(list((set(left._internal.column_labels) & set(right._internal.column_labels))))
elif (how == 'left'):
column_labels = left._internal.column_labels
else:
column_labels = right._internal.column_labels
for label in column_labels:
if (label not in left._internal.column_labels):
left[label] = SF.lit(None).cast(DoubleType())
left = left[column_labels]
for label in column_labels:
if (label not in right._internal.column_labels):
right[label] = SF.lit(None).cast(DoubleType())
right = right[column_labels]
return ((left.copy(), right.copy()) if copy else (left, right)) | 436,715,312,717,442,240 | Align two objects on their axes with the specified join method.
Join method is specified for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {{'outer', 'inner', 'left', 'right'}}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None).
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
Returns
-------
(left, right) : (DataFrame, type of other)
Aligned objects.
Examples
--------
>>> ps.set_option("compute.ops_on_diff_frames", True)
>>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])
>>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])
Align both axis:
>>> aligned_l, aligned_r = df1.align(df2)
>>> aligned_l.sort_index()
a b c
10 1.0 a NaN
11 NaN None NaN
12 NaN None NaN
20 2.0 b NaN
30 3.0 c NaN
>>> aligned_r.sort_index()
a b c
10 4.0 NaN d
11 5.0 NaN e
12 6.0 NaN f
20 NaN NaN None
30 NaN NaN None
Align only axis=0 (index):
>>> aligned_l, aligned_r = df1.align(df2, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
a c
10 4.0 d
11 5.0 e
12 6.0 f
20 NaN None
30 NaN None
Align only axis=1 (column):
>>> aligned_l, aligned_r = df1.align(df2, axis=1)
>>> aligned_l.sort_index()
a b c
10 1 a NaN
20 2 b NaN
30 3 c NaN
>>> aligned_r.sort_index()
a b c
10 4 NaN d
11 5 NaN e
12 6 NaN f
Align with the join type "inner":
>>> aligned_l, aligned_r = df1.align(df2, join="inner")
>>> aligned_l.sort_index()
a
10 1
>>> aligned_r.sort_index()
a
10 4
Align with a Series:
>>> s = ps.Series([7, 8, 9], index=[10, 11, 12])
>>> aligned_l, aligned_r = df1.align(s, axis=0)
>>> aligned_l.sort_index()
a b
10 1.0 a
11 NaN None
12 NaN None
20 2.0 b
30 3.0 c
>>> aligned_r.sort_index()
10 7.0
11 8.0
12 9.0
20 NaN
30 NaN
dtype: float64
>>> ps.reset_option("compute.ops_on_diff_frames") | python/pyspark/pandas/frame.py | align | Flyangz/spark | python | def align(self, other: DataFrameOrSeries, join: str='outer', axis: Optional[Axis]=None, copy: bool=True) -> Tuple[('DataFrame', DataFrameOrSeries)]:
'\n Align two objects on their axes with the specified join method.\n\n Join method is specified for each axis Index.\n\n Parameters\n ----------\n other : DataFrame or Series\n join : {{\'outer\', \'inner\', \'left\', \'right\'}}, default \'outer\'\n axis : allowed axis of the other object, default None\n Align on index (0), columns (1), or both (None).\n copy : bool, default True\n Always returns new objects. If copy=False and no reindexing is\n required then original objects are returned.\n\n Returns\n -------\n (left, right) : (DataFrame, type of other)\n Aligned objects.\n\n Examples\n --------\n >>> ps.set_option("compute.ops_on_diff_frames", True)\n >>> df1 = ps.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]}, index=[10, 20, 30])\n >>> df2 = ps.DataFrame({"a": [4, 5, 6], "c": ["d", "e", "f"]}, index=[10, 11, 12])\n\n Align both axis:\n\n >>> aligned_l, aligned_r = df1.align(df2)\n >>> aligned_l.sort_index()\n a b c\n 10 1.0 a NaN\n 11 NaN None NaN\n 12 NaN None NaN\n 20 2.0 b NaN\n 30 3.0 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4.0 NaN d\n 11 5.0 NaN e\n 12 6.0 NaN f\n 20 NaN NaN None\n 30 NaN NaN None\n\n Align only axis=0 (index):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n a c\n 10 4.0 d\n 11 5.0 e\n 12 6.0 f\n 20 NaN None\n 30 NaN None\n\n Align only axis=1 (column):\n\n >>> aligned_l, aligned_r = df1.align(df2, axis=1)\n >>> aligned_l.sort_index()\n a b c\n 10 1 a NaN\n 20 2 b NaN\n 30 3 c NaN\n >>> aligned_r.sort_index()\n a b c\n 10 4 NaN d\n 11 5 NaN e\n 12 6 NaN f\n\n Align with the join type "inner":\n\n >>> aligned_l, aligned_r = df1.align(df2, join="inner")\n >>> aligned_l.sort_index()\n a\n 10 1\n >>> aligned_r.sort_index()\n a\n 10 4\n\n Align with a Series:\n\n >>> s = ps.Series([7, 8, 9], index=[10, 11, 12])\n >>> aligned_l, aligned_r = df1.align(s, axis=0)\n >>> aligned_l.sort_index()\n a b\n 10 1.0 a\n 11 NaN None\n 12 NaN None\n 20 2.0 b\n 30 3.0 c\n >>> aligned_r.sort_index()\n 10 7.0\n 11 8.0\n 12 9.0\n 20 NaN\n 30 NaN\n dtype: float64\n\n >>> ps.reset_option("compute.ops_on_diff_frames")\n '
from pyspark.pandas.series import Series, first_series
if (not isinstance(other, (DataFrame, Series))):
raise TypeError('unsupported type: {}'.format(type(other).__name__))
how = validate_how(join)
axis = validate_axis(axis, None)
right_is_series = isinstance(other, Series)
if right_is_series:
if (axis is None):
raise ValueError('Must specify axis=0 or 1')
elif (axis != 0):
raise NotImplementedError('align currently only works for axis=0 when right is Series')
left = self
right = other
if (((axis is None) or (axis == 0)) and (not same_anchor(left, right))):
combined = combine_frames(left, right, how=how)
left = combined['this']
right = combined['that']
if right_is_series:
right = first_series(cast(DataFrame[Any], right)).rename(other.name)
if (((axis is None) or (axis == 1)) and (left._internal.column_labels != right._internal.column_labels)):
if (left._internal.column_labels_level != right._internal.column_labels_level):
raise ValueError('cannot join with no overlapping index names')
left = left.copy()
right = right.copy()
if (how == 'full'):
column_labels = sorted(list((set(left._internal.column_labels) | set(right._internal.column_labels))))
elif (how == 'inner'):
column_labels = sorted(list((set(left._internal.column_labels) & set(right._internal.column_labels))))
elif (how == 'left'):
column_labels = left._internal.column_labels
else:
column_labels = right._internal.column_labels
for label in column_labels:
if (label not in left._internal.column_labels):
left[label] = SF.lit(None).cast(DoubleType())
left = left[column_labels]
for label in column_labels:
if (label not in right._internal.column_labels):
right[label] = SF.lit(None).cast(DoubleType())
right = right[column_labels]
return ((left.copy(), right.copy()) if copy else (left, right)) |
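Editorial note, not part of the source record: a hedged sketch of column-only alignment with join="left", using `df1` and `df2` as defined in the docstring above; only the left frame's columns are kept, and the column missing on the right is filled with nulls.
>>> lhs, rhs = df1.align(df2, join="left", axis=1)
>>> list(lhs.columns), list(rhs.columns)
(['a', 'b'], ['a', 'b'])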
@staticmethod
def from_dict(data: Dict[(Name, Sequence[Any])], orient: str='columns', dtype: Union[(str, Dtype)]=None, columns: Optional[List[Name]]=None) -> 'DataFrame':
'\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {\'columns\', \'index\'}, default \'columns\'\n The "orientation" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass \'columns\'\n (default). Otherwise if the keys should be rows, pass \'index\'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient=\'index\'``. Raises a ValueError\n if used with ``orient=\'columns\'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {\'col_1\': [3, 2, 1, 0], \'col_2\': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 10\n 1 2 20\n 2 1 30\n 3 0 40\n\n Specify ``orient=\'index\'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {\'row_1\': [3, 2, 1, 0], \'row_2\': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data, orient=\'index\').sort_index()\n 0 1 2 3\n row_1 3 2 1 0\n row_2 10 20 30 40\n\n When using the \'index\' orientation, the column names can be\n specified manually:\n\n >>> ps.DataFrame.from_dict(data, orient=\'index\',\n ... columns=[\'A\', \'B\', \'C\', \'D\']).sort_index()\n A B C D\n row_1 3 2 1 0\n row_2 10 20 30 40\n '
return DataFrame(pd.DataFrame.from_dict(data, orient=orient, dtype=dtype, columns=columns)) | -6,497,009,801,001,677,000 | Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': [10, 20, 30, 40]}
>>> ps.DataFrame.from_dict(data)
col_1 col_2
0 3 10
1 2 20
2 1 30
3 0 40
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': [10, 20, 30, 40]}
>>> ps.DataFrame.from_dict(data, orient='index').sort_index()
0 1 2 3
row_1 3 2 1 0
row_2 10 20 30 40
When using the 'index' orientation, the column names can be
specified manually:
>>> ps.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D']).sort_index()
A B C D
row_1 3 2 1 0
row_2 10 20 30 40 | python/pyspark/pandas/frame.py | from_dict | Flyangz/spark | python | @staticmethod
def from_dict(data: Dict[(Name, Sequence[Any])], orient: str='columns', dtype: Union[(str, Dtype)]=None, columns: Optional[List[Name]]=None) -> 'DataFrame':
'\n Construct DataFrame from dict of array-like or dicts.\n\n Creates DataFrame object from dictionary by columns or by index\n allowing dtype specification.\n\n Parameters\n ----------\n data : dict\n Of the form {field : array-like} or {field : dict}.\n orient : {\'columns\', \'index\'}, default \'columns\'\n The "orientation" of the data. If the keys of the passed dict\n should be the columns of the resulting DataFrame, pass \'columns\'\n (default). Otherwise if the keys should be rows, pass \'index\'.\n dtype : dtype, default None\n Data type to force, otherwise infer.\n columns : list, default None\n Column labels to use when ``orient=\'index\'``. Raises a ValueError\n if used with ``orient=\'columns\'``.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.from_records : DataFrame from structured ndarray, sequence\n of tuples or dicts, or DataFrame.\n DataFrame : DataFrame object creation using constructor.\n\n Examples\n --------\n By default the keys of the dict become the DataFrame columns:\n\n >>> data = {\'col_1\': [3, 2, 1, 0], \'col_2\': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data)\n col_1 col_2\n 0 3 10\n 1 2 20\n 2 1 30\n 3 0 40\n\n Specify ``orient=\'index\'`` to create the DataFrame using dictionary\n keys as rows:\n\n >>> data = {\'row_1\': [3, 2, 1, 0], \'row_2\': [10, 20, 30, 40]}\n >>> ps.DataFrame.from_dict(data, orient=\'index\').sort_index()\n 0 1 2 3\n row_1 3 2 1 0\n row_2 10 20 30 40\n\n When using the \'index\' orientation, the column names can be\n specified manually:\n\n >>> ps.DataFrame.from_dict(data, orient=\'index\',\n ... columns=[\'A\', \'B\', \'C\', \'D\']).sort_index()\n A B C D\n row_1 3 2 1 0\n row_2 10 20 30 40\n '
return DataFrame(pd.DataFrame.from_dict(data, orient=orient, dtype=dtype, columns=columns)) |
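Editorial note, not part of the source record: a hedged sketch of the `dtype` argument, which forces the column types of the resulting frame; the printed dtypes are the expected ones, though formatting may differ slightly.
>>> ps.DataFrame.from_dict({'col_1': [1, 2], 'col_2': [3, 4]}, dtype='float64').dtypes
col_1    float64
col_2    float64
dtype: object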
def _to_internal_pandas(self) -> pd.DataFrame:
'\n Return a pandas DataFrame directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n '
return self._internal.to_pandas_frame | -1,994,076,103,929,380,600 | Return a pandas DataFrame directly from _internal to avoid overhead of copy.
This method is for internal use only. | python/pyspark/pandas/frame.py | _to_internal_pandas | Flyangz/spark | python | def _to_internal_pandas(self) -> pd.DataFrame:
'\n Return a pandas DataFrame directly from _internal to avoid overhead of copy.\n\n This method is for internal use only.\n '
return self._internal.to_pandas_frame |
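Editorial note, not part of the source record: a hedged sketch showing that this internal accessor hands back a plain pandas object rather than a pandas-on-Spark one.
>>> type(ps.DataFrame({'a': [1]})._to_internal_pandas())
<class 'pandas.core.frame.DataFrame'>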
@staticmethod
def _index_normalized_label(level: int, labels: Union[(Name, Sequence[Name])]) -> List[Label]:
'\n Returns a label that is normalized against the current column index level.\n For example, the key "abc" can be ("abc", "", "") if the current Frame has\n a multi-index for its column\n '
if is_name_like_tuple(labels):
labels = [labels]
elif is_name_like_value(labels):
labels = [(labels,)]
else:
labels = [(k if is_name_like_tuple(k) else (k,)) for k in labels]
if any(((len(label) > level) for label in labels)):
raise KeyError('Key length ({}) exceeds index depth ({})'.format(max((len(label) for label in labels)), level))
return [tuple((list(label) + ([''] * (level - len(label))))) for label in labels] | 3,790,296,275,256,254,000 | Returns a label that is normalized against the current column index level.
For example, the key "abc" can be ("abc", "", "") if the current Frame has
a multi-index for its column | python/pyspark/pandas/frame.py | _index_normalized_label | Flyangz/spark | python | @staticmethod
def _index_normalized_label(level: int, labels: Union[(Name, Sequence[Name])]) -> List[Label]:
'\n Returns a label that is normalized against the current column index level.\n For example, the key "abc" can be ("abc", "", "") if the current Frame has\n a multi-index for its column\n '
if is_name_like_tuple(labels):
labels = [labels]
elif is_name_like_value(labels):
labels = [(labels,)]
else:
labels = [(k if is_name_like_tuple(k) else (k,)) for k in labels]
if any(((len(label) > level) for label in labels)):
raise KeyError('Key length ({}) exceeds index depth ({})'.format(max((len(label) for label in labels)), level))
return [tuple((list(label) + ([''] * (level - len(label))))) for label in labels] |
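Editorial note, not part of the source record: a hedged illustration of the padding behaviour described in the docstring; the helper is internal, so the call form below is for explanation only.
>>> ps.DataFrame._index_normalized_label(3, "abc")
[('abc', '', '')]
>>> ps.DataFrame._index_normalized_label(2, [("x", "y"), "z"])
[('x', 'y'), ('z', '')]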
@staticmethod
def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> 'DataFrame':
'\n Returns a frame that is normalized against the current column index level.\n For example, the name in `pd.Series([...], name="abc")` can be\n ("abc", "", "") if the current DataFrame has a multi-index for its column\n '
from pyspark.pandas.series import Series
if isinstance(psser_or_psdf, Series):
psdf = psser_or_psdf.to_frame()
else:
assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)
psdf = psser_or_psdf.copy()
psdf.columns = pd.MultiIndex.from_tuples([tuple(([name_like_string(label)] + ([''] * (level - 1)))) for label in psdf._internal.column_labels])
return psdf | 4,519,135,396,839,812,600 | Returns a frame that is normalized against the current column index level.
For example, the name in `pd.Series([...], name="abc")` can be
("abc", "", "") if the current DataFrame has a multi-index for its column | python/pyspark/pandas/frame.py | _index_normalized_frame | Flyangz/spark | python | @staticmethod
def _index_normalized_frame(level: int, psser_or_psdf: DataFrameOrSeries) -> 'DataFrame':
'\n Returns a frame that is normalized against the current column index level.\n For example, the name in `pd.Series([...], name="abc")` can be\n ("abc", "", "") if the current DataFrame has a multi-index for its column\n '
from pyspark.pandas.series import Series
if isinstance(psser_or_psdf, Series):
psdf = psser_or_psdf.to_frame()
else:
assert isinstance(psser_or_psdf, DataFrame), type(psser_or_psdf)
psdf = psser_or_psdf.copy()
psdf.columns = pd.MultiIndex.from_tuples([tuple(([name_like_string(label)] + ([''] * (level - 1)))) for label in psdf._internal.column_labels])
return psdf |
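Editorial note, not part of the source record: the companion helper pads the column names of a Series or frame to the requested level; a hedged illustration of the internal call, for explanation only.
>>> psdf = ps.DataFrame({'x': [1]})
>>> list(ps.DataFrame._index_normalized_frame(2, psdf).columns)
[('x', '')]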
@export
def display_timeline(data: Union[(pd.DataFrame, dict)], time_column: str='TimeGenerated', source_columns: list=None, **kwargs) -> figure:
'\n Display a timeline of events.\n\n Parameters\n ----------\n data : Union[dict, pd.DataFrame]\n Either\n dict of data sets to plot on the timeline with the following structure::\n\n Key (str) - Name of data set to be displayed in legend\n Value (Dict[str, Any]) - containing:\n data (pd.DataFrame) - Data to plot\n time_column (str, optional) - Name of the timestamp column\n source_columns (list[str], optional) - source columns to use\n in tooltips\n color (str, optional) - color of datapoints for this data\n If any of the last values are omitted, they default to the values\n supplied as parameters to the function (see below)\n\n Or\n DataFrame as a single data set or grouped into individual\n plot series using the `group_by` parameter\n time_column : str, optional\n Name of the timestamp column\n (the default is \'TimeGenerated\')\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n\n Other Parameters\n ----------------\n title : str, optional\n Title to display (the default is None)\n alert : SecurityAlert, optional\n Add a reference line/label using the alert time (the default is None)\n ref_event : Any, optional\n Add a reference line/label using the alert time (the default is None)\n ref_time : datetime, optional\n Add a reference line/label using `ref_time` (the default is None)\n group_by : str\n (where `data` is a DataFrame)\n The column to group timelines on\n legend: str, optional\n "left", "right", "inline" or "none"\n (the default is to show a legend when plotting multiple series\n and not to show one when plotting a single series)\n yaxis : bool, optional\n Whether to show the yaxis and labels (default is False)\n ygrid : bool, optional\n Whether to show the yaxis grid (default is False)\n xgrid : bool, optional\n Whether to show the xaxis grid (default is True)\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n color : str\n Default series color (default is "navy")\n overlay_color : str\n Overlay series color (default is "green")\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n '
overlay_data: pd.DataFrame = kwargs.pop('overlay_data', None)
overlay_columns: list = kwargs.pop('overlay_columns', source_columns)
color: str = kwargs.get('color', 'navy')
overlay_color: str = kwargs.pop('overlay_color', 'green')
kwargs_sub = kwargs.copy()
kwargs_sub['time_column'] = time_column
kwargs_sub['source_columns'] = source_columns
(kwargs_sub['ref_time'], kwargs_sub['ref_label']) = _get_ref_event_time(**kwargs)
if isinstance(data, pd.DataFrame):
if (overlay_data is not None):
aggr_data = {'Primary': {'data': data, 'time_column': time_column, 'source_columns': source_columns, 'color': color}, 'Secondary': {'data': overlay_data, 'time_column': time_column, 'source_columns': overlay_columns, 'color': overlay_color}}
return _display_timeline_dict(data=aggr_data, **kwargs_sub)
series_dict = _create_dict_from_grouping(data=data, source_columns=source_columns, time_column=time_column, group_by=kwargs.get('group_by', None), color=kwargs.get('color', 'navy'))
return _display_timeline_dict(data=series_dict, **kwargs_sub)
if isinstance(data, dict):
return _display_timeline_dict(data, **kwargs_sub)
return None | 5,080,413,146,164,393,000 | Display a timeline of events.
Parameters
----------
data : Union[dict, pd.DataFrame]
Either
dict of data sets to plot on the timeline with the following structure::
Key (str) - Name of data set to be displayed in legend
Value (Dict[str, Any]) - containing:
data (pd.DataFrame) - Data to plot
time_column (str, optional) - Name of the timestamp column
source_columns (list[str], optional) - source columns to use
in tooltips
color (str, optional) - color of datapoints for this data
If any of the last values are omitted, they default to the values
supplied as parameters to the function (see below)
Or
DataFrame as a single data set or grouped into individual
plot series using the `group_by` parameter
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
Other Parameters
----------------
title : str, optional
Title to display (the default is None)
alert : SecurityAlert, optional
Add a reference line/label using the alert time (the default is None)
ref_event : Any, optional
Add a reference line/label using the alert time (the default is None)
ref_time : datetime, optional
Add a reference line/label using `ref_time` (the default is None)
group_by : str
(where `data` is a DataFrame)
The column to group timelines on
legend: str, optional
"left", "right", "inline" or "none"
(the default is to show a legend when plotting multiple series
and not to show one when plotting a single series)
yaxis : bool, optional
Whether to show the yaxis and labels (default is False)
ygrid : bool, optional
Whether to show the yaxis grid (default is False)
xgrid : bool, optional
Whether to show the xaxis grid (default is True)
range_tool : bool, optional
Show the range slider tool (default is True)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
color : str
Default series color (default is "navy")
overlay_color : str
Overlay series color (default is "green")
Returns
-------
figure
The bokeh plot figure. | msticpy/nbtools/timeline.py | display_timeline | Dqirvin/msticpy | python | @export
def display_timeline(data: Union[(pd.DataFrame, dict)], time_column: str='TimeGenerated', source_columns: list=None, **kwargs) -> figure:
'\n Display a timeline of events.\n\n Parameters\n ----------\n data : Union[dict, pd.DataFrame]\n Either\n dict of data sets to plot on the timeline with the following structure::\n\n Key (str) - Name of data set to be displayed in legend\n Value (Dict[str, Any]) - containing:\n data (pd.DataFrame) - Data to plot\n time_column (str, optional) - Name of the timestamp column\n source_columns (list[str], optional) - source columns to use\n in tooltips\n color (str, optional) - color of datapoints for this data\n If any of the last values are omitted, they default to the values\n supplied as parameters to the function (see below)\n\n Or\n DataFrame as a single data set or grouped into individual\n plot series using the `group_by` parameter\n time_column : str, optional\n Name of the timestamp column\n (the default is \'TimeGenerated\')\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n\n Other Parameters\n ----------------\n title : str, optional\n Title to display (the default is None)\n alert : SecurityAlert, optional\n Add a reference line/label using the alert time (the default is None)\n ref_event : Any, optional\n Add a reference line/label using the alert time (the default is None)\n ref_time : datetime, optional\n Add a reference line/label using `ref_time` (the default is None)\n group_by : str\n (where `data` is a DataFrame)\n The column to group timelines on\n legend: str, optional\n "left", "right", "inline" or "none"\n (the default is to show a legend when plotting multiple series\n and not to show one when plotting a single series)\n yaxis : bool, optional\n Whether to show the yaxis and labels (default is False)\n ygrid : bool, optional\n Whether to show the yaxis grid (default is False)\n xgrid : bool, optional\n Whether to show the xaxis grid (default is True)\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n color : str\n Default series color (default is "navy")\n overlay_color : str\n Overlay series color (default is "green")\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n '
overlay_data: pd.DataFrame = kwargs.pop('overlay_data', None)
overlay_columns: list = kwargs.pop('overlay_columns', source_columns)
color: str = kwargs.get('color', 'navy')
overlay_color: str = kwargs.pop('overlay_color', 'green')
kwargs_sub = kwargs.copy()
kwargs_sub['time_column'] = time_column
kwargs_sub['source_columns'] = source_columns
(kwargs_sub['ref_time'], kwargs_sub['ref_label']) = _get_ref_event_time(**kwargs)
if isinstance(data, pd.DataFrame):
if (overlay_data is not None):
aggr_data = {'Primary': {'data': data, 'time_column': time_column, 'source_columns': source_columns, 'color': color}, 'Secondary': {'data': overlay_data, 'time_column': time_column, 'source_columns': overlay_columns, 'color': overlay_color}}
return _display_timeline_dict(data=aggr_data, **kwargs_sub)
series_dict = _create_dict_from_grouping(data=data, source_columns=source_columns, time_column=time_column, group_by=kwargs.get('group_by', None), color=kwargs.get('color', 'navy'))
return _display_timeline_dict(data=series_dict, **kwargs_sub)
if isinstance(data, dict):
return _display_timeline_dict(data, **kwargs_sub)
return None |
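Editorial note, not part of the source record: a hedged call sketch for `display_timeline`; `proc_events` is a hypothetical DataFrame of process-creation events with a `TimeGenerated` column, and the call renders a Bokeh figure in the notebook.
>>> from msticpy.nbtools.timeline import display_timeline
>>> timeline_fig = display_timeline(proc_events,
...                                 source_columns=["Account", "NewProcessName"],
...                                 group_by="Account", legend="left")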
@export
def display_timeline_values(data: pd.DataFrame, y: str, time_column: str='TimeGenerated', source_columns: list=None, **kwargs) -> figure:
'\n Display a timeline of events.\n\n Parameters\n ----------\n data : pd.DataFrame\n DataFrame as a single data set or grouped into individual\n plot series using the `group_by` parameter\n time_column : str, optional\n Name of the timestamp column\n (the default is \'TimeGenerated\')\n y : str\n The column name holding the value to plot vertically\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n\n Other Parameters\n ----------------\n x : str, optional\n alias of `time_column`\n title : str, optional\n Title to display (the default is None)\n ref_event : Any, optional\n Add a reference line/label using the alert time (the default is None)\n ref_time : datetime, optional\n Add a reference line/label using `ref_time` (the default is None)\n group_by : str\n (where `data` is a DataFrame)\n The column to group timelines on\n legend_column : str, optional\n (where `data` is a DataFrame)\n Name of the column used to generate the legend labels if a legend is\n to be displayed. Default is `group_by` parameter.\n yaxis : bool, optional\n Whether to show the yaxis and labels\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n color : str\n Default series color (default is "navy"). This is overridden by\n automatic color assignments if plotting a grouped chart\n kind : Union[str, List[str]]\n one or more glyph types to plot., optional\n Supported types are "circle", "line" and "vbar" (default is "vbar")\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n '
reset_output()
output_notebook()
height: int = kwargs.pop('height', None)
width: int = kwargs.pop('width', 900)
title: str = kwargs.pop('title', None)
time_column = kwargs.get('x', time_column)
group_by: str = kwargs.get('group_by', None)
show_yaxis: bool = kwargs.pop('yaxis', True)
show_range: bool = kwargs.pop('range_tool', True)
color: str = kwargs.get('color', 'navy')
legend_pos: str = kwargs.pop('legend', None)
kind: Any = kwargs.pop('kind', ['vbar'])
plot_kinds = (kind if isinstance(kind, list) else [kind])
(ref_time, ref_label) = _get_ref_event_time(**kwargs)
(graph_df, group_count_df, tool_tip_columns, series_count) = _create_data_grouping(data, source_columns, time_column, group_by, color)
tool_tip_items = [(f'{col}', f'@{col}') for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={'Tooltip': 'printf'})
title = (title if title else 'Timeline')
min_time = graph_df[time_column].min()
max_time = graph_df[time_column].max()
start_range = (min_time - ((max_time - min_time) * 0.1))
end_range = (max_time + ((max_time - min_time) * 0.1))
height = (height if height else _calc_auto_plot_height(series_count))
plot = figure(x_range=(start_range, end_range), min_border_left=50, plot_height=height, plot_width=width, x_axis_label='Event Time', x_axis_type='datetime', x_minor_ticks=10, y_axis_label=y, tools=[hover, 'xwheel_zoom', 'box_zoom', 'reset', 'save', 'xpan'], toolbar_location='above', title=title)
plot.yaxis.visible = show_yaxis
plot.ygrid.minor_grid_line_color = 'navy'
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = 'navy'
plot.ygrid.grid_line_alpha = 0.3
plot.xgrid.minor_grid_line_color = 'navy'
plot.xgrid.minor_grid_line_alpha = 0.1
plot.xgrid.grid_line_color = 'navy'
plot.xgrid.grid_line_alpha = 0.3
plot.xaxis[0].formatter = _get_tick_formatter()
if group_by:
legend_items = []
for (_, group_id) in group_count_df[group_by].items():
first_group_item = graph_df[(graph_df[group_by] == group_id)].iloc[0]
legend_label = str(first_group_item[group_by])
inline_legend = (str(group_id) if (legend_pos == 'inline') else None)
group_color = first_group_item['color']
row_source = ColumnDataSource(graph_df[(graph_df[group_by] == group_id)])
p_series = []
plot_args: Dict[(str, Any)] = dict(x=time_column, alpha=0.7, source=row_source, legend_label=str(inline_legend))
if ('vbar' in plot_kinds):
p_series.append(plot.vbar(top=y, width=4, color='color', **plot_args))
if ('circle' in plot_kinds):
p_series.append(plot.circle(y=y, size=4, color='color', **plot_args))
if ('line' in plot_kinds):
p_series.append(plot.line(y=y, line_width=1, line_color=group_color, **plot_args))
if (not inline_legend):
legend_items.append((legend_label, p_series))
if (legend_pos == 'inline'):
plot.legend.location = 'top_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos)
else:
plot_args = dict(x=time_column, color='color', alpha=0.7, source=ColumnDataSource(graph_df))
if ('vbar' in plot_kinds):
plot.vbar(top=y, width=4, **plot_args)
if ('circle' in plot_kinds):
plot.circle(y=y, size=4, **plot_args)
if ('line' in plot_kinds):
plot.line(y=y, line_width=4, **plot_args)
if (ref_time is not None):
_add_ref_line(plot, ref_time, ref_label, series_count)
if show_range:
rng_select = _create_range_tool(data=graph_df, min_time=min_time, max_time=max_time, plot_range=plot.x_range, width=width, height=height, time_column=time_column)
show(column(plot, rng_select))
else:
show(plot)
return plot | 6,533,993,632,519,709,000 | Display a timeline of events.
Parameters
----------
data : pd.DataFrame
DataFrame as a single data set or grouped into individual
plot series using the `group_by` parameter
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
y : str
The column name holding the value to plot vertically
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
Other Parameters
----------------
x : str, optional
alias of `time_column`
title : str, optional
Title to display (the default is None)
ref_event : Any, optional
Add a reference line/label using the alert time (the default is None)
ref_time : datetime, optional
Add a reference line/label using `ref_time` (the default is None)
group_by : str
(where `data` is a DataFrame)
The column to group timelines on
legend_column : str, optional
(where `data` is a DataFrame)
Name of the column used to generate the legend labels if a legend is
to be displayed. Default is `group_by` parameter.
yaxis : bool, optional
Whether to show the yaxis and labels
range_tool : bool, optional
Show the range slider tool (default is True)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
color : str
Default series color (default is "navy"). This is overridden by
automatic color assignments if plotting a grouped chart
kind : Union[str, List[str]]
One or more glyph types to plot, optional
Supported types are "circle", "line" and "vbar" (default is "vbar")
Returns
-------
figure
The bokeh plot figure. | msticpy/nbtools/timeline.py | display_timeline_values | Dqirvin/msticpy | python | @export
def display_timeline_values(data: pd.DataFrame, y: str, time_column: str='TimeGenerated', source_columns: list=None, **kwargs) -> figure:
'\n Display a timeline of events.\n\n Parameters\n ----------\n data : pd.DataFrame\n DataFrame as a single data set or grouped into individual\n plot series using the `group_by` parameter\n time_column : str, optional\n Name of the timestamp column\n (the default is \'TimeGenerated\')\n y : str\n The column name holding the value to plot vertically\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n\n Other Parameters\n ----------------\n x : str, optional\n alias of `time_column`\n title : str, optional\n Title to display (the default is None)\n ref_event : Any, optional\n Add a reference line/label using the alert time (the default is None)\n ref_time : datetime, optional\n Add a reference line/label using `ref_time` (the default is None)\n group_by : str\n (where `data` is a DataFrame)\n The column to group timelines on\n legend_column : str, optional\n (where `data` is a DataFrame)\n Name of the column used to generate the legend labels if a legend is\n to be displayed. Default is `group_by` parameter.\n yaxis : bool, optional\n Whether to show the yaxis and labels\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n color : str\n Default series color (default is "navy"). This is overridden by\n automatic color assignments if plotting a grouped chart\n kind : Union[str, List[str]]\n one or more glyph types to plot., optional\n Supported types are "circle", "line" and "vbar" (default is "vbar")\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n '
reset_output()
output_notebook()
height: int = kwargs.pop('height', None)
width: int = kwargs.pop('width', 900)
title: str = kwargs.pop('title', None)
time_column = kwargs.get('x', time_column)
group_by: str = kwargs.get('group_by', None)
show_yaxis: bool = kwargs.pop('yaxis', True)
show_range: bool = kwargs.pop('range_tool', True)
color: str = kwargs.get('color', 'navy')
legend_pos: str = kwargs.pop('legend', None)
kind: Any = kwargs.pop('kind', ['vbar'])
plot_kinds = (kind if isinstance(kind, list) else [kind])
(ref_time, ref_label) = _get_ref_event_time(**kwargs)
(graph_df, group_count_df, tool_tip_columns, series_count) = _create_data_grouping(data, source_columns, time_column, group_by, color)
tool_tip_items = [(f'{col}', f'@{col}') for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={'Tooltip': 'printf'})
title = (title if title else 'Timeline')
min_time = graph_df[time_column].min()
max_time = graph_df[time_column].max()
start_range = (min_time - ((max_time - min_time) * 0.1))
end_range = (max_time + ((max_time - min_time) * 0.1))
height = (height if height else _calc_auto_plot_height(series_count))
plot = figure(x_range=(start_range, end_range), min_border_left=50, plot_height=height, plot_width=width, x_axis_label='Event Time', x_axis_type='datetime', x_minor_ticks=10, y_axis_label=y, tools=[hover, 'xwheel_zoom', 'box_zoom', 'reset', 'save', 'xpan'], toolbar_location='above', title=title)
plot.yaxis.visible = show_yaxis
plot.ygrid.minor_grid_line_color = 'navy'
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = 'navy'
plot.ygrid.grid_line_alpha = 0.3
plot.xgrid.minor_grid_line_color = 'navy'
plot.xgrid.minor_grid_line_alpha = 0.1
plot.xgrid.grid_line_color = 'navy'
plot.xgrid.grid_line_alpha = 0.3
plot.xaxis[0].formatter = _get_tick_formatter()
if group_by:
legend_items = []
for (_, group_id) in group_count_df[group_by].items():
first_group_item = graph_df[(graph_df[group_by] == group_id)].iloc[0]
legend_label = str(first_group_item[group_by])
inline_legend = (str(group_id) if (legend_pos == 'inline') else None)
group_color = first_group_item['color']
row_source = ColumnDataSource(graph_df[(graph_df[group_by] == group_id)])
p_series = []
plot_args: Dict[(str, Any)] = dict(x=time_column, alpha=0.7, source=row_source, legend_label=str(inline_legend))
if ('vbar' in plot_kinds):
p_series.append(plot.vbar(top=y, width=4, color='color', **plot_args))
if ('circle' in plot_kinds):
p_series.append(plot.circle(y=y, size=4, color='color', **plot_args))
if ('line' in plot_kinds):
p_series.append(plot.line(y=y, line_width=1, line_color=group_color, **plot_args))
if (not inline_legend):
legend_items.append((legend_label, p_series))
if (legend_pos == 'inline'):
plot.legend.location = 'top_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos)
else:
plot_args = dict(x=time_column, color='color', alpha=0.7, source=ColumnDataSource(graph_df))
if ('vbar' in plot_kinds):
plot.vbar(top=y, width=4, **plot_args)
if ('circle' in plot_kinds):
plot.circle(y=y, size=4, **plot_args)
if ('line' in plot_kinds):
plot.line(y=y, line_width=4, **plot_args)
if (ref_time is not None):
_add_ref_line(plot, ref_time, ref_label, series_count)
if show_range:
rng_select = _create_range_tool(data=graph_df, min_time=min_time, max_time=max_time, plot_range=plot.x_range, width=width, height=height, time_column=time_column)
show(column(plot, rng_select))
else:
show(plot)
return plot |
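Editorial note, not part of the source record: a hedged call sketch for `display_timeline_values`; `net_flows` is a hypothetical DataFrame of network flow records, and `y` names the column plotted vertically.
>>> from msticpy.nbtools.timeline import display_timeline_values
>>> values_fig = display_timeline_values(net_flows, y="TotalBytes",
...                                      time_column="FlowStartTime",
...                                      group_by="L7Protocol",
...                                      kind=["vbar", "circle"])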
def _display_timeline_dict(data: dict, **kwargs) -> figure:
"\n Display a timeline of events.\n\n Parameters\n ----------\n data : dict\n Data points to plot on the timeline.\n Need to contain:\n Key - Name of data type to be displayed in legend\n Value - dict of data containing:\n data : pd.DataFrame\n Data to plot\n time_column : str\n Name of the timestamp column\n source_columns : list\n List of source columns to use in tooltips\n color: str\n Color of datapoints for this data\n Other Parameters\n ----------------\n ref_time : datetime, optional\n Input reference line to display (the default is None)\n title : str, optional\n Title to display (the default is None)\n time_column : str, optional\n Name of the timestamp column\n (the default is 'TimeGenerated')\n legend: str, optional\n Where to position the legend\n None, left, right or inline (default is None)\n yaxis : bool, optional\n Whether to show the yaxis and labels\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n "
reset_output()
output_notebook()
height: int = kwargs.pop('height', None)
width: int = kwargs.pop('width', 900)
ref_time: Any = kwargs.pop('ref_time', None)
ref_label: str = kwargs.pop('ref_label', None)
title: str = kwargs.pop('title', None)
legend_pos: str = kwargs.pop('legend', None)
show_yaxis: bool = kwargs.pop('yaxis', False)
show_range: bool = kwargs.pop('range_tool', True)
xgrid: bool = kwargs.pop('xgrid', True)
ygrid: bool = kwargs.pop('ygrid', False)
(tool_tip_columns, min_time, max_time) = _unpack_data_series_dict(data, **kwargs)
series_count = len(data)
tool_tip_items = [(f'{col}', f'@{col}') for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={'Tooltip': 'printf'})
title = (f'Timeline: {title}' if title else 'Event Timeline')
start_range = (min_time - ((max_time - min_time) * 0.1))
end_range = (max_time + ((max_time - min_time) * 0.1))
height = (height if height else _calc_auto_plot_height(len(data)))
y_range = (((- 1) / series_count), ((series_count - 1) + (1 / series_count)))
plot = figure(x_range=(start_range, end_range), y_range=y_range, min_border_left=50, plot_height=height, plot_width=width, x_axis_label='Event Time', x_axis_type='datetime', x_minor_ticks=10, tools=[hover, 'xwheel_zoom', 'box_zoom', 'reset', 'save', 'xpan'], title=title)
plot.yaxis.visible = show_yaxis
if show_yaxis:
if data:
y_labels = {ser_def['y_index']: str(lbl) for (lbl, ser_def) in data.items()}
plot.yaxis.major_label_overrides = y_labels
if ygrid:
plot.ygrid.minor_grid_line_color = 'navy'
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = 'navy'
plot.ygrid.grid_line_alpha = 0.3
else:
plot.ygrid.grid_line_color = None
if xgrid:
plot.xgrid.minor_grid_line_color = 'navy'
plot.xgrid.minor_grid_line_alpha = 0.3
else:
plot.xgrid.grid_line_color = None
rng_select = _create_range_tool(data=data, min_time=min_time, max_time=max_time, plot_range=plot.x_range, width=width, height=height)
plot.xaxis[0].formatter = _get_tick_formatter()
if ((series_count > 1) and (not legend_pos)):
legend_pos = 'left'
legend_items = []
for (ser_name, series_def) in data.items():
if (legend_pos == 'inline'):
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'], legend_label=str(ser_name))
else:
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'])
if (legend_pos in ['left', 'right']):
legend_items.append((str(ser_name), [p_series]))
if (legend_pos == 'inline'):
plot.legend.location = 'center_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos)
if (ref_time is not None):
_add_ref_line(plot, ref_time, ref_label, len(data))
if show_range:
show(column(plot, rng_select))
else:
show(plot)
return plot | 8,766,972,964,578,907,000 | Display a timeline of events.
Parameters
----------
data : dict
Data points to plot on the timeline.
Need to contain:
Key - Name of data type to be displayed in legend
Value - dict of data containing:
data : pd.DataFrame
Data to plot
time_column : str
Name of the timestamp column
source_columns : list
List of source columns to use in tooltips
color: str
Color of datapoints for this data
Other Parameters
----------------
ref_time : datetime, optional
Input reference line to display (the default is None)
title : str, optional
Title to display (the default is None)
time_column : str, optional
Name of the timestamp column
(the default is 'TimeGenerated')
legend: str, optional
Where to position the legend
None, left, right or inline (default is None)
yaxis : bool, optional
Whether to show the yaxis and labels
range_tool : bool, optional
Show the range slider tool (default is True)
source_columns : list, optional
List of default source columns to use in tooltips
(the default is None)
height : int, optional
The height of the plot figure
(the default is auto-calculated height)
width : int, optional
The width of the plot figure (the default is 900)
Returns
-------
figure
The bokeh plot figure. | msticpy/nbtools/timeline.py | _display_timeline_dict | Dqirvin/msticpy | python | def _display_timeline_dict(data: dict, **kwargs) -> figure:
"\n Display a timeline of events.\n\n Parameters\n ----------\n data : dict\n Data points to plot on the timeline.\n Need to contain:\n Key - Name of data type to be displayed in legend\n Value - dict of data containing:\n data : pd.DataFrame\n Data to plot\n time_column : str\n Name of the timestamp column\n source_columns : list\n List of source columns to use in tooltips\n color: str\n Color of datapoints for this data\n Other Parameters\n ----------------\n ref_time : datetime, optional\n Input reference line to display (the default is None)\n title : str, optional\n Title to display (the default is None)\n time_column : str, optional\n Name of the timestamp column\n (the default is 'TimeGenerated')\n legend: str, optional\n Where to position the legend\n None, left, right or inline (default is None)\n yaxis : bool, optional\n Whether to show the yaxis and labels\n range_tool : bool, optional\n Show the the range slider tool (default is True)\n source_columns : list, optional\n List of default source columns to use in tooltips\n (the default is None)\n height : int, optional\n The height of the plot figure\n (the default is auto-calculated height)\n width : int, optional\n The width of the plot figure (the default is 900)\n\n Returns\n -------\n figure\n The bokeh plot figure.\n\n "
reset_output()
output_notebook()
height: int = kwargs.pop('height', None)
width: int = kwargs.pop('width', 900)
ref_time: Any = kwargs.pop('ref_time', None)
ref_label: str = kwargs.pop('ref_label', None)
title: str = kwargs.pop('title', None)
legend_pos: str = kwargs.pop('legend', None)
show_yaxis: bool = kwargs.pop('yaxis', False)
show_range: bool = kwargs.pop('range_tool', True)
xgrid: bool = kwargs.pop('xgrid', True)
ygrid: bool = kwargs.pop('ygrid', False)
(tool_tip_columns, min_time, max_time) = _unpack_data_series_dict(data, **kwargs)
series_count = len(data)
tool_tip_items = [(f'{col}', f'@{col}') for col in tool_tip_columns]
hover = HoverTool(tooltips=tool_tip_items, formatters={'Tooltip': 'printf'})
title = (f'Timeline: {title}' if title else 'Event Timeline')
start_range = (min_time - ((max_time - min_time) * 0.1))
end_range = (max_time + ((max_time - min_time) * 0.1))
height = (height if height else _calc_auto_plot_height(len(data)))
y_range = (((- 1) / series_count), ((series_count - 1) + (1 / series_count)))
plot = figure(x_range=(start_range, end_range), y_range=y_range, min_border_left=50, plot_height=height, plot_width=width, x_axis_label='Event Time', x_axis_type='datetime', x_minor_ticks=10, tools=[hover, 'xwheel_zoom', 'box_zoom', 'reset', 'save', 'xpan'], title=title)
plot.yaxis.visible = show_yaxis
if show_yaxis:
if data:
y_labels = {ser_def['y_index']: str(lbl) for (lbl, ser_def) in data.items()}
plot.yaxis.major_label_overrides = y_labels
if ygrid:
plot.ygrid.minor_grid_line_color = 'navy'
plot.ygrid.minor_grid_line_alpha = 0.1
plot.ygrid.grid_line_color = 'navy'
plot.ygrid.grid_line_alpha = 0.3
else:
plot.ygrid.grid_line_color = None
if xgrid:
plot.xgrid.minor_grid_line_color = 'navy'
plot.xgrid.minor_grid_line_alpha = 0.3
else:
plot.xgrid.grid_line_color = None
rng_select = _create_range_tool(data=data, min_time=min_time, max_time=max_time, plot_range=plot.x_range, width=width, height=height)
plot.xaxis[0].formatter = _get_tick_formatter()
if ((series_count > 1) and (not legend_pos)):
legend_pos = 'left'
legend_items = []
for (ser_name, series_def) in data.items():
if (legend_pos == 'inline'):
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'], legend_label=str(ser_name))
else:
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'])
if (legend_pos in ['left', 'right']):
legend_items.append((str(ser_name), [p_series]))
if (legend_pos == 'inline'):
plot.legend.location = 'center_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos)
if (ref_time is not None):
_add_ref_line(plot, ref_time, ref_label, len(data))
if show_range:
show(column(plot, rng_select))
else:
show(plot)
return plot |
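The docstring above spells out the nested dict each timeline series must supply. A minimal sketch of assembling that structure with pandas (the column names, color and the final call are illustrative assumptions, not taken from msticpy):

import pandas as pd

logons = pd.DataFrame({
    'TimeGenerated': pd.to_datetime(['2021-01-01 10:00', '2021-01-01 10:05']),
    'Account': ['alice', 'bob'],
})
timeline_data = {
    'Logons': {                          # key becomes the legend entry
        'data': logons,                  # DataFrame holding the events
        'time_column': 'TimeGenerated',  # timestamp column plotted on the x-axis
        'source_columns': ['Account'],   # extra columns surfaced in tooltips
        'color': 'navy',                 # glyph color for this series
    },
}
# timeline_data would then be handed to the timeline display routine, e.g.
# _display_timeline_dict(timeline_data, title='Sign-ins', legend='left')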
def _get_ref_event_time(**kwargs) -> Tuple[(datetime, str)]:
'Extract the reference time from kwargs.'
ref_alert = kwargs.get('alert', None)
if (ref_alert is not None):
ref_event = ref_alert
ref_label = 'Alert time'
else:
ref_event = kwargs.get('ref_event', None)
ref_label = 'Event time'
if (ref_event is not None):
ref_time = getattr(ref_event, 'StartTimeUtc', None)
if (not ref_time):
ref_time = getattr(ref_event, 'TimeGenerated', None)
else:
ref_time = kwargs.get('ref_time', None)
ref_label = 'Ref time'
return (ref_time, kwargs.get('ref_label', ref_label)) | 502,102,706,645,366,100 | Extract the reference time from kwargs. | msticpy/nbtools/timeline.py | _get_ref_event_time | Dqirvin/msticpy | python | def _get_ref_event_time(**kwargs) -> Tuple[(datetime, str)]:
ref_alert = kwargs.get('alert', None)
if (ref_alert is not None):
ref_event = ref_alert
ref_label = 'Alert time'
else:
ref_event = kwargs.get('ref_event', None)
ref_label = 'Event time'
if (ref_event is not None):
ref_time = getattr(ref_event, 'StartTimeUtc', None)
if (not ref_time):
ref_time = getattr(ref_event, 'TimeGenerated', None)
else:
ref_time = kwargs.get('ref_time', None)
ref_label = 'Ref time'
return (ref_time, kwargs.get('ref_label', ref_label)) |
def _plot_dict_series(data, plot, legend_pos):
'Plot series from dict.'
legend_items = []
for (ser_name, series_def) in data.items():
if (legend_pos == 'inline'):
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'], legend_label=str(ser_name))
else:
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'])
if (legend_pos in ['left', 'right']):
legend_items.append((ser_name, [p_series]))
if (legend_pos == 'inline'):
plot.legend.location = 'top_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos) | 7,726,691,837,423,861,000 | Plot series from dict. | msticpy/nbtools/timeline.py | _plot_dict_series | Dqirvin/msticpy | python | def _plot_dict_series(data, plot, legend_pos):
legend_items = []
for (ser_name, series_def) in data.items():
if (legend_pos == 'inline'):
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'], legend_label=str(ser_name))
else:
p_series = plot.diamond(x=series_def['time_column'], y='y_index', color=series_def['color'], alpha=0.5, size=10, source=series_def['source'])
if (legend_pos in ['left', 'right']):
legend_items.append((ser_name, [p_series]))
if (legend_pos == 'inline'):
plot.legend.location = 'top_left'
plot.legend.click_policy = 'hide'
elif (legend_pos in ['left', 'right']):
ext_legend = Legend(items=legend_items, location='center', click_policy='hide', label_text_font_size='8pt')
plot.add_layout(ext_legend, legend_pos) |
def _wrap_df_columns(data: pd.DataFrame, wrap_len: int=50):
'Wrap any string columns.'
if (not data.empty):
for col in data.columns:
if isinstance(data[col].iloc[0], str):
data[col] = data[col].str.wrap(wrap_len) | 647,050,524,434,827,400 | Wrap any string columns. | msticpy/nbtools/timeline.py | _wrap_df_columns | Dqirvin/msticpy | python | def _wrap_df_columns(data: pd.DataFrame, wrap_len: int=50):
if (not data.empty):
for col in data.columns:
if isinstance(data[col].iloc[0], str):
data[col] = data[col].str.wrap(wrap_len) |
def _get_tick_formatter() -> DatetimeTickFormatter:
'Return tick formatting for different zoom levels.'
tick_format = DatetimeTickFormatter()
tick_format.days = ['%m-%d %H:%M']
tick_format.hours = ['%H:%M:%S']
tick_format.minutes = ['%H:%M:%S']
tick_format.seconds = ['%H:%M:%S']
tick_format.milliseconds = ['%H:%M:%S.%3N']
return tick_format | 6,239,954,124,480,516,000 | Return tick formatting for different zoom levels. | msticpy/nbtools/timeline.py | _get_tick_formatter | Dqirvin/msticpy | python | def _get_tick_formatter() -> DatetimeTickFormatter:
tick_format = DatetimeTickFormatter()
tick_format.days = ['%m-%d %H:%M']
tick_format.hours = ['%H:%M:%S']
tick_format.minutes = ['%H:%M:%S']
tick_format.seconds = ['%H:%M:%S']
tick_format.milliseconds = ['%H:%M:%S.%3N']
return tick_format |
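As a hedged illustration (assuming Bokeh 2.x, where these formatter fields accept lists of format strings as in the code above), the returned formatter is typically attached to a datetime x-axis like this:

from bokeh.models import DatetimeTickFormatter
from bokeh.plotting import figure

fig = figure(x_axis_type='datetime', plot_width=600, plot_height=200)
fig.xaxis[0].formatter = DatetimeTickFormatter(
    days=['%m-%d %H:%M'],   # day-level zoom shows date and time
    hours=['%H:%M:%S'],     # finer zoom levels fall back to time of day
    minutes=['%H:%M:%S'],
    seconds=['%H:%M:%S'],
)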
def _calc_auto_plot_height(group_count):
'Dynamic calculation of plot height.'
ht_per_row = 40
if (group_count > 15):
ht_per_row = 25
return max((ht_per_row * group_count), 300) | 2,604,020,579,015,324,700 | Dynamic calculation of plot height. | msticpy/nbtools/timeline.py | _calc_auto_plot_height | Dqirvin/msticpy | python | def _calc_auto_plot_height(group_count):
ht_per_row = 40
if (group_count > 15):
ht_per_row = 25
return max((ht_per_row * group_count), 300) |
def _create_range_tool(data, min_time, max_time, plot_range, width, height, time_column: str=None):
'Create a plot bar to act as a range selector.'
ext_min = (min_time - ((max_time - min_time) * 0.15))
ext_max = (max_time + ((max_time - min_time) * 0.15))
plot_height = max(120, int((height * 0.2)))
rng_select = figure(x_range=(ext_min, ext_max), title='Range Selector', plot_height=plot_height, plot_width=width, x_axis_type='datetime', y_axis_type=None, tools='', toolbar_location=None)
help_str = ('Drag the middle or edges of the selection box to change ' + 'the range in the main chart')
rng_select.add_layout(Title(text=help_str, align='right', text_font_size='10px'), 'below')
rng_select.xaxis[0].formatter = _get_tick_formatter()
if isinstance(data, dict):
for (_, series_def) in data.items():
rng_select.circle(x=series_def['time_column'], y='y_index', color=series_def['color'], source=series_def['source'])
elif isinstance(data, pd.DataFrame):
rng_select.circle(x=time_column, y='y_index', color='blue', source=ColumnDataSource(data))
range_tool = RangeTool(x_range=plot_range)
range_tool.overlay.fill_color = 'navy'
range_tool.overlay.fill_alpha = 0.2
rng_select.ygrid.grid_line_color = None
rng_select.add_tools(range_tool)
rng_select.toolbar.active_multi = range_tool
return rng_select | 7,389,459,447,783,332,000 | Create a plot bar to act as a range selector. | msticpy/nbtools/timeline.py | _create_range_tool | Dqirvin/msticpy | python | def _create_range_tool(data, min_time, max_time, plot_range, width, height, time_column: str=None):
ext_min = (min_time - ((max_time - min_time) * 0.15))
ext_max = (max_time + ((max_time - min_time) * 0.15))
plot_height = max(120, int((height * 0.2)))
rng_select = figure(x_range=(ext_min, ext_max), title='Range Selector', plot_height=plot_height, plot_width=width, x_axis_type='datetime', y_axis_type=None, tools='', toolbar_location=None)
help_str = ('Drag the middle or edges of the selection box to change ' + 'the range in the main chart')
rng_select.add_layout(Title(text=help_str, align='right', text_font_size='10px'), 'below')
rng_select.xaxis[0].formatter = _get_tick_formatter()
if isinstance(data, dict):
for (_, series_def) in data.items():
rng_select.circle(x=series_def['time_column'], y='y_index', color=series_def['color'], source=series_def['source'])
elif isinstance(data, pd.DataFrame):
rng_select.circle(x=time_column, y='y_index', color='blue', source=ColumnDataSource(data))
range_tool = RangeTool(x_range=plot_range)
range_tool.overlay.fill_color = 'navy'
range_tool.overlay.fill_alpha = 0.2
rng_select.ygrid.grid_line_color = None
rng_select.add_tools(range_tool)
rng_select.toolbar.active_multi = range_tool
return rng_select |
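The essential linkage in the helper above is that the RangeTool is built around the main figure's x_range, so dragging the selection box pans and zooms the main chart. A stripped-down sketch of that pattern in plain Bokeh (sizes and colors are arbitrary choices):

from bokeh.models import RangeTool
from bokeh.plotting import figure

main = figure(x_axis_type='datetime', plot_width=900, plot_height=300)
selector = figure(x_axis_type='datetime', plot_width=900, plot_height=120,
                  y_axis_type=None, tools='', toolbar_location=None)

range_tool = RangeTool(x_range=main.x_range)  # the drag box drives the main plot's x_range
range_tool.overlay.fill_color = 'navy'
range_tool.overlay.fill_alpha = 0.2
selector.add_tools(range_tool)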
def _add_ref_line(plot, ref_time, ref_text='Ref time', series_count=1):
'Add a reference marker line and label at `ref_time`.'
ref_label_tm = pd.Timestamp(ref_time)
plot.line(x=[ref_label_tm, ref_label_tm], y=[0, series_count])
ref_label = Label(x=ref_label_tm, y=0, y_offset=10, x_units='data', y_units='data', text=f'< {ref_text}', text_font_size='8pt', render_mode='css', border_line_color='red', border_line_alpha=1.0, background_fill_color='white', background_fill_alpha=0.5)
plot.add_layout(ref_label) | 5,033,550,887,243,387,000 | Add a reference marker line and label at `ref_time`. | msticpy/nbtools/timeline.py | _add_ref_line | Dqirvin/msticpy | python | def _add_ref_line(plot, ref_time, ref_text='Ref time', series_count=1):
ref_label_tm = pd.Timestamp(ref_time)
plot.line(x=[ref_label_tm, ref_label_tm], y=[0, series_count])
ref_label = Label(x=ref_label_tm, y=0, y_offset=10, x_units='data', y_units='data', text=f'< {ref_text}', text_font_size='8pt', render_mode='css', border_line_color='red', border_line_alpha=1.0, background_fill_color='white', background_fill_alpha=0.5)
plot.add_layout(ref_label) |
def render_form(self, *args, **kwargs):
'Placeholder for Wagtail < 2.13'
return '' | -8,506,567,350,089,177,000 | Placeholder for Wagtail < 2.13 | wagtail_localize/test/models.py | render_form | dinoperovic/wagtail-localize | python | def render_form(self, *args, **kwargs):
return ''
def filtermultiport(ips):
'Filter out hosts with more than one node per IP'
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key, value) in list(hist.items()) if (len(value) == 1)] | 6,911,170,735,548,327,000 | Filter out hosts with more than one node per IP | contrib/seeds/makeseeds.py | filtermultiport | BitHostCoin/BitHost | python | def filtermultiport(ips):
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key, value) in list(hist.items()) if (len(value) == 1)] |
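A small worked example of the grouping logic above (the dict contents are made up for illustration): only entries whose 'sortkey' occurs exactly once survive.

ips = [
    {'sortkey': '1.2.3.4', 'port': 8333},
    {'sortkey': '1.2.3.4', 'port': 18333},  # second node sharing the same IP
    {'sortkey': '5.6.7.8', 'port': 8333},
]
print(filtermultiport(ips))
# [{'sortkey': '5.6.7.8', 'port': 8333}]  -- 1.2.3.4 is dropped because two nodes share it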
def test_next_must_pass(self):
"\n Kathy and Tom each have face cards, tom just played and the total is at 30\n\n Expected: It is now kathy's turn and she must pass\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'tom', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['turn'] == 'kathy')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PASS') | -4,560,224,086,035,091,000 | Kathy and Tom each have face cards, tom just played and the total is at 30
Expected: It is now kathy's turn and she must pass | cribbage/app/tests/test_bev.py | test_next_must_pass | zachcalvert/card-games | python | def test_next_must_pass(self):
"\n Kathy and Tom each have face cards, tom just played and the total is at 30\n\n Expected: It is now kathy's turn and she must pass\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'tom', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['turn'] == 'kathy')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PASS') |
def test_next_must_play(self):
"\n Kathy and Tom each have aces. Tom just played and the total is at 30\n\n Expected: It is now kathy's turn and she must play\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'tom', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['turn'] == 'kathy')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PLAY') | -4,929,694,881,815,937,000 | Kathy and Tom each have aces. Tom just played and the total is at 30
Expected: It is now kathy's turn and she must play | cribbage/app/tests/test_bev.py | test_next_must_play | zachcalvert/card-games | python | def test_next_must_play(self):
"\n Kathy and Tom each have aces. Tom just played and the total is at 30\n\n Expected: It is now kathy's turn and she must play\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'tom', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['turn'] == 'kathy')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PLAY') |
def test_everyone_has_passed_and_tom_cant_play_again_this_round(self):
"\n Kathy and Tom each have face cards, kathy just passed and the total is at 30\n\n Expected: It is Tom's turn and he must pass.\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': ['kathy'], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'scoring_stats': {'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['tom'] == 0)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PASS') | -6,701,485,922,209,635,000 | Kathy and Tom each have face cards, kathy just passed and the total is at 30
Expected: It is Tom's turn and he must pass. | cribbage/app/tests/test_bev.py | test_everyone_has_passed_and_tom_cant_play_again_this_round | zachcalvert/card-games | python | def test_everyone_has_passed_and_tom_cant_play_again_this_round(self):
"\n Kathy and Tom each have face cards, kathy just passed and the total is at 30\n\n Expected: It is Tom's turn and he must pass.\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': ['kathy'], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'scoring_stats': {'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['tom'] == 0)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PASS') |
def test_everyone_else_has_passed_and_tom_can_play_again_this_round(self):
"\n Tom has an Ace, kathy just passed and the total is at 30\n\n Expected: It is now Tom's turn to play, he does not receive a point for go\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['EXAMPLE_KEY']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': ['kathy'], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['tom'] == 0)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PLAY') | 541,724,108,961,516,740 | Tom has an Ace, kathy just passed and the total is at 30
Expected: It is now Tom's turn to play, he does not receive a point for go | cribbage/app/tests/test_bev.py | test_everyone_else_has_passed_and_tom_can_play_again_this_round | zachcalvert/card-games | python | def test_everyone_else_has_passed_and_tom_can_play_again_this_round(self):
"\n Tom has an Ace, kathy just passed and the total is at 30\n\n Expected: It is now Tom's turn to play, he does not receive a point for go\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['EXAMPLE_KEY']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'tom', 'passed': ['kathy'], 'run': [], 'total': 30}, 'players': {'tom': 0, 'kathy': 0}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['tom'] == 0)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 30)
assert (bev.get_player_action('test', g['turn']) == 'PLAY') |
def test_kathy_hit_thirtyone_still_has_cards(self):
'\n Kathy just hit 31, and still has cards\n\n Expected: no new points for kathy, and its her turn with a fresh pegging area\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 0) | -137,440,574,099,362,750 | Kathy just hit 31, and still has cards
Expected: no new points for kathy, and its her turn with a fresh pegging area | cribbage/app/tests/test_bev.py | test_kathy_hit_thirtyone_still_has_cards | zachcalvert/card-games | python | def test_kathy_hit_thirtyone_still_has_cards(self):
'\n Kathy just hit 31, and still has cards\n\n Expected: no new points for kathy, and its her turn with a fresh pegging area\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['5e1e7e60ab'], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 0) |
def test_kathy_hit_thirtyone_has_no_cards_left_and_others_do(self):
"\n Kathy just hit 31, and has no cards left. Tom has a card left\n\n Expected: no new points for kathy, and its now Tom's turn with a fresh pegging area\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': [], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 0) | -2,291,250,625,859,228,000 | Kathy just hit 31, and has no cards left. Tom has a card left
Expected: no new points for kathy, and its now Tom's turn with a fresh pegging area | cribbage/app/tests/test_bev.py | test_kathy_hit_thirtyone_has_no_cards_left_and_others_do | zachcalvert/card-games | python | def test_kathy_hit_thirtyone_has_no_cards_left_and_others_do(self):
"\n Kathy just hit 31, and has no cards left. Tom has a card left\n\n Expected: no new points for kathy, and its now Tom's turn with a fresh pegging area\n "
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': [], 'tom': ['95f92b2f0c']}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['turn'] == 'tom')
assert (g['pegging']['total'] == 0) |
def test_player_hit_thirtyone_and_no_one_has_cards_left(self):
'\n Kathy just hit 31, and everyone is out of cards\n\n Expected: no new points for kathy, and it is now time to score hands\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'first_to_score': 'tom', 'hands': {'kathy': [], 'tom': []}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['pegging']['total'] == 0)
assert (g['state'] == 'SCORE')
assert (g['turn'] == 'tom') | -749,672,507,504,275,300 | Kathy just hit 31, and everyone is out of cards
Expected: no new points for kathy, and it is now time to score hands | cribbage/app/tests/test_bev.py | test_player_hit_thirtyone_and_no_one_has_cards_left | zachcalvert/card-games | python | def test_player_hit_thirtyone_and_no_one_has_cards_left(self):
'\n Kathy just hit 31, and everyone is out of cards\n\n Expected: no new points for kathy, and it is now time to score hands\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'first_to_score': 'tom', 'hands': {'kathy': [], 'tom': []}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 31}, 'players': {'tom': 0, 'kathy': 2}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 2)
assert (g['pegging']['total'] == 0)
assert (g['state'] == 'SCORE')
assert (g['turn'] == 'tom') |
@mock.patch('app.award_points', mock.MagicMock(return_value=True))
def test_no_one_has_cards_left(self):
'\n Kathy just hit 24, and everyone is out of cards\n\n Expected: Kathy gets 1 point for go, and it is now time to score hands\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'first_to_score': 'tom', 'hands': {'kathy': [], 'tom': []}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 24}, 'players': {'tom': 0, 'kathy': 2}, 'scoring_stats': {'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 3)
assert (g['pegging']['total'] == 0)
assert (g['state'] == 'SCORE')
assert (g['turn'] == 'tom') | 8,366,143,360,917,596,000 | Kathy just hit 24, and everyone is out of cards
Expected: Kathy gets 1 point for go, and it is now time to score hands | cribbage/app/tests/test_bev.py | test_no_one_has_cards_left | zachcalvert/card-games | python | @mock.patch('app.award_points', mock.MagicMock(return_value=True))
def test_no_one_has_cards_left(self):
'\n Kathy just hit 24, and everyone is out of cards\n\n Expected: Kathy gets 1 point for go, and it is now time to score hands\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'first_to_score': 'tom', 'hands': {'kathy': [], 'tom': []}, 'pegging': {'cards': ['75e734d054', '60575e1068', '1d5eb77128'], 'last_played': 'kathy', 'passed': [], 'run': [], 'total': 24}, 'players': {'tom': 0, 'kathy': 2}, 'scoring_stats': {'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
bev.next_player('test')
g = json.loads(fake_redis.get('test'))
assert (g['players']['kathy'] == 3)
assert (g['pegging']['total'] == 0)
assert (g['state'] == 'SCORE')
assert (g['turn'] == 'tom') |
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_thirtyone(self):
'\n Verify two points for 31\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY', 'c6f4900f82'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['4de6b73ab8', 'f6571e162f', 'c88523b677'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 24}, 'players': {'tom': 0, 'kathy': 0}, 'played_cards': {'kathy': ['f6571e162f'], 'tom': ['4de6b73ab8', 'c88523b677']}, 'scoring_stats': {'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
(just_won, points, points_source) = bev.score_play('test', 'kathy', seven_of_clubs)
assert (not just_won)
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert (set(g['pegging']['cards']) == set(['4de6b73ab8', 'f6571e162f', 'c88523b677', 'c6f4900f82']))
assert (g['hands']['kathy'] == ['EXAMPLE_KEY'])
assert (g['players']['kathy'] == 2)
assert (g['pegging']['total'] == 31) | -6,152,835,974,117,221,000 | Verify two points for 31 | cribbage/app/tests/test_bev.py | test_thirtyone | zachcalvert/card-games | python | @mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_thirtyone(self):
'\n \n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY', 'c6f4900f82'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['4de6b73ab8', 'f6571e162f', 'c88523b677'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 24}, 'players': {'tom': 0, 'kathy': 0}, 'played_cards': {'kathy': ['f6571e162f'], 'tom': ['4de6b73ab8', 'c88523b677']}, 'scoring_stats': {'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
(just_won, points, points_source) = bev.score_play('test', 'kathy', seven_of_clubs)
assert (not just_won)
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert (set(g['pegging']['cards']) == set(['4de6b73ab8', 'f6571e162f', 'c88523b677', 'c6f4900f82']))
assert (g['hands']['kathy'] == ['EXAMPLE_KEY'])
assert (g['players']['kathy'] == 2)
assert (g['pegging']['total'] == 31) |
@mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_run_of_three(self):
'\n test run of three scores three points\n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY', 'c6f4900f82'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['4de6b73ab8', 'c88523b677'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 14}, 'players': {'tom': 0, 'kathy': 0}, 'played_cards': {'kathy': ['32f7615119'], 'tom': ['4f99bf15e5', 'def8effef6']}, 'scoring_stats': {'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
(just_won, points, points_source) = bev.score_play('test', 'kathy', seven_of_clubs)
assert (not just_won)
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert (set(g['pegging']['cards']) == set(['4de6b73ab8', 'c88523b677', 'c6f4900f82']))
assert (g['hands']['kathy'] == ['EXAMPLE_KEY'])
assert (g['pegging']['total'] == 21)
assert (g['players']['kathy'] == 3) | 8,995,552,579,221,174,000 | test run of three scores three points | cribbage/app/tests/test_bev.py | test_run_of_three | zachcalvert/card-games | python | @mock.patch('app.award_points', mock.MagicMock(return_value=False))
def test_run_of_three(self):
'\n \n '
fake_redis = fakeredis.FakeRedis()
game_dict = {'cards': CARDS, 'hands': {'kathy': ['EXAMPLE_KEY', 'c6f4900f82'], 'tom': ['ace1293f8a']}, 'pegging': {'cards': ['4de6b73ab8', 'c88523b677'], 'last_played': 'tom', 'passed': [], 'run': [], 'total': 14}, 'players': {'tom': 0, 'kathy': 0}, 'played_cards': {'kathy': ['32f7615119'], 'tom': ['4f99bf15e5', 'def8effef6']}, 'scoring_stats': {'tom': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}, 'kathy': {'a_play': 0, 'b_hand': 0, 'c_crib': 0}}, 'state': 'PLAY', 'turn': 'kathy', 'winning_score': 121}
fake_redis.set('test', json.dumps(game_dict))
bev.cache = fake_redis
seven_of_clubs = 'c6f4900f82'
(just_won, points, points_source) = bev.score_play('test', 'kathy', seven_of_clubs)
assert (not just_won)
bev.record_play('test', 'kathy', seven_of_clubs)
g = json.loads(fake_redis.get('test'))
assert (set(g['pegging']['cards']) == set(['4de6b73ab8', 'c88523b677', 'c6f4900f82']))
assert (g['hands']['kathy'] == ['EXAMPLE_KEY'])
assert (g['pegging']['total'] == 21)
assert (g['players']['kathy'] == 3) |
def equals(self, other: Any) -> bool:
'\n Determines if two Index objects contain the same elements.\n '
if self.is_(other):
return True
if (not isinstance(other, Index)):
return False
elif (other.dtype.kind in ['f', 'i', 'u', 'c']):
return False
elif (not isinstance(other, type(self))):
should_try = False
inferable = self._data._infer_matches
if (other.dtype == object):
should_try = (other.inferred_type in inferable)
elif is_categorical_dtype(other.dtype):
other = cast('CategoricalIndex', other)
should_try = (other.categories.inferred_type in inferable)
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
return False
if (not is_dtype_equal(self.dtype, other.dtype)):
return False
return np.array_equal(self.asi8, other.asi8) | -8,305,807,658,120,672,000 | Determines if two Index objects contain the same elements. | pandas/core/indexes/datetimelike.py | equals | DiligentDolphin/pandas | python | def equals(self, other: Any) -> bool:
'\n \n '
if self.is_(other):
return True
if (not isinstance(other, Index)):
return False
elif (other.dtype.kind in ['f', 'i', 'u', 'c']):
return False
elif (not isinstance(other, type(self))):
should_try = False
inferable = self._data._infer_matches
if (other.dtype == object):
should_try = (other.inferred_type in inferable)
elif is_categorical_dtype(other.dtype):
other = cast('CategoricalIndex', other)
should_try = (other.categories.inferred_type in inferable)
if should_try:
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
return False
if (not is_dtype_equal(self.dtype, other.dtype)):
return False
return np.array_equal(self.asi8, other.asi8) |
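A brief usage sketch of the behaviour implemented above, using only public pandas calls (the values are illustrative):

import pandas as pd

left = pd.DatetimeIndex(['2021-01-01', '2021-01-02'])
boxed = pd.Index([pd.Timestamp('2021-01-01'), pd.Timestamp('2021-01-02')], dtype=object)

print(left.equals(left.copy()))       # True  - same elements, same dtype
print(left.equals(boxed))             # True  - object index of datetimes is converted first
print(left.equals(pd.Index([1, 2])))  # False - numeric dtypes are rejected up front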
def format(self, name: bool=False, formatter: (Callable | None)=None, na_rep: str='NaT', date_format: (str | None)=None) -> list[str]:
'\n Render a string representation of the Index.\n '
header = []
if name:
header.append((ibase.pprint_thing(self.name, escape_chars=('\t', '\r', '\n')) if (self.name is not None) else ''))
if (formatter is not None):
return (header + list(self.map(formatter)))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format) | 8,713,305,425,244,024,000 | Render a string representation of the Index. | pandas/core/indexes/datetimelike.py | format | DiligentDolphin/pandas | python | def format(self, name: bool=False, formatter: (Callable | None)=None, na_rep: str='NaT', date_format: (str | None)=None) -> list[str]:
'\n \n '
header = []
if name:
header.append((ibase.pprint_thing(self.name, escape_chars=('\t', '\r', '\n')) if (self.name is not None) else ''))
if (formatter is not None):
return (header + list(self.map(formatter)))
return self._format_with_header(header, na_rep=na_rep, date_format=date_format) |
def _format_attrs(self):
'\n Return a list of tuples of the (attr,formatted_value).\n '
attrs = super()._format_attrs()
for attrib in self._attributes:
if (attrib == 'freq'):
freq = self.freqstr
if (freq is not None):
freq = repr(freq)
attrs.append(('freq', freq))
return attrs | 4,205,978,032,163,911,700 | Return a list of tuples of the (attr,formatted_value). | pandas/core/indexes/datetimelike.py | _format_attrs | DiligentDolphin/pandas | python | def _format_attrs(self):
'\n \n '
attrs = super()._format_attrs()
for attrib in self._attributes:
if (attrib == 'freq'):
freq = self.freqstr
if (freq is not None):
freq = repr(freq)
attrs.append(('freq', freq))
return attrs |
@final
def _partial_date_slice(self, reso: Resolution, parsed: datetime):
'\n Parameters\n ----------\n reso : Resolution\n parsed : datetime\n\n Returns\n -------\n slice or ndarray[intp]\n '
if (not self._can_partial_date_slice(reso)):
raise ValueError
(t1, t2) = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if (len(self) and (((t1 < self[0]) and (t2 < self[0])) or ((t1 > self[(- 1)]) and (t2 > self[(- 1)])))):
raise KeyError
left = vals.searchsorted(unbox(t1), side='left')
right = vals.searchsorted(unbox(t2), side='right')
return slice(left, right)
else:
lhs_mask = (vals >= unbox(t1))
rhs_mask = (vals <= unbox(t2))
return (lhs_mask & rhs_mask).nonzero()[0] | 2,203,640,350,825,362,400 | Parameters
----------
reso : Resolution
parsed : datetime
Returns
-------
slice or ndarray[intp] | pandas/core/indexes/datetimelike.py | _partial_date_slice | DiligentDolphin/pandas | python | @final
def _partial_date_slice(self, reso: Resolution, parsed: datetime):
'\n Parameters\n ----------\n reso : Resolution\n parsed : datetime\n\n Returns\n -------\n slice or ndarray[intp]\n '
if (not self._can_partial_date_slice(reso)):
raise ValueError
(t1, t2) = self._parsed_string_to_bounds(reso, parsed)
vals = self._data._ndarray
unbox = self._data._unbox
if self.is_monotonic_increasing:
if (len(self) and (((t1 < self[0]) and (t2 < self[0])) or ((t1 > self[(- 1)]) and (t2 > self[(- 1)])))):
raise KeyError
left = vals.searchsorted(unbox(t1), side='left')
right = vals.searchsorted(unbox(t2), side='right')
return slice(left, right)
else:
lhs_mask = (vals >= unbox(t1))
rhs_mask = (vals <= unbox(t2))
return (lhs_mask & rhs_mask).nonzero()[0] |
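The monotonic fast path above reduces to two binary searches over the index's int64 view; a toy sketch of that idea with plain NumPy (the values stand in for real i8 timestamps):

import numpy as np

vals = np.array([10, 20, 30, 40, 50], dtype=np.int64)  # stand-in for the asi8 view
t1, t2 = 20, 40                                        # bounds derived from the parsed string
left = vals.searchsorted(t1, side='left')
right = vals.searchsorted(t2, side='right')
print(slice(left, right))  # the resulting slice covers 20, 30 and 40 (positions 1 through 3)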
def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"\n If label is a string, cast it to scalar type according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n "
assert (kind in ['loc', 'getitem', None, lib.no_default])
self._deprecated_arg(kind, 'kind', '_maybe_cast_slice_bound')
if isinstance(label, str):
try:
(parsed, reso) = self._parse_with_reso(label)
except ValueError as err:
raise self._invalid_indexer('slice', label) from err
(lower, upper) = self._parsed_string_to_bounds(reso, parsed)
return (lower if (side == 'left') else upper)
elif (not isinstance(label, self._data._recognized_scalars)):
raise self._invalid_indexer('slice', label)
return label | -7,072,608,151,381,606,000 | If label is a string, cast it to scalar type according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'loc', 'getitem'} or None
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller. | pandas/core/indexes/datetimelike.py | _maybe_cast_slice_bound | DiligentDolphin/pandas | python | def _maybe_cast_slice_bound(self, label, side: str, kind=lib.no_default):
"\n If label is a string, cast it to scalar type according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n\n Notes\n -----\n Value of `side` parameter should be validated in caller.\n "
assert (kind in ['loc', 'getitem', None, lib.no_default])
self._deprecated_arg(kind, 'kind', '_maybe_cast_slice_bound')
if isinstance(label, str):
try:
(parsed, reso) = self._parse_with_reso(label)
except ValueError as err:
raise self._invalid_indexer('slice', label) from err
(lower, upper) = self._parsed_string_to_bounds(reso, parsed)
return (lower if (side == 'left') else upper)
elif (not isinstance(label, self._data._recognized_scalars)):
raise self._invalid_indexer('slice', label)
return label |
def shift(self: _T, periods: int=1, freq=None) -> _T:
"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.DatetimeIndex\n Shifted index.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n PeriodIndex.shift : Shift values of PeriodIndex.\n "
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)._simple_new(result, name=self.name) | -8,632,447,863,693,839,000 | Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex. | pandas/core/indexes/datetimelike.py | shift | DiligentDolphin/pandas | python | def shift(self: _T, periods: int=1, freq=None) -> _T:
"\n Shift index by desired number of time frequency increments.\n\n This method is for shifting the values of datetime-like indexes\n by a specified time increment a given number of times.\n\n Parameters\n ----------\n periods : int, default 1\n Number of periods (or increments) to shift by,\n can be positive or negative.\n freq : pandas.DateOffset, pandas.Timedelta or string, optional\n Frequency increment to shift by.\n If None, the index is shifted by its own `freq` attribute.\n Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.\n\n Returns\n -------\n pandas.DatetimeIndex\n Shifted index.\n\n See Also\n --------\n Index.shift : Shift values of Index.\n PeriodIndex.shift : Shift values of PeriodIndex.\n "
arr = self._data.view()
arr._freq = self.freq
result = arr._time_shift(periods, freq=freq)
return type(self)._simple_new(result, name=self.name) |
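A short usage example of the public behaviour documented above (standard pandas; the dates and frequencies are arbitrary):

import pandas as pd

idx = pd.date_range('2021-01-01', periods=3, freq='D')
print(idx.shift(2))            # ['2021-01-03', '2021-01-04', '2021-01-05'], freq='D'
print(idx.shift(1, freq='H'))  # each timestamp moved forward by one hour instead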
def _intersection(self, other: Index, sort=False) -> Index:
'\n intersection specialized to the case with matching dtypes and both non-empty.\n '
other = cast('DatetimeTimedeltaMixin', other)
if self._can_range_setop(other):
return self._range_intersect(other, sort=sort)
if (not self._can_fast_intersect(other)):
result = Index._intersection(self, other, sort=sort)
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq('infer')
else:
return self._fast_intersect(other, sort) | -2,951,834,144,288,449,000 | intersection specialized to the case with matching dtypes and both non-empty. | pandas/core/indexes/datetimelike.py | _intersection | DiligentDolphin/pandas | python | def _intersection(self, other: Index, sort=False) -> Index:
'\n \n '
other = cast('DatetimeTimedeltaMixin', other)
if self._can_range_setop(other):
return self._range_intersect(other, sort=sort)
if (not self._can_fast_intersect(other)):
result = Index._intersection(self, other, sort=sort)
result = self._wrap_setop_result(other, result)
return result._with_freq(None)._with_freq('infer')
else:
return self._fast_intersect(other, sort) |
def _get_join_freq(self, other):
'\n Get the freq to attach to the result of a join operation.\n '
freq = None
if self._can_fast_union(other):
freq = self.freq
return freq | -8,029,963,893,525,508,000 | Get the freq to attach to the result of a join operation. | pandas/core/indexes/datetimelike.py | _get_join_freq | DiligentDolphin/pandas | python | def _get_join_freq(self, other):
'\n \n '
freq = None
if self._can_fast_union(other):
freq = self.freq
return freq |
def _get_delete_freq(self, loc: ((int | slice) | Sequence[int])):
'\n Find the `freq` for self.delete(loc).\n '
freq = None
if (self.freq is not None):
if is_integer(loc):
if (loc in (0, (- len(self)), (- 1), (len(self) - 1))):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
if (isinstance(loc, slice) and (loc.step in (1, None))):
if ((loc.start in (0, None)) or (loc.stop in (len(self), None))):
freq = self.freq
return freq | -9,139,549,193,207,140,000 | Find the `freq` for self.delete(loc). | pandas/core/indexes/datetimelike.py | _get_delete_freq | DiligentDolphin/pandas | python | def _get_delete_freq(self, loc: ((int | slice) | Sequence[int])):
'\n \n '
freq = None
if (self.freq is not None):
if is_integer(loc):
if (loc in (0, (- len(self)), (- 1), (len(self) - 1))):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
if (isinstance(loc, slice) and (loc.step in (1, None))):
if ((loc.start in (0, None)) or (loc.stop in (len(self), None))):
freq = self.freq
return freq |
def _get_insert_freq(self, loc: int, item):
'\n Find the `freq` for self.insert(loc, item).\n '
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if (self.freq is not None):
if self.size:
if (item is NaT):
pass
elif (((loc == 0) or (loc == (- len(self)))) and ((item + self.freq) == self[0])):
freq = self.freq
elif ((loc == len(self)) and ((item - self.freq) == self[(- 1)])):
freq = self.freq
elif isinstance(self.freq, Tick):
freq = self.freq
elif self.freq.is_on_offset(item):
freq = self.freq
return freq | 5,177,903,697,816,854,000 | Find the `freq` for self.insert(loc, item). | pandas/core/indexes/datetimelike.py | _get_insert_freq | DiligentDolphin/pandas | python | def _get_insert_freq(self, loc: int, item):
'\n \n '
value = self._data._validate_scalar(item)
item = self._data._box_func(value)
freq = None
if (self.freq is not None):
if self.size:
if (item is NaT):
pass
elif (((loc == 0) or (loc == (- len(self)))) and ((item + self.freq) == self[0])):
freq = self.freq
elif ((loc == len(self)) and ((item - self.freq) == self[(- 1)])):
freq = self.freq
elif isinstance(self.freq, Tick):
freq = self.freq
elif self.freq.is_on_offset(item):
freq = self.freq
return freq |
def clear_mysql_db():
'\n Clear MySQL Database\n :return: true\n '
logger.info('Clearing MySQL Database')
try:
drop_table_content()
except Exception as exp:
logger.error(('Could not clear MySQL Database: ' + repr(exp)))
raise
else:
logger.info('MySQL Database cleared')
return True | -8,534,009,352,897,233,000 | Clear MySQL Database
:return: true | Account/app/mod_system/controller.py | clear_mysql_db | TamSzaGot/mydata-sdk | python | def clear_mysql_db():
'\n Clear MySQL Database\n :return: true\n '
logger.info('Clearing MySQL Database')
try:
drop_table_content()
except Exception as exp:
logger.error(('Could not clear MySQL Database: ' + repr(exp)))
raise
else:
logger.info('MySQL Database cleared')
return True |
def clear_blackbox_db():
'\n Clear black box database\n :return: true\n '
logger.info('Clearing Blackbox Database')
try:
clear_blackbox_sqlite_db()
except Exception as exp:
logger.error(('Could not clear Blackbox Database: ' + repr(exp)))
raise
else:
logger.info('Blackbox Database cleared')
return True | 2,870,511,574,239,039,000 | Clear black box database
:return: true | Account/app/mod_system/controller.py | clear_blackbox_db | TamSzaGot/mydata-sdk | python | def clear_blackbox_db():
'\n Clear black box database\n :return: true\n '
logger.info('Clearing Blackbox Database')
try:
clear_blackbox_sqlite_db()
except Exception as exp:
logger.error(('Could not clear Blackbox Database: ' + repr(exp)))
raise
else:
logger.info('Blackbox Database cleared')
return True |
def clear_api_key_db():
'\n Clear API Key database\n :return: true\n '
logger.info('##########')
logger.info('Clearing ApiKey Database')
try:
clear_apikey_sqlite_db()
except Exception as exp:
logger.error(('Could not clear ApiKey Database: ' + repr(exp)))
raise
else:
logger.info('ApiKey Database cleared')
return True | -5,303,551,338,756,569,000 | Clear API Key database
:return: true | Account/app/mod_system/controller.py | clear_api_key_db | TamSzaGot/mydata-sdk | python | def clear_api_key_db():
'\n Clear API Key database\n :return: true\n '
logger.info('##########')
logger.info('Clearing ApiKey Database')
try:
clear_apikey_sqlite_db()
except Exception as exp:
logger.error(('Could not clear ApiKey Database: ' + repr(exp)))
raise
else:
logger.info('ApiKey Database cleared')
return True |
def system_check():
'\n Check system functionality\n :return: dict\n '
logger.info('Checking system functionality')
try:
status_dict = {'type': 'StatusReport', 'attributes': {'title': 'System running as intended', 'db_row_counts': get_db_statistics()}}
except Exception as exp:
logger.error(('System not running as intended: ' + repr(exp)))
raise
else:
logger.info('System check completed')
return status_dict | 1,838,993,185,687,893,000 | Check system functionality
:return: dict | Account/app/mod_system/controller.py | system_check | TamSzaGot/mydata-sdk | python | def system_check():
'\n Check system functionality\n :return: dict\n '
logger.info('Checking system functionality')
try:
status_dict = {'type': 'StatusReport', 'attributes': {'title': 'System running as intended', 'db_row_counts': get_db_statistics()}}
except Exception as exp:
logger.error(('System not running as intended: ' + repr(exp)))
raise
else:
logger.info('System check completed')
return status_dict |
def sum_mixed_list(mxd_lst: List[Union[(int, float)]]) -> float:
'sum all float number in list\n\n Args:\n input_list (List[float]): arg\n\n Returns:\n float: result\n '
return sum(mxd_lst) | -5,815,243,350,808,947,000 | Sum all int and float numbers in the list
Args:
mxd_lst (List[Union[int, float]]): list of int and float values to sum
Returns:
float: result | 0x00-python_variable_annotations/6-sum_mixed_list.py | sum_mixed_list | JoseAVallejo12/holbertonschool-web_back_end | python | def sum_mixed_list(mxd_lst: List[Union[(int, float)]]) -> float:
'sum all float number in list\n\n Args:\n input_list (List[float]): arg\n\n Returns:\n float: result\n '
return sum(mxd_lst) |
def valid_vars(vars):
"\n Note: run_program_op.InferShape requires `X`/'Out' not be null.\n But it's common in dy2static, fake varBase is created to handle the\n problem.\n "
if vars:
return vars
return [core.VarBase(value=[1], name='Fake_var', place=framework._current_expected_place())] | -6,657,273,862,314,413,000 | Note: run_program_op.InferShape requires `X`/'Out' not be null.
But this is common in dy2static, so a fake VarBase is created to handle the
problem. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | valid_vars | CheQiXiao/Paddle | python | def valid_vars(vars):
"\n Note: run_program_op.InferShape requires `X`/'Out' not be null.\n But it's common in dy2static, fake varBase is created to handle the\n problem.\n "
if vars:
return vars
return [core.VarBase(value=[1], name='Fake_var', place=framework._current_expected_place())] |
def tolist(self):
'\n Flattens the nested sequences into single list.\n '
return flatten(self.__raw_input) | -7,850,800,606,931,174,000 | Flattens the nested sequences into single list. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | tolist | CheQiXiao/Paddle | python | def tolist(self):
'\n \n '
return flatten(self.__raw_input) |
def restore(self, value_list):
'\n Restores the nested sequence from value list.\n '
assert (len(self.tolist()) == len(value_list))
return pack_sequence_as(self.__raw_input, value_list) | 1,636,940,109,083,474,400 | Restores the nested sequence from value list. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | restore | CheQiXiao/Paddle | python | def restore(self, value_list):
'\n \n '
assert (len(self.tolist()) == len(value_list))
return pack_sequence_as(self.__raw_input, value_list) |
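A plain-Python sketch of the flatten/restore round trip the two methods above provide; it mimics the behaviour with made-up values rather than calling Paddle's own flatten/pack_sequence_as utilities.

nested = [1, [2, 3]]                 # stand-in for self.__raw_input
flat = [1, 2, 3]                     # what tolist() would hand back
new_values = [v * 10 for v in flat]  # replace each flat value, e.g. Variable -> VarBase
# restore(new_values) re-packs [10, 20, 30] into [10, [20, 30]], the original nesting,
# after asserting len(flat) == len(new_values).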
def _check_non_variable(self, need_check):
'\n Raises warning if output of traced function contains non-tensor type values.\n '
if need_check:
warning_types = set()
for var in self.tolist():
if (not isinstance(var, (framework.Variable, core.VarBase))):
warning_types.add(type(var))
if warning_types:
logging_utils.warn("Output of traced function contains non-tensor type values: {}. Currently, We don't support to update them while training and will return what we first saw. Please try to return them as tensor.".format(list(warning_types))) | 4,097,785,078,502,480,000 | Raises warning if output of traced function contains non-tensor type values. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _check_non_variable | CheQiXiao/Paddle | python | def _check_non_variable(self, need_check):
'\n \n '
if need_check:
warning_types = set()
for var in self.tolist():
if (not isinstance(var, (framework.Variable, core.VarBase))):
warning_types.add(type(var))
if warning_types:
logging_utils.warn("Output of traced function contains non-tensor type values: {}. Currently, We don't support to update them while training and will return what we first saw. Please try to return them as tensor.".format(list(warning_types))) |
@LazyInitialized
def _infer_program(self):
'\n Lazy initialized property of infer_program.\n '
return self._clone_for_test(self._origin_main_program) | 1,281,564,852,890,502,100 | Lazy initialized property of infer_program. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _infer_program | CheQiXiao/Paddle | python | @LazyInitialized
def _infer_program(self):
'\n \n '
return self._clone_for_test(self._origin_main_program) |
@LazyInitialized
def _train_program(self):
'\n Lazy initialized property of train_program.\n '
train_program = self._append_backward_desc(self._origin_main_program)
self._set_grad_type(self._params, train_program)
return train_program | -2,370,555,548,043,581,400 | Lazy initialized property of train_program. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _train_program | CheQiXiao/Paddle | python | @LazyInitialized
def _train_program(self):
'\n \n '
train_program = self._append_backward_desc(self._origin_main_program)
self._set_grad_type(self._params, train_program)
return train_program |
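A hedged sketch of the lazy-initialization idea behind the @LazyInitialized properties above; this descriptor is illustrative and is not Paddle's LazyInitialized implementation.

class LazyProperty:
    # Run the wrapped method once, on first access, then cache the result on the
    # instance so later accesses bypass the descriptor entirely.
    def __init__(self, func):
        self.func = func

    def __get__(self, instance, owner):
        if instance is None:
            return self
        value = self.func(instance)
        instance.__dict__[self.func.__name__] = value
        return value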
def _verify_program(self, main_program):
'\n Verify that the program parameter is initialized, prune some unused params,\n and remove redundant op callstack.\n '
self._check_params_all_inited(main_program)
self._prune_unused_params(main_program)
return main_program | 944,476,005,322,594,400 | Verify that the program parameter is initialized, prune some unused params,
and remove redundant op callstack. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _verify_program | CheQiXiao/Paddle | python | def _verify_program(self, main_program):
'\n Verify that the program parameter is initialized, prune some unused params,\n and remove redundant op callstack.\n '
self._check_params_all_inited(main_program)
self._prune_unused_params(main_program)
return main_program |
def _prune_unused_params(self, program):
'\n Prune the parameters not used anywhere in the program.\n The `@declarative` may only decorate a sub function which\n contains some unused parameters created in `__init__`.\n So prune these parameters to avoid unnecessary operations in\n `run_program_op`.\n '
required_params = []
for param in self._params:
found_param = False
for block in program.blocks:
for op in block.ops:
if ((param.name in op.input_arg_names) or (param.name in op.output_arg_names)):
required_params.append(param)
found_param = True
break
if found_param:
break
self._params = required_params | -5,956,918,768,261,268,000 | Prune the parameters not used anywhere in the program.
The `@declarative` may only decorate a sub function which
contains some unused parameters created in `__init__`.
So prune these parameters to avoid unnecessary operations in
`run_program_op`. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _prune_unused_params | CheQiXiao/Paddle | python | def _prune_unused_params(self, program):
'\n Prune the parameters not used anywhere in the program.\n The `@declarative` may only decorate a sub function which\n contains some unused parameters created in `__init__`.\n So prune these parameters to avoid unnecessary operations in\n `run_program_op`.\n '
required_params = []
for param in self._params:
found_param = False
for block in program.blocks:
for op in block.ops:
if ((param.name in op.input_arg_names) or (param.name in op.output_arg_names)):
required_params.append(param)
found_param = True
break
if found_param:
break
self._params = required_params |
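A simplified sketch of the name scan _prune_unused_params performs; program, blocks and ops here are stand-ins for the real Paddle objects, and used_arg_names is an illustrative helper.

def used_arg_names(program):
    # Collect every name that any op in any block reads or writes.
    used = set()
    for block in program.blocks:
        for op in block.ops:
            used.update(op.input_arg_names)
            used.update(op.output_arg_names)
    return used

# A parameter whose name never appears in the set can be dropped:
# required = [p for p in params if p.name in used_arg_names(program)]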
def _prepare(self, inputs):
'\n Prepare inputs, outputs, attrs.\n '
assert isinstance(inputs, (tuple, list))
flatten_inputs = flatten(inputs)
input_vars = []
for (i, value) in enumerate(flatten_inputs):
if isinstance(value, np.ndarray):
var = core.VarBase(value=value, name=self._inputs[i].desc.name(), persistable=False, place=framework._current_expected_place(), zero_copy=True)
elif isinstance(value, core.VarBase):
value.name = self._inputs[i].desc.name()
if value.stop_gradient:
var = paddle.to_tensor(value, dtype=value.dtype, place=framework._current_expected_place(), stop_gradient=True)
var.name = value.name
else:
var = value
else:
continue
input_vars.append(var)
out_vars = []
for idx in self._outputs.var_ids:
var = self._outputs[idx]
assert isinstance(var, framework.Variable)
var_desc = var.desc
var_base = core.VarBase(var_desc.dtype(), var_desc.shape(), var_desc.name(), var_desc.type(), False)
out_vars.append(var_base)
tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [], 'program_out_scope', core.VarDesc.VarType.STEP_SCOPES, True)
tmp_scope_vec.value().set_scope(self._inner_scope)
return (input_vars, out_vars, tmp_scope_vec) | 5,576,537,689,546,665,000 | Prepare inputs, outputs, attrs. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _prepare | CheQiXiao/Paddle | python | def _prepare(self, inputs):
'\n \n '
assert isinstance(inputs, (tuple, list))
flatten_inputs = flatten(inputs)
input_vars = []
for (i, value) in enumerate(flatten_inputs):
if isinstance(value, np.ndarray):
var = core.VarBase(value=value, name=self._inputs[i].desc.name(), persistable=False, place=framework._current_expected_place(), zero_copy=True)
elif isinstance(value, core.VarBase):
value.name = self._inputs[i].desc.name()
if value.stop_gradient:
var = paddle.to_tensor(value, dtype=value.dtype, place=framework._current_expected_place(), stop_gradient=True)
var.name = value.name
else:
var = value
else:
continue
input_vars.append(var)
out_vars = []
for idx in self._outputs.var_ids:
var = self._outputs[idx]
assert isinstance(var, framework.Variable)
var_desc = var.desc
var_base = core.VarBase(var_desc.dtype(), var_desc.shape(), var_desc.name(), var_desc.type(), False)
out_vars.append(var_base)
tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [], 'program_out_scope', core.VarDesc.VarType.STEP_SCOPES, True)
tmp_scope_vec.value().set_scope(self._inner_scope)
return (input_vars, out_vars, tmp_scope_vec) |
def _restore_out(self, out_vars):
'\n Restores same nested outputs by only replacing the Variable with VarBase.\n '
flatten_outputs = self._outputs.tolist()
for (i, idx) in enumerate(self._outputs.var_ids):
flatten_outputs[idx] = out_vars[i]
outs = self._outputs.restore(flatten_outputs)
if ((outs is not None) and (len(outs) == 1)):
outs = outs[0]
return outs | -6,028,813,199,620,918,000 | Restores same nested outputs by only replacing the Variable with VarBase. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _restore_out | CheQiXiao/Paddle | python | def _restore_out(self, out_vars):
'\n \n '
flatten_outputs = self._outputs.tolist()
for (i, idx) in enumerate(self._outputs.var_ids):
flatten_outputs[idx] = out_vars[i]
outs = self._outputs.restore(flatten_outputs)
if ((outs is not None) and (len(outs) == 1)):
outs = outs[0]
return outs |
def _remove_no_value(self, out_vars):
'\n Removes invalid value for variable-length return statement\n '
if isinstance(out_vars, core.VarBase):
if self._is_no_value(out_vars):
return None
return out_vars
elif isinstance(out_vars, (tuple, list)):
if isinstance(out_vars, tuple):
res = tuple((var for var in out_vars if (not self._is_no_value(var))))
else:
res = [var for var in out_vars if (not self._is_no_value(var))]
has_removed = (len(out_vars) > len(res))
if ((len(res) == 0) and has_removed):
return None
elif ((len(res) == 1) and has_removed):
return res[0]
return res
return out_vars | -7,001,538,010,932,496,000 | Removes invalid value for variable-length return statement | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _remove_no_value | CheQiXiao/Paddle | python | def _remove_no_value(self, out_vars):
'\n \n '
if isinstance(out_vars, core.VarBase):
if self._is_no_value(out_vars):
return None
return out_vars
elif isinstance(out_vars, (tuple, list)):
if isinstance(out_vars, tuple):
res = tuple((var for var in out_vars if (not self._is_no_value(var))))
else:
res = [var for var in out_vars if (not self._is_no_value(var))]
has_removed = (len(out_vars) > len(res))
if ((len(res) == 0) and has_removed):
return None
elif ((len(res) == 1) and has_removed):
return res[0]
return res
return out_vars |
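A plain-Python restatement of the filtering rule _remove_no_value applies to list outputs; drop_sentinels and is_sentinel are illustrative stand-ins for the method and its _is_no_value check.

def drop_sentinels(outputs, is_sentinel):
    kept = [o for o in outputs if not is_sentinel(o)]
    removed = len(kept) < len(outputs)
    if removed and not kept:
        return None          # every output was a placeholder
    if removed and len(kept) == 1:
        return kept[0]       # unwrap a single surviving value
    return kept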
def _remove_op_call_stack(self, main_program):
"\n Remove op's python call stack with redundant low-level error messages related to\n transforamtions to avoid confusing users.\n "
assert isinstance(main_program, framework.Program)
for block in main_program.blocks:
for op in block.ops:
if op.has_attr('op_callstack'):
op._remove_attr('op_callstack')
return main_program | 2,915,306,587,125,424,000 | Remove op's python call stack with redundant low-level error messages related to
transformations to avoid confusing users. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _remove_op_call_stack | CheQiXiao/Paddle | python | def _remove_op_call_stack(self, main_program):
"\n Remove op's python call stack with redundant low-level error messages related to\n transforamtions to avoid confusing users.\n "
assert isinstance(main_program, framework.Program)
for block in main_program.blocks:
for op in block.ops:
if op.has_attr('op_callstack'):
op._remove_attr('op_callstack')
return main_program |
def _check_params_all_inited(self, main_program):
'\n Check all params from main program are already initialized, see details as follows:\n 1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph.\n 2. all parameters from transformed program can be found in self._params.\n Because they share same data with ParamBase of original dygraph.\n '
if (not isinstance(self._params, (list, tuple))):
raise TypeError(('Type of self._params in PartialProgramLayer should be list or tuple, but received %s.' % type(self._params)))
param_and_buffer_names_set = set()
for (i, var) in enumerate(self._params):
if (not isinstance(var, core.VarBase)):
raise TypeError('Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.format(i, type(var)))
param_and_buffer_names_set.add(var.name)
for block in main_program.blocks:
for (name, var) in six.iteritems(block.vars):
if isinstance(var, framework.Parameter):
if (name not in param_and_buffer_names_set):
raise ValueError(("\n\tWe don't support to define layer with parameters in the function decorated by `@declarative`.\n\tBecause that will re-defined parameters every time when you run the function.\n\tBut we found parameter(%s) was created in the decorated function.\n\tPlease define the layer with parameters in `__init__` function." % name)) | -1,005,667,989,976,922,900 | Check all params from main program are already initialized, see details as follows:
1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph.
2. all parameters from transformed program can be found in self._params.
Because they share same data with ParamBase of original dygraph. | python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py | _check_params_all_inited | CheQiXiao/Paddle | python | def _check_params_all_inited(self, main_program):
'\n Check all params from main program are already initialized, see details as follows:\n 1. all parameters in self._params should be type `framework.ParamBase` which are created in dygraph.\n 2. all parameters from transformed program can be found in self._params.\n Because they share same data with ParamBase of original dygraph.\n '
if (not isinstance(self._params, (list, tuple))):
raise TypeError(('Type of self._params in PartialProgramLayer should be list or tuple, but received %s.' % type(self._params)))
param_and_buffer_names_set = set()
for (i, var) in enumerate(self._params):
if (not isinstance(var, core.VarBase)):
raise TypeError('Type of self._params[{}] in PartialProgramLayer should be Parameter or Variable, but received {}.'.format(i, type(var)))
param_and_buffer_names_set.add(var.name)
for block in main_program.blocks:
for (name, var) in six.iteritems(block.vars):
if isinstance(var, framework.Parameter):
if (name not in param_and_buffer_names_set):
raise ValueError(("\n\tWe don't support to define layer with parameters in the function decorated by `@declarative`.\n\tBecause that will re-defined parameters every time when you run the function.\n\tBut we found parameter(%s) was created in the decorated function.\n\tPlease define the layer with parameters in `__init__` function." % name)) |
def __init__(self, eventEngine, gatewayName):
'Constructor'
self.eventEngine = eventEngine
self.gatewayName = gatewayName | 1,672,423,060,279,163,100 | Constructor | redtorch/trader/vtGateway.py | __init__ | sun0x00/redtorch_python | python | def __init__(self, eventEngine, gatewayName):
self.eventEngine = eventEngine
self.gatewayName = gatewayName |
def onTick(self, tick):
'Market tick push'
event1 = Event(type_=EVENT_TICK)
event1.dict_['data'] = tick
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_TICK + tick.vtSymbol))
event2.dict_['data'] = tick
self.eventEngine.put(event2) | 3,856,064,092,815,750,700 | Market tick push | redtorch/trader/vtGateway.py | onTick | sun0x00/redtorch_python | python | def onTick(self, tick):
event1 = Event(type_=EVENT_TICK)
event1.dict_['data'] = tick
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_TICK + tick.vtSymbol))
event2.dict_['data'] = tick
self.eventEngine.put(event2) |
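A minimal sketch of the two-event push pattern the gateway callbacks above share: each update is posted once under a general type and once under a type suffixed with the symbol or ID. Event, type_, dict_ and put come from the surrounding gateway framework; the push_twice helper itself is illustrative.

def push_twice(event_engine, event_type, key, data):
    general = Event(type_=event_type)            # e.g. EVENT_TICK, caught by broad listeners
    general.dict_['data'] = data
    event_engine.put(general)
    specific = Event(type_=(event_type + key))   # e.g. EVENT_TICK + tick.vtSymbol
    specific.dict_['data'] = data
    event_engine.put(specific)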
def onTrade(self, trade):
'Trade information push'
event1 = Event(type_=EVENT_TRADE)
event1.dict_['data'] = trade
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_TRADE + trade.vtSymbol))
event2.dict_['data'] = trade
self.eventEngine.put(event2) | 2,063,537,404,431,998,500 | Trade information push | redtorch/trader/vtGateway.py | onTrade | sun0x00/redtorch_python | python | def onTrade(self, trade):
event1 = Event(type_=EVENT_TRADE)
event1.dict_['data'] = trade
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_TRADE + trade.vtSymbol))
event2.dict_['data'] = trade
self.eventEngine.put(event2) |
def onOrder(self, order):
'Order update push'
event1 = Event(type_=EVENT_ORDER)
event1.dict_['data'] = order
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_ORDER + order.vtOrderID))
event2.dict_['data'] = order
self.eventEngine.put(event2) | 5,707,298,845,992,048,000 | Order update push | redtorch/trader/vtGateway.py | onOrder | sun0x00/redtorch_python | python | def onOrder(self, order):
event1 = Event(type_=EVENT_ORDER)
event1.dict_['data'] = order
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_ORDER + order.vtOrderID))
event2.dict_['data'] = order
self.eventEngine.put(event2) |
def onPosition(self, position):
'Position information push'
event1 = Event(type_=EVENT_POSITION)
event1.dict_['data'] = position
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_POSITION + position.vtSymbol))
event2.dict_['data'] = position
self.eventEngine.put(event2) | 7,488,092,332,243,463,000 | Position information push | redtorch/trader/vtGateway.py | onPosition | sun0x00/redtorch_python | python | def onPosition(self, position):
event1 = Event(type_=EVENT_POSITION)
event1.dict_['data'] = position
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_POSITION + position.vtSymbol))
event2.dict_['data'] = position
self.eventEngine.put(event2) |
def onAccount(self, account):
'Account information push'
event1 = Event(type_=EVENT_ACCOUNT)
event1.dict_['data'] = account
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_ACCOUNT + account.vtAccountID))
event2.dict_['data'] = account
self.eventEngine.put(event2) | -2,795,242,707,031,535,600 | Account information push | redtorch/trader/vtGateway.py | onAccount | sun0x00/redtorch_python | python | def onAccount(self, account):
event1 = Event(type_=EVENT_ACCOUNT)
event1.dict_['data'] = account
self.eventEngine.put(event1)
event2 = Event(type_=(EVENT_ACCOUNT + account.vtAccountID))
event2.dict_['data'] = account
self.eventEngine.put(event2) |
def onError(self, error):
'Error information push'
event1 = Event(type_=EVENT_ERROR)
event1.dict_['data'] = error
self.eventEngine.put(event1) | 4,894,823,628,181,121,000 | Error information push | redtorch/trader/vtGateway.py | onError | sun0x00/redtorch_python | python | def onError(self, error):
event1 = Event(type_=EVENT_ERROR)
event1.dict_['data'] = error
self.eventEngine.put(event1) |
def onLog(self, log):
'Log push'
event1 = Event(type_=EVENT_LOG)
event1.dict_['data'] = log
self.eventEngine.put(event1) | 7,426,680,771,114,056,000 | Log push | redtorch/trader/vtGateway.py | onLog | sun0x00/redtorch_python | python | def onLog(self, log):
event1 = Event(type_=EVENT_LOG)
event1.dict_['data'] = log
self.eventEngine.put(event1) |
def onContract(self, contract):
'Contract basic information push'
event1 = Event(type_=EVENT_CONTRACT)
event1.dict_['data'] = contract
self.eventEngine.put(event1) | 2,881,356,330,586,334,000 | Contract basic information push | redtorch/trader/vtGateway.py | onContract | sun0x00/redtorch_python | python | def onContract(self, contract):
event1 = Event(type_=EVENT_CONTRACT)
event1.dict_['data'] = contract
self.eventEngine.put(event1) |
def connect(self):
'Connect'
pass | 8,699,725,801,578,168,000 | Connect | redtorch/trader/vtGateway.py | connect | sun0x00/redtorch_python | python | def connect(self):
pass |
def subscribe(self, subscribeReq):
'Subscribe to market data'
pass | -1,651,100,944,133,235,000 | Subscribe to market data | redtorch/trader/vtGateway.py | subscribe | sun0x00/redtorch_python | python | def subscribe(self, subscribeReq):
pass |
def sendOrder(self, orderReq):
'Send order'
pass | -6,865,453,469,559,764,000 | Send order | redtorch/trader/vtGateway.py | sendOrder | sun0x00/redtorch_python | python | def sendOrder(self, orderReq):
pass |
def cancelOrder(self, cancelOrderReq):
'Cancel order'
pass | 5,289,705,947,194,827,000 | Cancel order | redtorch/trader/vtGateway.py | cancelOrder | sun0x00/redtorch_python | python | def cancelOrder(self, cancelOrderReq):
pass |
def qryAccount(self):
'Query account funds'
pass | 8,067,137,450,306,017,000 | Query account funds | redtorch/trader/vtGateway.py | qryAccount | sun0x00/redtorch_python | python | def qryAccount(self):
pass |
def qryPosition(self):
'Query positions'
pass | 1,786,019,952,844,000,000 | Query positions | redtorch/trader/vtGateway.py | qryPosition | sun0x00/redtorch_python | python | def qryPosition(self):
pass |
def close(self):
'Close'
pass | 8,479,221,086,581,067,000 | Close | redtorch/trader/vtGateway.py | close | sun0x00/redtorch_python | python | def close(self):
pass |
def get(self, request):
'Retrieve the user.'
user = request.user
serializer = self.serializer_class(user)
return Response(serializer.data) | 7,155,900,420,248,859,000 | Retrieve the user. | dakara_server/users/views.py | get | DakaraProject/dakara-server | python | def get(self, request):
user = request.user
serializer = self.serializer_class(user)
return Response(serializer.data) |
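A hedged sketch of how the retrieve view above is typically exercised with Django REST framework's test client; the URL and the some_user fixture are assumptions, not taken from the source repository.

from rest_framework.test import APIClient

client = APIClient()
client.force_authenticate(user=some_user)       # some_user is a hypothetical existing user
response = client.get('/api/users/current/')    # illustrative URL for the view
assert response.status_code == 200
assert response.data['username'] == some_user.username  # exact fields depend on the serializer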
def skip_201911_and_older(duthost):
' Skip the current test if the DUT version is 201911 or older.\n '
if (parse_version(duthost.kernel_version) <= parse_version('4.9.0')):
pytest.skip('Test not supported for 201911 images or older. Skipping the test') | -6,194,294,265,274,752,000 | Skip the current test if the DUT version is 201911 or older. | tests/route/test_static_route.py | skip_201911_and_older | LiuKuan-AF/sonic-mgmt | python | def skip_201911_and_older(duthost):
' \n '
if (parse_version(duthost.kernel_version) <= parse_version('4.9.0')):
pytest.skip('Test not supported for 201911 images or older. Skipping the test') |
def is_dualtor(tbinfo):
'Check if the testbed is dualtor.'
return ('dualtor' in tbinfo['topo']['name']) | 2,524,877,780,519,400,400 | Check if the testbed is dualtor. | tests/route/test_static_route.py | is_dualtor | LiuKuan-AF/sonic-mgmt | python | def is_dualtor(tbinfo):
return ('dualtor' in tbinfo['topo']['name']) |
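Illustrative inputs for the two pytest helpers above; the tbinfo shape follows the record, and the kernel version string is made up.

tbinfo = {'topo': {'name': 'dualtor-64'}}
assert is_dualtor(tbinfo)  # 'dualtor' appears in the topo name

# skip_201911_and_older compares duthost.kernel_version against '4.9.0' with
# parse_version, so a DUT reporting '4.9.0' or older is skipped via pytest.skip().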