Dataset columns (each record below lists these fields, one per line, in this order):

  body                      string, length 26 to 98.2k
  body_hash                 int64, -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring                 string, length 1 to 16.8k
  path                      string, length 5 to 230
  name                      string, length 1 to 96
  repository_name           string, length 7 to 89
  lang                      string, 1 distinct value
  body_without_docstring    string, length 20 to 98.2k
def median(self, axis: Optional[Axis]=None, numeric_only: bool=None, accuracy: int=10000) -> Union[(Scalar, 'Series')]: "\n Return the median of the values for the requested axis.\n\n .. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon\n approximate percentile computation because computing median across a large dataset\n is extremely expensive.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n median : scalar or Series\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])\n >>> df\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n a 25.0\n b 3.0\n dtype: float64\n\n On a Series:\n\n >>> df['a'].median()\n 25.0\n >>> (df['b'] + 100).median()\n 103.0\n\n For multi-index columns,\n\n >>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])\n >>> df\n x y\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n x a 25.0\n y b 3.0\n dtype: float64\n\n >>> df.median(axis=1)\n 0 12.5\n 1 11.5\n 2 14.0\n 3 18.5\n 4 15.5\n dtype: float64\n\n On a Series:\n\n >>> df[('x', 'a')].median()\n 25.0\n >>> (df[('y', 'b')] + 100).median()\n 103.0\n " axis = validate_axis(axis) if ((numeric_only is None) and (axis == 0)): numeric_only = True if (not isinstance(accuracy, int)): raise TypeError(('accuracy must be an integer; however, got [%s]' % type(accuracy).__name__)) def median(spark_column: Column, spark_type: DataType) -> Column: if isinstance(spark_type, (BooleanType, NumericType)): return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy) else: raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString())) return self._reduce_for_stat_function(median, name='median', numeric_only=numeric_only, axis=axis)
-876,830,135,422,874,200
Return the median of the values for the requested axis. .. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon approximate percentile computation because computing median across a large dataset is extremely expensive. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. numeric_only : bool, default None Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. accuracy : int, optional Default accuracy of approximation. Larger value means better accuracy. The relative error can be deduced by 1.0 / accuracy. Returns ------- median : scalar or Series Examples -------- >>> df = ps.DataFrame({ ... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b']) >>> df a b 0 24.0 1 1 21.0 2 2 25.0 3 3 33.0 4 4 26.0 5 On a DataFrame: >>> df.median() a 25.0 b 3.0 dtype: float64 On a Series: >>> df['a'].median() 25.0 >>> (df['b'] + 100).median() 103.0 For multi-index columns, >>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')]) >>> df x y a b 0 24.0 1 1 21.0 2 2 25.0 3 3 33.0 4 4 26.0 5 On a DataFrame: >>> df.median() x a 25.0 y b 3.0 dtype: float64 >>> df.median(axis=1) 0 12.5 1 11.5 2 14.0 3 18.5 4 15.5 dtype: float64 On a Series: >>> df[('x', 'a')].median() 25.0 >>> (df[('y', 'b')] + 100).median() 103.0
python/pyspark/pandas/generic.py
median
XpressAI/spark
python
def median(self, axis: Optional[Axis]=None, numeric_only: bool=None, accuracy: int=10000) -> Union[(Scalar, 'Series')]: "\n Return the median of the values for the requested axis.\n\n .. note:: Unlike pandas', the median in pandas-on-Spark is an approximated median based upon\n approximate percentile computation because computing median across a large dataset\n is extremely expensive.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n accuracy : int, optional\n Default accuracy of approximation. Larger value means better accuracy.\n The relative error can be deduced by 1.0 / accuracy.\n\n Returns\n -------\n median : scalar or Series\n\n Examples\n --------\n >>> df = ps.DataFrame({\n ... 'a': [24., 21., 25., 33., 26.], 'b': [1, 2, 3, 4, 5]}, columns=['a', 'b'])\n >>> df\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n a 25.0\n b 3.0\n dtype: float64\n\n On a Series:\n\n >>> df['a'].median()\n 25.0\n >>> (df['b'] + 100).median()\n 103.0\n\n For multi-index columns,\n\n >>> df.columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')])\n >>> df\n x y\n a b\n 0 24.0 1\n 1 21.0 2\n 2 25.0 3\n 3 33.0 4\n 4 26.0 5\n\n On a DataFrame:\n\n >>> df.median()\n x a 25.0\n y b 3.0\n dtype: float64\n\n >>> df.median(axis=1)\n 0 12.5\n 1 11.5\n 2 14.0\n 3 18.5\n 4 15.5\n dtype: float64\n\n On a Series:\n\n >>> df[('x', 'a')].median()\n 25.0\n >>> (df[('y', 'b')] + 100).median()\n 103.0\n " axis = validate_axis(axis) if ((numeric_only is None) and (axis == 0)): numeric_only = True if (not isinstance(accuracy, int)): raise TypeError(('accuracy must be an integer; however, got [%s]' % type(accuracy).__name__)) def median(spark_column: Column, spark_type: DataType) -> Column: if isinstance(spark_type, (BooleanType, NumericType)): return F.percentile_approx(spark_column.cast(DoubleType()), 0.5, accuracy) else: raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString())) return self._reduce_for_stat_function(median, name='median', numeric_only=numeric_only, axis=axis)
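A minimal sketch of the approximation behind median: the method delegates to Spark SQL's percentile_approx, so the same number can be computed directly on a Spark DataFrame. The local SparkSession and the column of sample values (taken from the docstring) are assumptions for the demo only, and percentile_approx requires a reasonably recent PySpark (3.1+).

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(24.0,), (21.0,), (25.0,), (33.0,), (26.0,)], ["a"])

# 0.5 requests the 50th percentile; a larger accuracy lowers the relative
# error, which is on the order of 1.0 / accuracy.
sdf.select(F.percentile_approx("a", 0.5, 10000).alias("median_a")).show()
# Expected to show 25.0, matching df['a'].median() in the docstring above.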
def sem(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]: '\n Return unbiased standard error of the mean over requested axis.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n scalar(for Series) or Series(for DataFrame)\n\n Examples\n --------\n >>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> psdf\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf.sem()\n a 0.57735\n b 0.57735\n dtype: float64\n\n >>> psdf.sem(ddof=0)\n a 0.471405\n b 0.471405\n dtype: float64\n\n >>> psdf.sem(axis=1)\n 0 1.5\n 1 1.5\n 2 1.5\n dtype: float64\n\n Support for Series\n\n >>> psser = psdf.a\n >>> psser\n 0 1\n 1 2\n 2 3\n Name: a, dtype: int64\n\n >>> psser.sem()\n 0.5773502691896258\n\n >>> psser.sem(ddof=0)\n 0.47140452079103173\n ' assert (ddof in (0, 1)) axis = validate_axis(axis) if ((numeric_only is None) and (axis == 0)): numeric_only = True def std(spark_column: Column, spark_type: DataType) -> Column: if isinstance(spark_type, BooleanType): spark_column = spark_column.cast(LongType()) elif (not isinstance(spark_type, NumericType)): raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString())) if (ddof == 0): return F.stddev_pop(spark_column) else: return F.stddev_samp(spark_column) def sem(spark_column: Column, spark_type: DataType) -> Column: return (std(spark_column, spark_type) / pow(Frame._count_expr(spark_column, spark_type), 0.5)) return self._reduce_for_stat_function(sem, name='sem', numeric_only=numeric_only, axis=axis, ddof=ddof)
4,448,271,079,385,983,500
Return unbiased standard error of the mean over requested axis. Parameters ---------- axis : {index (0), columns (1)} Axis for the function to be applied on. ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. numeric_only : bool, default None Include only float, int, boolean columns. False is not supported. This parameter is mainly for pandas compatibility. Returns ------- scalar(for Series) or Series(for DataFrame) Examples -------- >>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) >>> psdf a b 0 1 4 1 2 5 2 3 6 >>> psdf.sem() a 0.57735 b 0.57735 dtype: float64 >>> psdf.sem(ddof=0) a 0.471405 b 0.471405 dtype: float64 >>> psdf.sem(axis=1) 0 1.5 1 1.5 2 1.5 dtype: float64 Support for Series >>> psser = psdf.a >>> psser 0 1 1 2 2 3 Name: a, dtype: int64 >>> psser.sem() 0.5773502691896258 >>> psser.sem(ddof=0) 0.47140452079103173
python/pyspark/pandas/generic.py
sem
XpressAI/spark
python
def sem(self, axis: Optional[Axis]=None, ddof: int=1, numeric_only: bool=None) -> Union[(Scalar, 'Series')]: '\n Return unbiased standard error of the mean over requested axis.\n\n Parameters\n ----------\n axis : {index (0), columns (1)}\n Axis for the function to be applied on.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\n numeric_only : bool, default None\n Include only float, int, boolean columns. False is not supported. This parameter\n is mainly for pandas compatibility.\n\n Returns\n -------\n scalar(for Series) or Series(for DataFrame)\n\n Examples\n --------\n >>> psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n >>> psdf\n a b\n 0 1 4\n 1 2 5\n 2 3 6\n\n >>> psdf.sem()\n a 0.57735\n b 0.57735\n dtype: float64\n\n >>> psdf.sem(ddof=0)\n a 0.471405\n b 0.471405\n dtype: float64\n\n >>> psdf.sem(axis=1)\n 0 1.5\n 1 1.5\n 2 1.5\n dtype: float64\n\n Support for Series\n\n >>> psser = psdf.a\n >>> psser\n 0 1\n 1 2\n 2 3\n Name: a, dtype: int64\n\n >>> psser.sem()\n 0.5773502691896258\n\n >>> psser.sem(ddof=0)\n 0.47140452079103173\n ' assert (ddof in (0, 1)) axis = validate_axis(axis) if ((numeric_only is None) and (axis == 0)): numeric_only = True def std(spark_column: Column, spark_type: DataType) -> Column: if isinstance(spark_type, BooleanType): spark_column = spark_column.cast(LongType()) elif (not isinstance(spark_type, NumericType)): raise TypeError('Could not convert {} ({}) to numeric'.format(spark_type_to_pandas_dtype(spark_type), spark_type.simpleString())) if (ddof == 0): return F.stddev_pop(spark_column) else: return F.stddev_samp(spark_column) def sem(spark_column: Column, spark_type: DataType) -> Column: return (std(spark_column, spark_type) / pow(Frame._count_expr(spark_column, spark_type), 0.5)) return self._reduce_for_stat_function(sem, name='sem', numeric_only=numeric_only, axis=axis, ddof=ddof)
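As a hedged cross-check of the stddev_samp / sqrt(count) expression used by sem above, the same quantity can be written with plain Spark SQL aggregates. The local session and the three sample values are assumptions for illustration.

from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame([(1,), (2,), (3,)], ["a"])

# Sample standard deviation divided by sqrt(N) -- the ddof=1 branch above.
sdf.select((F.stddev_samp("a") / F.sqrt(F.count("a"))).alias("sem_a")).show()
# Expected value ~0.5774, matching psdf.a.sem() in the docstring.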
@property def size(self) -> int: "\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n Examples\n --------\n >>> s = ps.Series({'a': 1, 'b': 2, 'c': None})\n >>> s.size\n 3\n\n >>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})\n >>> df.size\n 6\n\n >>> df = ps.DataFrame(index=[1, 2, None])\n >>> df.size\n 0\n " num_columns = len(self._internal.data_spark_columns) if (num_columns == 0): return 0 else: return (len(self) * num_columns)
-4,052,080,229,098,774,500
Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. Examples -------- >>> s = ps.Series({'a': 1, 'b': 2, 'c': None}) >>> s.size 3 >>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]}) >>> df.size 6 >>> df = ps.DataFrame(index=[1, 2, None]) >>> df.size 0
python/pyspark/pandas/generic.py
size
XpressAI/spark
python
@property def size(self) -> int: "\n Return an int representing the number of elements in this object.\n\n Return the number of rows if Series. Otherwise return the number of\n rows times number of columns if DataFrame.\n\n Examples\n --------\n >>> s = ps.Series({'a': 1, 'b': 2, 'c': None})\n >>> s.size\n 3\n\n >>> df = ps.DataFrame({'col1': [1, 2, None], 'col2': [3, 4, None]})\n >>> df.size\n 6\n\n >>> df = ps.DataFrame(index=[1, 2, None])\n >>> df.size\n 0\n " num_columns = len(self._internal.data_spark_columns) if (num_columns == 0): return 0 else: return (len(self) * num_columns)
def abs(self: FrameLike) -> FrameLike: "\n Return a Series/DataFrame with absolute numeric value of each element.\n\n Returns\n -------\n abs : Series/DataFrame containing the absolute value of each element.\n\n Examples\n --------\n\n Absolute numeric values in a Series.\n\n >>> s = ps.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a DataFrame.\n\n >>> df = ps.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... },\n ... columns=['a', 'b', 'c'])\n >>> df.abs()\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 30\n 3 7 40 50\n " def abs(psser: 'Series') -> Union[('Series', Column)]: if isinstance(psser.spark.data_type, BooleanType): return psser elif isinstance(psser.spark.data_type, NumericType): return psser._with_new_scol(F.abs(psser.spark.column), field=psser._internal.data_fields[0]) else: raise TypeError('bad operand type for abs(): {} ({})'.format(spark_type_to_pandas_dtype(psser.spark.data_type), psser.spark.data_type.simpleString())) return self._apply_series_op(abs)
-8,096,641,240,787,788,000
Return a Series/DataFrame with absolute numeric value of each element. Returns ------- abs : Series/DataFrame containing the absolute value of each element. Examples -------- Absolute numeric values in a Series. >>> s = ps.Series([-1.10, 2, -3.33, 4]) >>> s.abs() 0 1.10 1 2.00 2 3.33 3 4.00 dtype: float64 Absolute numeric values in a DataFrame. >>> df = ps.DataFrame({ ... 'a': [4, 5, 6, 7], ... 'b': [10, 20, 30, 40], ... 'c': [100, 50, -30, -50] ... }, ... columns=['a', 'b', 'c']) >>> df.abs() a b c 0 4 10 100 1 5 20 50 2 6 30 30 3 7 40 50
python/pyspark/pandas/generic.py
abs
XpressAI/spark
python
def abs(self: FrameLike) -> FrameLike: "\n Return a Series/DataFrame with absolute numeric value of each element.\n\n Returns\n -------\n abs : Series/DataFrame containing the absolute value of each element.\n\n Examples\n --------\n\n Absolute numeric values in a Series.\n\n >>> s = ps.Series([-1.10, 2, -3.33, 4])\n >>> s.abs()\n 0 1.10\n 1 2.00\n 2 3.33\n 3 4.00\n dtype: float64\n\n Absolute numeric values in a DataFrame.\n\n >>> df = ps.DataFrame({\n ... 'a': [4, 5, 6, 7],\n ... 'b': [10, 20, 30, 40],\n ... 'c': [100, 50, -30, -50]\n ... },\n ... columns=['a', 'b', 'c'])\n >>> df.abs()\n a b c\n 0 4 10 100\n 1 5 20 50\n 2 6 30 30\n 3 7 40 50\n " def abs(psser: 'Series') -> Union[('Series', Column)]: if isinstance(psser.spark.data_type, BooleanType): return psser elif isinstance(psser.spark.data_type, NumericType): return psser._with_new_scol(F.abs(psser.spark.column), field=psser._internal.data_fields[0]) else: raise TypeError('bad operand type for abs(): {} ({})'.format(spark_type_to_pandas_dtype(psser.spark.data_type), psser.spark.data_type.simpleString())) return self._apply_series_op(abs)
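One case the abs doctests above do not show is the non-numeric branch, which raises a TypeError. A small sketch of that failure mode, assuming a Spark-backed environment where pyspark.pandas is importable; the exact message is taken from the format string in the code.

import pyspark.pandas as ps

s = ps.Series(["x", "y"])
try:
    s.abs()
except TypeError as e:
    print(e)  # e.g. "bad operand type for abs(): object (string)"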
def groupby(self: FrameLike, by: Union[(Any, Tuple, 'Series', List[Union[(Any, Tuple, 'Series')]])], axis: Axis=0, as_index: bool=True, dropna: bool=True) -> 'GroupBy[FrameLike]': '\n Group DataFrame or Series using a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : Series, label, or list of labels\n Used to determine the groups for the groupby.\n If Series is passed, the Series or dict VALUES\n will be used to determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``.\n axis : int, default 0 or \'index\'\n Can only be set to 0 at the moment.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively "SQL-style" grouped output.\n dropna : bool, default True\n If True, and if group keys contain NA values,\n NA values together with row/column will be dropped.\n If False, NA values will also be treated as the key in groups.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n pyspark.pandas.groupby.GroupBy\n\n Examples\n --------\n >>> df = ps.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... \'Max Speed\': [380., 370., 24., 26.]},\n ... columns=[\'Animal\', \'Max Speed\'])\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n\n >>> df.groupby([\'Animal\']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n >>> df.groupby([\'Animal\'], as_index=False).mean().sort_values(\'Animal\')\n ... 
# doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n Animal Max Speed\n ...Falcon 375.0\n ...Parrot 25.0\n\n We can also choose to include NA in group keys or not by setting dropna parameter,\n the default setting is True:\n\n >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n >>> df = ps.DataFrame(l, columns=["a", "b", "c"])\n >>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n\n >>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n NaN 1 4\n ' if isinstance(by, ps.DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__)) elif isinstance(by, ps.Series): new_by = [by] elif is_name_like_tuple(by): if isinstance(self, ps.Series): raise KeyError(by) new_by = [cast(Tuple, by)] elif is_name_like_value(by): if isinstance(self, ps.Series): raise KeyError(by) new_by = [(by,)] elif is_list_like(by): new_by = [] for key in by: if isinstance(key, ps.DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key).__name__)) elif isinstance(key, ps.Series): new_by.append(key) elif is_name_like_tuple(key): if isinstance(self, ps.Series): raise KeyError(key) new_by.append(key) elif is_name_like_value(key): if isinstance(self, ps.Series): raise KeyError(key) new_by.append((key,)) else: raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key).__name__)) else: raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__)) if (not len(new_by)): raise ValueError('No group keys passed!') axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
5,165,825,879,761,929,000
Group DataFrame or Series using a Series of columns. A groupby operation involves some combination of splitting the object, applying a function, and combining the results. This can be used to group large amounts of data and compute operations on these groups. Parameters ---------- by : Series, label, or list of labels Used to determine the groups for the groupby. If Series is passed, the Series or dict VALUES will be used to determine the groups. A label or list of labels may be passed to group by the columns in ``self``. axis : int, default 0 or 'index' Can only be set to 0 at the moment. as_index : bool, default True For aggregated output, return object with group labels as the index. Only relevant for DataFrame input. as_index=False is effectively "SQL-style" grouped output. dropna : bool, default True If True, and if group keys contain NA values, NA values together with row/column will be dropped. If False, NA values will also be treated as the key in groups. Returns ------- DataFrameGroupBy or SeriesGroupBy Depends on the calling object and returns groupby object that contains information about the groups. See Also -------- pyspark.pandas.groupby.GroupBy Examples -------- >>> df = ps.DataFrame({'Animal': ['Falcon', 'Falcon', ... 'Parrot', 'Parrot'], ... 'Max Speed': [380., 370., 24., 26.]}, ... columns=['Animal', 'Max Speed']) >>> df Animal Max Speed 0 Falcon 380.0 1 Falcon 370.0 2 Parrot 24.0 3 Parrot 26.0 >>> df.groupby(['Animal']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE Max Speed Animal Falcon 375.0 Parrot 25.0 >>> df.groupby(['Animal'], as_index=False).mean().sort_values('Animal') ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE Animal Max Speed ...Falcon 375.0 ...Parrot 25.0 We can also choose to include NA in group keys or not by setting dropna parameter, the default setting is True: >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]] >>> df = ps.DataFrame(l, columns=["a", "b", "c"]) >>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE a c b 1.0 2 3 2.0 2 5 >>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE a c b 1.0 2 3 2.0 2 5 NaN 1 4
python/pyspark/pandas/generic.py
groupby
XpressAI/spark
python
def groupby(self: FrameLike, by: Union[(Any, Tuple, 'Series', List[Union[(Any, Tuple, 'Series')]])], axis: Axis=0, as_index: bool=True, dropna: bool=True) -> 'GroupBy[FrameLike]': '\n Group DataFrame or Series using a Series of columns.\n\n A groupby operation involves some combination of splitting the\n object, applying a function, and combining the results. This can be\n used to group large amounts of data and compute operations on these\n groups.\n\n Parameters\n ----------\n by : Series, label, or list of labels\n Used to determine the groups for the groupby.\n If Series is passed, the Series or dict VALUES\n will be used to determine the groups. A label or list of\n labels may be passed to group by the columns in ``self``.\n axis : int, default 0 or \'index\'\n Can only be set to 0 at the moment.\n as_index : bool, default True\n For aggregated output, return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively "SQL-style" grouped output.\n dropna : bool, default True\n If True, and if group keys contain NA values,\n NA values together with row/column will be dropped.\n If False, NA values will also be treated as the key in groups.\n\n Returns\n -------\n DataFrameGroupBy or SeriesGroupBy\n Depends on the calling object and returns groupby object that\n contains information about the groups.\n\n See Also\n --------\n pyspark.pandas.groupby.GroupBy\n\n Examples\n --------\n >>> df = ps.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... \'Max Speed\': [380., 370., 24., 26.]},\n ... columns=[\'Animal\', \'Max Speed\'])\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n\n >>> df.groupby([\'Animal\']).mean().sort_index() # doctest: +NORMALIZE_WHITESPACE\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n >>> df.groupby([\'Animal\'], as_index=False).mean().sort_values(\'Animal\')\n ... 
# doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE\n Animal Max Speed\n ...Falcon 375.0\n ...Parrot 25.0\n\n We can also choose to include NA in group keys or not by setting dropna parameter,\n the default setting is True:\n\n >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n >>> df = ps.DataFrame(l, columns=["a", "b", "c"])\n >>> df.groupby(by=["b"]).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n\n >>> df.groupby(by=["b"], dropna=False).sum().sort_index() # doctest: +NORMALIZE_WHITESPACE\n a c\n b\n 1.0 2 3\n 2.0 2 5\n NaN 1 4\n ' if isinstance(by, ps.DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__)) elif isinstance(by, ps.Series): new_by = [by] elif is_name_like_tuple(by): if isinstance(self, ps.Series): raise KeyError(by) new_by = [cast(Tuple, by)] elif is_name_like_value(by): if isinstance(self, ps.Series): raise KeyError(by) new_by = [(by,)] elif is_list_like(by): new_by = [] for key in by: if isinstance(key, ps.DataFrame): raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key).__name__)) elif isinstance(key, ps.Series): new_by.append(key) elif is_name_like_tuple(key): if isinstance(self, ps.Series): raise KeyError(key) new_by.append(key) elif is_name_like_value(key): if isinstance(self, ps.Series): raise KeyError(key) new_by.append((key,)) else: raise ValueError("Grouper for '{}' not 1-dimensional".format(type(key).__name__)) else: raise ValueError("Grouper for '{}' not 1-dimensional".format(type(by).__name__)) if (not len(new_by)): raise ValueError('No group keys passed!') axis = validate_axis(axis) if (axis != 0): raise NotImplementedError('axis should be either 0 or "index" currently.') return self._build_groupby(by=new_by, as_index=as_index, dropna=dropna)
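A short sketch of the Series branch of the dispatch above: passing a column Series instead of a label should exercise isinstance(by, ps.Series) rather than the label path. The frame mirrors the docstring example; that this form is supported is an assumption suggested by the branch itself.

import pyspark.pandas as ps

df = ps.DataFrame({"Animal": ["Falcon", "Falcon", "Parrot", "Parrot"],
                   "Max Speed": [380.0, 370.0, 24.0, 26.0]})

# Grouping by a column Series of the same frame instead of by the 'Animal' label.
print(df.groupby(df["Animal"])["Max Speed"].mean().sort_index())
# Expected: Falcon 375.0, Parrot 25.0 -- the same result as grouping by the label.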
def bool(self) -> bool: "\n Return the bool of a single element in the current object.\n\n This must be a boolean scalar value, either True or False. Raise a ValueError if\n the object does not have exactly 1 element, or that element is not boolean\n\n Returns\n --------\n bool\n\n Examples\n --------\n >>> ps.DataFrame({'a': [True]}).bool()\n True\n\n >>> ps.Series([False]).bool()\n False\n\n If there are non-boolean or multiple values exist, it raises an exception in all\n cases as below.\n\n >>> ps.DataFrame({'a': ['a']}).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n\n >>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),\n a.item(), a.any() or a.all().\n\n >>> ps.Series([1]).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n " if isinstance(self, ps.DataFrame): df = self elif isinstance(self, ps.Series): df = self.to_dataframe() else: raise TypeError(('bool() expects DataFrame or Series; however, got [%s]' % (self,))) return df.head(2)._to_internal_pandas().bool()
-1,561,303,879,960,741,400
Return the bool of a single element in the current object. This must be a boolean scalar value, either True or False. Raise a ValueError if the object does not have exactly 1 element, or that element is not boolean Returns -------- bool Examples -------- >>> ps.DataFrame({'a': [True]}).bool() True >>> ps.Series([False]).bool() False If there are non-boolean or multiple values exist, it raises an exception in all cases as below. >>> ps.DataFrame({'a': ['a']}).bool() Traceback (most recent call last): ... ValueError: bool cannot act on a non-boolean single element DataFrame >>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all(). >>> ps.Series([1]).bool() Traceback (most recent call last): ... ValueError: bool cannot act on a non-boolean single element DataFrame
python/pyspark/pandas/generic.py
bool
XpressAI/spark
python
def bool(self) -> bool: "\n Return the bool of a single element in the current object.\n\n This must be a boolean scalar value, either True or False. Raise a ValueError if\n the object does not have exactly 1 element, or that element is not boolean\n\n Returns\n --------\n bool\n\n Examples\n --------\n >>> ps.DataFrame({'a': [True]}).bool()\n True\n\n >>> ps.Series([False]).bool()\n False\n\n If there are non-boolean or multiple values exist, it raises an exception in all\n cases as below.\n\n >>> ps.DataFrame({'a': ['a']}).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n\n >>> ps.DataFrame({'a': [True], 'b': [False]}).bool() # doctest: +NORMALIZE_WHITESPACE\n Traceback (most recent call last):\n ...\n ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(),\n a.item(), a.any() or a.all().\n\n >>> ps.Series([1]).bool()\n Traceback (most recent call last):\n ...\n ValueError: bool cannot act on a non-boolean single element DataFrame\n " if isinstance(self, ps.DataFrame): df = self elif isinstance(self, ps.Series): df = self.to_dataframe() else: raise TypeError(('bool() expects DataFrame or Series; however, got [%s]' % (self,))) return df.head(2)._to_internal_pandas().bool()
def first_valid_index(self) -> Optional[Union[(Scalar, Tuple[(Scalar, ...)])]]: "\n Retrieves the index of the first valid value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],\n ... 'b': [None, 2.0, 3.0, 1.0],\n ... 'c': [None, 200, 400, 200]},\n ... index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for Series.\n\n >>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])\n >>> s\n 100 NaN\n 200 NaN\n 300 3.0\n 400 4.0\n 500 5.0\n dtype: float64\n\n >>> s.first_valid_index()\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s\n lama speed NaN\n weight NaN\n length NaN\n cow speed NaN\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.first_valid_index()\n ('cow', 'weight')\n " data_spark_columns = self._internal.data_spark_columns if (len(data_spark_columns) == 0): return None cond = reduce((lambda x, y: (x & y)), map((lambda x: x.isNotNull()), data_spark_columns)) with sql_conf({SPARK_CONF_ARROW_ENABLED: False}): first_valid_row = cast(pd.DataFrame, self._internal.spark_frame.filter(cond).select(self._internal.index_spark_columns).limit(1).toPandas()) if (len(first_valid_row) == 0): return None first_valid_row = first_valid_row.iloc[0] if (len(first_valid_row) == 1): return first_valid_row.iloc[0] else: return tuple(first_valid_row)
-2,649,245,325,038,494,000
Retrieves the index of the first valid value. Returns ------- scalar, tuple, or None Examples -------- Support for DataFrame >>> psdf = ps.DataFrame({'a': [None, 2, 3, 2], ... 'b': [None, 2.0, 3.0, 1.0], ... 'c': [None, 200, 400, 200]}, ... index=['Q', 'W', 'E', 'R']) >>> psdf a b c Q NaN NaN NaN W 2.0 2.0 200.0 E 3.0 3.0 400.0 R 2.0 1.0 200.0 >>> psdf.first_valid_index() 'W' Support for MultiIndex columns >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z Q NaN NaN NaN W 2.0 2.0 200.0 E 3.0 3.0 400.0 R 2.0 1.0 200.0 >>> psdf.first_valid_index() 'W' Support for Series. >>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500]) >>> s 100 NaN 200 NaN 300 3.0 400 4.0 500 5.0 dtype: float64 >>> s.first_valid_index() 300 Support for MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx) >>> s lama speed NaN weight NaN length NaN cow speed NaN weight 250.0 length 1.5 falcon speed 320.0 weight 1.0 length 0.3 dtype: float64 >>> s.first_valid_index() ('cow', 'weight')
python/pyspark/pandas/generic.py
first_valid_index
XpressAI/spark
python
def first_valid_index(self) -> Optional[Union[(Scalar, Tuple[(Scalar, ...)])]]: "\n Retrieves the index of the first valid value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [None, 2, 3, 2],\n ... 'b': [None, 2.0, 3.0, 1.0],\n ... 'c': [None, 200, 400, 200]},\n ... index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q NaN NaN NaN\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R 2.0 1.0 200.0\n\n >>> psdf.first_valid_index()\n 'W'\n\n Support for Series.\n\n >>> s = ps.Series([None, None, 3, 4, 5], index=[100, 200, 300, 400, 500])\n >>> s\n 100 NaN\n 200 NaN\n 300 3.0\n 400 4.0\n 500 5.0\n dtype: float64\n\n >>> s.first_valid_index()\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([None, None, None, None, 250, 1.5, 320, 1, 0.3], index=midx)\n >>> s\n lama speed NaN\n weight NaN\n length NaN\n cow speed NaN\n weight 250.0\n length 1.5\n falcon speed 320.0\n weight 1.0\n length 0.3\n dtype: float64\n\n >>> s.first_valid_index()\n ('cow', 'weight')\n " data_spark_columns = self._internal.data_spark_columns if (len(data_spark_columns) == 0): return None cond = reduce((lambda x, y: (x & y)), map((lambda x: x.isNotNull()), data_spark_columns)) with sql_conf({SPARK_CONF_ARROW_ENABLED: False}): first_valid_row = cast(pd.DataFrame, self._internal.spark_frame.filter(cond).select(self._internal.index_spark_columns).limit(1).toPandas()) if (len(first_valid_row) == 0): return None first_valid_row = first_valid_row.iloc[0] if (len(first_valid_row) == 1): return first_valid_row.iloc[0] else: return tuple(first_valid_row)
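A minimal sketch of the query that first_valid_index builds: a conjunction of isNotNull conditions over the data columns, then the index of the first surviving row via limit(1). The local session and the toy index/data columns are assumptions for illustration.

from functools import reduce
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame(
    [("Q", None, None), ("W", 2.0, 200.0), ("E", 3.0, 400.0), ("R", 2.0, 200.0)],
    "idx string, a double, c double",
)

# Keep only rows where every data column is non-null, then take the index of
# the first surviving row -- the filter/limit(1) pattern used above.
cond = reduce(lambda x, y: x & y, [F.col(c).isNotNull() for c in ["a", "c"]])
sdf.filter(cond).select("idx").limit(1).show()  # expected: W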
def last_valid_index(self) -> Optional[Union[(Scalar, Tuple[(Scalar, ...)])]]: "\n Return index for last non-NA/null value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Notes\n -----\n This API only works with PySpark >= 3.0.\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, None],\n ... 'b': [1.0, 2.0, 3.0, None],\n ... 'c': [100, 200, 400, None]},\n ... index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for Series.\n\n >>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])\n >>> s\n 100 1.0\n 200 2.0\n 300 3.0\n 400 NaN\n 500 NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)\n >>> s\n lama speed 250.0\n weight 1.5\n length 320.0\n cow speed 1.0\n weight 0.3\n length NaN\n falcon speed NaN\n weight NaN\n length NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n ('cow', 'weight')\n " data_spark_columns = self._internal.data_spark_columns if (len(data_spark_columns) == 0): return None cond = reduce((lambda x, y: (x & y)), map((lambda x: x.isNotNull()), data_spark_columns)) last_valid_rows = self._internal.spark_frame.filter(cond).select(self._internal.index_spark_columns).tail(1) if (len(last_valid_rows) == 0): return None last_valid_row = last_valid_rows[0] if (len(last_valid_row) == 1): return last_valid_row[0] else: return tuple(last_valid_row)
613,440,096,527,983,700
Return index for last non-NA/null value. Returns ------- scalar, tuple, or None Notes ----- This API only works with PySpark >= 3.0. Examples -------- Support for DataFrame >>> psdf = ps.DataFrame({'a': [1, 2, 3, None], ... 'b': [1.0, 2.0, 3.0, None], ... 'c': [100, 200, 400, None]}, ... index=['Q', 'W', 'E', 'R']) >>> psdf a b c Q 1.0 1.0 100.0 W 2.0 2.0 200.0 E 3.0 3.0 400.0 R NaN NaN NaN >>> psdf.last_valid_index() # doctest: +SKIP 'E' Support for MultiIndex columns >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')]) >>> psdf a b c x y z Q 1.0 1.0 100.0 W 2.0 2.0 200.0 E 3.0 3.0 400.0 R NaN NaN NaN >>> psdf.last_valid_index() # doctest: +SKIP 'E' Support for Series. >>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500]) >>> s 100 1.0 200 2.0 300 3.0 400 NaN 500 NaN dtype: float64 >>> s.last_valid_index() # doctest: +SKIP 300 Support for MultiIndex >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'], ... ['speed', 'weight', 'length']], ... [[0, 0, 0, 1, 1, 1, 2, 2, 2], ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) >>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx) >>> s lama speed 250.0 weight 1.5 length 320.0 cow speed 1.0 weight 0.3 length NaN falcon speed NaN weight NaN length NaN dtype: float64 >>> s.last_valid_index() # doctest: +SKIP ('cow', 'weight')
python/pyspark/pandas/generic.py
last_valid_index
XpressAI/spark
python
def last_valid_index(self) -> Optional[Union[(Scalar, Tuple[(Scalar, ...)])]]: "\n Return index for last non-NA/null value.\n\n Returns\n -------\n scalar, tuple, or None\n\n Notes\n -----\n This API only works with PySpark >= 3.0.\n\n Examples\n --------\n\n Support for DataFrame\n\n >>> psdf = ps.DataFrame({'a': [1, 2, 3, None],\n ... 'b': [1.0, 2.0, 3.0, None],\n ... 'c': [100, 200, 400, None]},\n ... index=['Q', 'W', 'E', 'R'])\n >>> psdf\n a b c\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for MultiIndex columns\n\n >>> psdf.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])\n >>> psdf\n a b c\n x y z\n Q 1.0 1.0 100.0\n W 2.0 2.0 200.0\n E 3.0 3.0 400.0\n R NaN NaN NaN\n\n >>> psdf.last_valid_index() # doctest: +SKIP\n 'E'\n\n Support for Series.\n\n >>> s = ps.Series([1, 2, 3, None, None], index=[100, 200, 300, 400, 500])\n >>> s\n 100 1.0\n 200 2.0\n 300 3.0\n 400 NaN\n 500 NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n 300\n\n Support for MultiIndex\n\n >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],\n ... ['speed', 'weight', 'length']],\n ... [[0, 0, 0, 1, 1, 1, 2, 2, 2],\n ... [0, 1, 2, 0, 1, 2, 0, 1, 2]])\n >>> s = ps.Series([250, 1.5, 320, 1, 0.3, None, None, None, None], index=midx)\n >>> s\n lama speed 250.0\n weight 1.5\n length 320.0\n cow speed 1.0\n weight 0.3\n length NaN\n falcon speed NaN\n weight NaN\n length NaN\n dtype: float64\n\n >>> s.last_valid_index() # doctest: +SKIP\n ('cow', 'weight')\n " data_spark_columns = self._internal.data_spark_columns if (len(data_spark_columns) == 0): return None cond = reduce((lambda x, y: (x & y)), map((lambda x: x.isNotNull()), data_spark_columns)) last_valid_rows = self._internal.spark_frame.filter(cond).select(self._internal.index_spark_columns).tail(1) if (len(last_valid_rows) == 0): return None last_valid_row = last_valid_rows[0] if (len(last_valid_row) == 1): return last_valid_row[0] else: return tuple(last_valid_row)
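The last_valid_index variant uses the same non-null conjunction but fetches the final surviving row with DataFrame.tail, which is what ties it to PySpark >= 3.0. A hedged sketch with made-up sample data:

from functools import reduce
from pyspark.sql import SparkSession
from pyspark.sql import functions as F

spark = SparkSession.builder.master("local[1]").getOrCreate()
sdf = spark.createDataFrame(
    [("Q", 1.0), ("W", 2.0), ("E", 3.0), ("R", None)],
    "idx string, a double",
)

# Same filtering idea as first_valid_index, but take the last surviving row.
cond = reduce(lambda x, y: x & y, [F.col(c).isNotNull() for c in ["a"]])
rows = sdf.filter(cond).select("idx").tail(1)
print(rows[0][0] if rows else None)  # expected: E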
def rolling(self: FrameLike, window: int, min_periods: Optional[int]=None) -> 'Rolling[FrameLike]': "\n Provide rolling transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n window : int, or offset\n Size of the moving window.\n This is the number of observations used for calculating the statistic.\n Each window will be a fixed size.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n For a window that is specified by an offset, min_periods will default to 1.\n Otherwise, min_periods will default to the size of the window.\n\n Returns\n -------\n a Window sub-classed for the particular operation\n " from pyspark.pandas.window import Rolling return Rolling(self, window=window, min_periods=min_periods)
3,435,677,467,007,363,600
Provide rolling transformations. .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas. Unlike pandas, NA is also counted as the period. This might be changed in the near future. Parameters ---------- window : int, or offset Size of the moving window. This is the number of observations used for calculating the statistic. Each window will be a fixed size. min_periods : int, default None Minimum number of observations in window required to have a value (otherwise result is NA). For a window that is specified by an offset, min_periods will default to 1. Otherwise, min_periods will default to the size of the window. Returns ------- a Window sub-classed for the particular operation
python/pyspark/pandas/generic.py
rolling
XpressAI/spark
python
def rolling(self: FrameLike, window: int, min_periods: Optional[int]=None) -> 'Rolling[FrameLike]': "\n Provide rolling transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n window : int, or offset\n Size of the moving window.\n This is the number of observations used for calculating the statistic.\n Each window will be a fixed size.\n\n min_periods : int, default None\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n For a window that is specified by an offset, min_periods will default to 1.\n Otherwise, min_periods will default to the size of the window.\n\n Returns\n -------\n a Window sub-classed for the particular operation\n " from pyspark.pandas.window import Rolling return Rolling(self, window=window, min_periods=min_periods)
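The rolling docstring above has no Examples section, so here is a minimal usage sketch. The expected output is what pandas produces for the same call, assuming the fixed-window semantics described in the note (min_periods defaults to the window size).

import pyspark.pandas as ps

s = ps.Series([1, 2, 3, 4, 5])
print(s.rolling(3).sum())
# Expected:
# 0     NaN
# 1     NaN
# 2     6.0
# 3     9.0
# 4    12.0
# dtype: float64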
def expanding(self: FrameLike, min_periods: int=1) -> 'Expanding[FrameLike]': "\n Provide expanding transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n Returns\n -------\n a Window sub-classed for the particular operation\n " from pyspark.pandas.window import Expanding return Expanding(self, min_periods=min_periods)
-4,089,165,445,054,864,000
Provide expanding transformations. .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas. Unlike pandas, NA is also counted as the period. This might be changed in the near future. Parameters ---------- min_periods : int, default 1 Minimum number of observations in window required to have a value (otherwise result is NA). Returns ------- a Window sub-classed for the particular operation
python/pyspark/pandas/generic.py
expanding
XpressAI/spark
python
def expanding(self: FrameLike, min_periods: int=1) -> 'Expanding[FrameLike]': "\n Provide expanding transformations.\n\n .. note:: 'min_periods' in pandas-on-Spark works as a fixed window size unlike pandas.\n Unlike pandas, NA is also counted as the period. This might be changed\n in the near future.\n\n Parameters\n ----------\n min_periods : int, default 1\n Minimum number of observations in window required to have a value\n (otherwise result is NA).\n\n Returns\n -------\n a Window sub-classed for the particular operation\n " from pyspark.pandas.window import Expanding return Expanding(self, min_periods=min_periods)
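Likewise, the expanding docstring has no Examples section; a minimal usage sketch, with the output expected under the default min_periods=1:

import pyspark.pandas as ps

s = ps.Series([1, 2, 3])
print(s.expanding().sum())
# Expected (every prefix yields a value):
# 0    1.0
# 1    3.0
# 2    6.0
# dtype: float64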
def get(self, key: Any, default: Optional[Any]=None) -> Any: "\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n\n Examples\n --------\n >>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},\n ... columns=['x', 'y', 'z'], index=[10, 20, 20])\n >>> df\n x y z\n 10 0 a a\n 20 1 b b\n 20 2 b b\n\n >>> df.get('x')\n 10 0\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.get(['x', 'y'])\n x y\n 10 0 a\n 20 1 b\n 20 2 b\n\n >>> df.x.get(10)\n 0\n\n >>> df.x.get(20)\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.x.get(15, -1)\n -1\n " try: return self[key] except (KeyError, ValueError, IndexError): return default
1,690,284,315,299,788,500
Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object Examples -------- >>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']}, ... columns=['x', 'y', 'z'], index=[10, 20, 20]) >>> df x y z 10 0 a a 20 1 b b 20 2 b b >>> df.get('x') 10 0 20 1 20 2 Name: x, dtype: int64 >>> df.get(['x', 'y']) x y 10 0 a 20 1 b 20 2 b >>> df.x.get(10) 0 >>> df.x.get(20) 20 1 20 2 Name: x, dtype: int64 >>> df.x.get(15, -1) -1
python/pyspark/pandas/generic.py
get
XpressAI/spark
python
def get(self, key: Any, default: Optional[Any]=None) -> Any: "\n Get item from object for given key (DataFrame column, Panel slice,\n etc.). Returns default value if not found.\n\n Parameters\n ----------\n key : object\n\n Returns\n -------\n value : same type as items contained in object\n\n Examples\n --------\n >>> df = ps.DataFrame({'x':range(3), 'y':['a','b','b'], 'z':['a','b','b']},\n ... columns=['x', 'y', 'z'], index=[10, 20, 20])\n >>> df\n x y z\n 10 0 a a\n 20 1 b b\n 20 2 b b\n\n >>> df.get('x')\n 10 0\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.get(['x', 'y'])\n x y\n 10 0 a\n 20 1 b\n 20 2 b\n\n >>> df.x.get(10)\n 0\n\n >>> df.x.get(20)\n 20 1\n 20 2\n Name: x, dtype: int64\n\n >>> df.x.get(15, -1)\n -1\n " try: return self[key] except (KeyError, ValueError, IndexError): return default
def squeeze(self, axis: Optional[Axis]=None) -> Union[(Scalar, 'DataFrame', 'Series')]: "\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = ps.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_1a = df.loc[[1], ['a']]\n >>> df_1a\n a\n 1 3\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_1a.squeeze('rows')\n a 3\n Name: 1, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_1a.squeeze()\n 3\n " if (axis is not None): axis = ('index' if (axis == 'rows') else axis) axis = validate_axis(axis) if isinstance(self, ps.DataFrame): from pyspark.pandas.series import first_series is_squeezable = (len(self.columns[:2]) == 1) if (not is_squeezable): return self series_from_column = first_series(self) has_single_value = (len(series_from_column.head(2)) == 1) if has_single_value: result = self._to_internal_pandas().squeeze(axis) return (ps.Series(result) if isinstance(result, pd.Series) else result) elif (axis == 0): return self else: return series_from_column else: self_top_two = cast('Series', self).head(2) has_single_value = (len(self_top_two) == 1) return cast(Union[(Scalar, ps.Series)], (self_top_two[0] if has_single_value else self))
-1,916,325,584,634,146,800
Squeeze 1 dimensional axis objects into scalars. Series or DataFrames with a single element are squeezed to a scalar. DataFrames with a single column or a single row are squeezed to a Series. Otherwise the object is unchanged. This method is most useful when you don't know if your object is a Series or DataFrame, but you do know it has just a single column. In that case you can safely call `squeeze` to ensure you have a Series. Parameters ---------- axis : {0 or 'index', 1 or 'columns', None}, default None A specific axis to squeeze. By default, all length-1 axes are squeezed. Returns ------- DataFrame, Series, or scalar The projection after squeezing `axis` or all the axes. See Also -------- Series.iloc : Integer-location based indexing for selecting scalars. DataFrame.iloc : Integer-location based indexing for selecting Series. Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame. Examples -------- >>> primes = ps.Series([2, 3, 5, 7]) Slicing might produce a Series with a single value: >>> even_primes = primes[primes % 2 == 0] >>> even_primes 0 2 dtype: int64 >>> even_primes.squeeze() 2 Squeezing objects with more than one value in every axis does nothing: >>> odd_primes = primes[primes % 2 == 1] >>> odd_primes 1 3 2 5 3 7 dtype: int64 >>> odd_primes.squeeze() 1 3 2 5 3 7 dtype: int64 Squeezing is even more effective when used with DataFrames. >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']) >>> df a b 0 1 2 1 3 4 Slicing a single column will produce a DataFrame with the columns having only one value: >>> df_a = df[['a']] >>> df_a a 0 1 1 3 So the columns can be squeezed down, resulting in a Series: >>> df_a.squeeze('columns') 0 1 1 3 Name: a, dtype: int64 Slicing a single row from a single column will produce a single scalar DataFrame: >>> df_1a = df.loc[[1], ['a']] >>> df_1a a 1 3 Squeezing the rows produces a single scalar Series: >>> df_1a.squeeze('rows') a 3 Name: 1, dtype: int64 Squeezing all axes will project directly into a scalar: >>> df_1a.squeeze() 3
python/pyspark/pandas/generic.py
squeeze
XpressAI/spark
python
def squeeze(self, axis: Optional[Axis]=None) -> Union[(Scalar, 'DataFrame', 'Series')]: "\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = ps.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = ps.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_1a = df.loc[[1], ['a']]\n >>> df_1a\n a\n 1 3\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_1a.squeeze('rows')\n a 3\n Name: 1, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_1a.squeeze()\n 3\n " if (axis is not None): axis = ('index' if (axis == 'rows') else axis) axis = validate_axis(axis) if isinstance(self, ps.DataFrame): from pyspark.pandas.series import first_series is_squeezable = (len(self.columns[:2]) == 1) if (not is_squeezable): return self series_from_column = first_series(self) has_single_value = (len(series_from_column.head(2)) == 1) if has_single_value: result = self._to_internal_pandas().squeeze(axis) return (ps.Series(result) if isinstance(result, pd.Series) else result) elif (axis == 0): return self else: return series_from_column else: self_top_two = cast('Series', self).head(2) has_single_value = (len(self_top_two) == 1) return cast(Union[(Scalar, ps.Series)], (self_top_two[0] if has_single_value else self))
def truncate(self, before: Optional[Any]=None, after: Optional[Any]=None, axis: Optional[Axis]=None, copy: bool_type=True) -> DataFrameOrSeries: '\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`\n which can be expensive.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or \'index\', 1 or \'columns\'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': [\'a\', \'b\', \'c\', \'d\', \'e\'],\n ... \'B\': [\'f\', \'g\', \'h\', \'i\', \'j\'],\n ... \'C\': [\'k\', \'l\', \'m\', \'n\', \'o\']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before="A", after="B", axis="columns")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df[\'A\'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n A Series has index that sorted integers.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=[1, 2, 3, 4, 5, 6, 7])\n >>> s\n 1 10\n 2 20\n 3 30\n 4 40\n 5 50\n 6 60\n 7 70\n dtype: int64\n\n >>> s.truncate(2, 5)\n 2 20\n 3 30\n 4 40\n 5 50\n dtype: int64\n\n A Series has index that sorted strings.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=[\'a\', \'b\', \'c\', \'d\', \'e\', \'f\', \'g\'])\n >>> s\n a 10\n b 20\n c 30\n d 40\n e 50\n f 60\n g 70\n dtype: int64\n\n >>> s.truncate(\'b\', \'e\')\n b 20\n c 30\n d 40\n e 50\n dtype: int64\n ' from pyspark.pandas.series import first_series axis = validate_axis(axis) indexes = self.index indexes_increasing = indexes.is_monotonic_increasing if ((not indexes_increasing) and (not indexes.is_monotonic_decreasing)): raise ValueError('truncate requires a sorted index') if ((before is None) and (after is None)): return cast(Union[(ps.DataFrame, ps.Series)], (self.copy() if copy else self)) if (((before is not None) and (after is not None)) and (before > after)): raise ValueError(('Truncate: %s must be after %s' % (after, before))) if isinstance(self, ps.Series): if indexes_increasing: result = first_series(self.to_frame().loc[before:after]).rename(self.name) else: result = first_series(self.to_frame().loc[after:before]).rename(self.name) elif isinstance(self, ps.DataFrame): if (axis == 0): if indexes_increasing: result = self.loc[before:after] else: result = self.loc[after:before] elif (axis == 1): result = self.loc[:, before:after] return cast(DataFrameOrSeries, (result.copy() if copy else result))
6,132,906,944,478,476,000
Truncate a Series or DataFrame before and after some index value. This is a useful shorthand for boolean indexing based on index values above or below certain thresholds. .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing` which can be expensive. Parameters ---------- before : date, str, int Truncate all rows before this index value. after : date, str, int Truncate all rows after this index value. axis : {0 or 'index', 1 or 'columns'}, optional Axis to truncate. Truncates the index (rows) by default. copy : bool, default is True, Return a copy of the truncated section. Returns ------- type of caller The truncated Series or DataFrame. See Also -------- DataFrame.loc : Select a subset of a DataFrame by label. DataFrame.iloc : Select a subset of a DataFrame by position. Examples -------- >>> df = ps.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'], ... 'B': ['f', 'g', 'h', 'i', 'j'], ... 'C': ['k', 'l', 'm', 'n', 'o']}, ... index=[1, 2, 3, 4, 5]) >>> df A B C 1 a f k 2 b g l 3 c h m 4 d i n 5 e j o >>> df.truncate(before=2, after=4) A B C 2 b g l 3 c h m 4 d i n The columns of a DataFrame can be truncated. >>> df.truncate(before="A", after="B", axis="columns") A B 1 a f 2 b g 3 c h 4 d i 5 e j For Series, only rows can be truncated. >>> df['A'].truncate(before=2, after=4) 2 b 3 c 4 d Name: A, dtype: object A Series has index that sorted integers. >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70], ... index=[1, 2, 3, 4, 5, 6, 7]) >>> s 1 10 2 20 3 30 4 40 5 50 6 60 7 70 dtype: int64 >>> s.truncate(2, 5) 2 20 3 30 4 40 5 50 dtype: int64 A Series has index that sorted strings. >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70], ... index=['a', 'b', 'c', 'd', 'e', 'f', 'g']) >>> s a 10 b 20 c 30 d 40 e 50 f 60 g 70 dtype: int64 >>> s.truncate('b', 'e') b 20 c 30 d 40 e 50 dtype: int64
python/pyspark/pandas/generic.py
truncate
XpressAI/spark
python
def truncate(self, before: Optional[Any]=None, after: Optional[Any]=None, axis: Optional[Axis]=None, copy: bool_type=True) -> DataFrameOrSeries: '\n Truncate a Series or DataFrame before and after some index value.\n\n This is a useful shorthand for boolean indexing based on index\n values above or below certain thresholds.\n\n .. note:: This API is dependent on :meth:`Index.is_monotonic_increasing`\n which can be expensive.\n\n Parameters\n ----------\n before : date, str, int\n Truncate all rows before this index value.\n after : date, str, int\n Truncate all rows after this index value.\n axis : {0 or \'index\', 1 or \'columns\'}, optional\n Axis to truncate. Truncates the index (rows) by default.\n copy : bool, default is True,\n Return a copy of the truncated section.\n\n Returns\n -------\n type of caller\n The truncated Series or DataFrame.\n\n See Also\n --------\n DataFrame.loc : Select a subset of a DataFrame by label.\n DataFrame.iloc : Select a subset of a DataFrame by position.\n\n Examples\n --------\n >>> df = ps.DataFrame({\'A\': [\'a\', \'b\', \'c\', \'d\', \'e\'],\n ... \'B\': [\'f\', \'g\', \'h\', \'i\', \'j\'],\n ... \'C\': [\'k\', \'l\', \'m\', \'n\', \'o\']},\n ... index=[1, 2, 3, 4, 5])\n >>> df\n A B C\n 1 a f k\n 2 b g l\n 3 c h m\n 4 d i n\n 5 e j o\n\n >>> df.truncate(before=2, after=4)\n A B C\n 2 b g l\n 3 c h m\n 4 d i n\n\n The columns of a DataFrame can be truncated.\n\n >>> df.truncate(before="A", after="B", axis="columns")\n A B\n 1 a f\n 2 b g\n 3 c h\n 4 d i\n 5 e j\n\n For Series, only rows can be truncated.\n\n >>> df[\'A\'].truncate(before=2, after=4)\n 2 b\n 3 c\n 4 d\n Name: A, dtype: object\n\n A Series has index that sorted integers.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=[1, 2, 3, 4, 5, 6, 7])\n >>> s\n 1 10\n 2 20\n 3 30\n 4 40\n 5 50\n 6 60\n 7 70\n dtype: int64\n\n >>> s.truncate(2, 5)\n 2 20\n 3 30\n 4 40\n 5 50\n dtype: int64\n\n A Series has index that sorted strings.\n\n >>> s = ps.Series([10, 20, 30, 40, 50, 60, 70],\n ... index=[\'a\', \'b\', \'c\', \'d\', \'e\', \'f\', \'g\'])\n >>> s\n a 10\n b 20\n c 30\n d 40\n e 50\n f 60\n g 70\n dtype: int64\n\n >>> s.truncate(\'b\', \'e\')\n b 20\n c 30\n d 40\n e 50\n dtype: int64\n ' from pyspark.pandas.series import first_series axis = validate_axis(axis) indexes = self.index indexes_increasing = indexes.is_monotonic_increasing if ((not indexes_increasing) and (not indexes.is_monotonic_decreasing)): raise ValueError('truncate requires a sorted index') if ((before is None) and (after is None)): return cast(Union[(ps.DataFrame, ps.Series)], (self.copy() if copy else self)) if (((before is not None) and (after is not None)) and (before > after)): raise ValueError(('Truncate: %s must be after %s' % (after, before))) if isinstance(self, ps.Series): if indexes_increasing: result = first_series(self.to_frame().loc[before:after]).rename(self.name) else: result = first_series(self.to_frame().loc[after:before]).rename(self.name) elif isinstance(self, ps.DataFrame): if (axis == 0): if indexes_increasing: result = self.loc[before:after] else: result = self.loc[after:before] elif (axis == 1): result = self.loc[:, before:after] return cast(DataFrameOrSeries, (result.copy() if copy else result))
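The implementation above also handles a monotonically decreasing index by swapping the slice bounds, a case the docstring never shows. A minimal sketch of that path (illustrative values; requires a running Spark session, so the output is indicative only):

>>> import pyspark.pandas as ps
>>> s = ps.Series([10, 20, 30, 40], index=[4, 3, 2, 1])  # monotonically decreasing index
>>> s.truncate(before=2, after=3)  # doctest: +SKIP
3    20
2    30
dtype: int64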
def to_markdown(self, buf: Optional[Union[(IO[str], str)]]=None, mode: Optional[str]=None) -> str: '\n Print Series or DataFrame in Markdown-friendly format.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n Series or DataFrame in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n Examples\n --------\n >>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")\n >>> print(psser.to_markdown()) # doctest: +SKIP\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n >>> psdf = ps.DataFrame(\n ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}\n ... )\n >>> print(psdf.to_markdown()) # doctest: +SKIP\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n ' if (LooseVersion(pd.__version__) < LooseVersion('1.0.0')): raise NotImplementedError('`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0') args = locals() psser_or_psdf = self internal_pandas = psser_or_psdf._to_internal_pandas() return validate_arguments_and_invoke_function(internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args)
-2,431,315,716,865,093,000
Print Series or DataFrame in Markdown-friendly format. .. note:: This method should only be used if the resulting pandas object is expected to be small, as all the data is loaded into the driver's memory. Parameters ---------- buf : writable buffer, defaults to sys.stdout Where to send the output. By default, the output is printed to sys.stdout. Pass a writable buffer if you need to further process the output. mode : str, optional Mode in which file is opened. **kwargs These parameters will be passed to `tabulate`. Returns ------- str Series or DataFrame in Markdown-friendly format. Notes ----- Requires the `tabulate <https://pypi.org/project/tabulate>`_ package. Examples -------- >>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal") >>> print(psser.to_markdown()) # doctest: +SKIP | | animal | |---:|:---------| | 0 | elk | | 1 | pig | | 2 | dog | | 3 | quetzal | >>> psdf = ps.DataFrame( ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]} ... ) >>> print(psdf.to_markdown()) # doctest: +SKIP | | animal_1 | animal_2 | |---:|:-----------|:-----------| | 0 | elk | dog | | 1 | pig | quetzal |
python/pyspark/pandas/generic.py
to_markdown
XpressAI/spark
python
def to_markdown(self, buf: Optional[Union[(IO[str], str)]]=None, mode: Optional[str]=None) -> str: '\n Print Series or DataFrame in Markdown-friendly format.\n\n .. note:: This method should only be used if the resulting pandas object is expected\n to be small, as all the data is loaded into the driver\'s memory.\n\n Parameters\n ----------\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. Pass a writable buffer if you need to further process\n the output.\n mode : str, optional\n Mode in which file is opened.\n **kwargs\n These parameters will be passed to `tabulate`.\n\n Returns\n -------\n str\n Series or DataFrame in Markdown-friendly format.\n\n Notes\n -----\n Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.\n\n Examples\n --------\n >>> psser = ps.Series(["elk", "pig", "dog", "quetzal"], name="animal")\n >>> print(psser.to_markdown()) # doctest: +SKIP\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n >>> psdf = ps.DataFrame(\n ... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}\n ... )\n >>> print(psdf.to_markdown()) # doctest: +SKIP\n | | animal_1 | animal_2 |\n |---:|:-----------|:-----------|\n | 0 | elk | dog |\n | 1 | pig | quetzal |\n ' if (LooseVersion(pd.__version__) < LooseVersion('1.0.0')): raise NotImplementedError('`to_markdown()` only supported in pandas-on-Spark with pandas >= 1.0.0') args = locals() psser_or_psdf = self internal_pandas = psser_or_psdf._to_internal_pandas() return validate_arguments_and_invoke_function(internal_pandas, self.to_markdown, type(internal_pandas).to_markdown, args)
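Because the call above just collects the data to the driver and returns a plain string, the result can be post-processed or written out like any other string. A small usage sketch reusing the psdf from the example above (the file path is arbitrary and hypothetical; assumes tabulate is installed):

>>> md = psdf.to_markdown()  # doctest: +SKIP
>>> with open('/tmp/animals.md', 'w') as f:  # doctest: +SKIP
...     _ = f.write(md)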
def bfill(self: FrameLike, axis: Optional[Axis]=None, inplace: bool_type=False, limit: Optional[int]=None) -> FrameLike: "\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.\n\n .. note:: the current implementation of 'bfill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values backward.\n\n >>> psdf.bfill()\n A B C D\n 0 3.0 2.0 1.0 0\n 1 3.0 4.0 1.0 1\n 2 NaN 3.0 1.0 5\n 3 NaN 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([None, None, None, 1])\n >>> psser\n 0 NaN\n 1 NaN\n 2 NaN\n 3 1.0\n dtype: float64\n\n >>> psser.bfill()\n 0 1.0\n 1 1.0\n 2 1.0\n 3 1.0\n dtype: float64\n " return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
-4,754,868,684,147,332,000
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```. .. note:: the current implementation of 'bfill' uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame or Series DataFrame or Series with NA entries filled. Examples -------- >>> psdf = ps.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> psdf A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Propagate non-null values backward. >>> psdf.bfill() A B C D 0 3.0 2.0 1.0 0 1 3.0 4.0 1.0 1 2 NaN 3.0 1.0 5 3 NaN 3.0 1.0 4 For Series >>> psser = ps.Series([None, None, None, 1]) >>> psser 0 NaN 1 NaN 2 NaN 3 1.0 dtype: float64 >>> psser.bfill() 0 1.0 1 1.0 2 1.0 3 1.0 dtype: float64
python/pyspark/pandas/generic.py
bfill
XpressAI/spark
python
def bfill(self: FrameLike, axis: Optional[Axis]=None, inplace: bool_type=False, limit: Optional[int]=None) -> FrameLike: "\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`bfill```.\n\n .. note:: the current implementation of 'bfill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values backward.\n\n >>> psdf.bfill()\n A B C D\n 0 3.0 2.0 1.0 0\n 1 3.0 4.0 1.0 1\n 2 NaN 3.0 1.0 5\n 3 NaN 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([None, None, None, 1])\n >>> psser\n 0 NaN\n 1 NaN\n 2 NaN\n 3 1.0\n dtype: float64\n\n >>> psser.bfill()\n 0 1.0\n 1 1.0\n 2 1.0\n 3 1.0\n dtype: float64\n " return self.fillna(method='bfill', axis=axis, inplace=inplace, limit=limit)
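The limit parameter described above caps how many consecutive NaNs get backfilled. A sketch on the same psdf, with the output pandas' fill semantics would give (indicative only, hence the skip marker):

>>> psdf.bfill(limit=1)  # doctest: +SKIP
     A    B    C  D
0  3.0  2.0  NaN  0
1  3.0  4.0  NaN  1
2  NaN  3.0  1.0  5
3  NaN  3.0  1.0  4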
def ffill(self: FrameLike, axis: Optional[Axis]=None, inplace: bool_type=False, limit: Optional[int]=None) -> FrameLike: "\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.\n\n .. note:: the current implementation of 'ffill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values forward.\n\n >>> psdf.ffill()\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([2, 4, None, 3])\n >>> psser\n 0 2.0\n 1 4.0\n 2 NaN\n 3 3.0\n dtype: float64\n\n >>> psser.ffill()\n 0 2.0\n 1 4.0\n 2 4.0\n 3 3.0\n dtype: float64\n " return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
6,601,667,604,905,121,000
Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```. .. note:: the current implementation of 'ffill' uses Spark's Window without specifying partition specification. This leads to move all data into single partition in single machine and could cause serious performance degradation. Avoid this method against very large dataset. Parameters ---------- axis : {0 or `index`} 1 and `columns` are not supported. inplace : boolean, default False Fill in place (do not create a new object) limit : int, default None If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Must be greater than 0 if not None Returns ------- DataFrame or Series DataFrame or Series with NA entries filled. Examples -------- >>> psdf = ps.DataFrame({ ... 'A': [None, 3, None, None], ... 'B': [2, 4, None, 3], ... 'C': [None, None, None, 1], ... 'D': [0, 1, 5, 4] ... }, ... columns=['A', 'B', 'C', 'D']) >>> psdf A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 NaN NaN NaN 5 3 NaN 3.0 1.0 4 Propagate non-null values forward. >>> psdf.ffill() A B C D 0 NaN 2.0 NaN 0 1 3.0 4.0 NaN 1 2 3.0 4.0 NaN 5 3 3.0 3.0 1.0 4 For Series >>> psser = ps.Series([2, 4, None, 3]) >>> psser 0 2.0 1 4.0 2 NaN 3 3.0 dtype: float64 >>> psser.ffill() 0 2.0 1 4.0 2 4.0 3 3.0 dtype: float64
python/pyspark/pandas/generic.py
ffill
XpressAI/spark
python
def ffill(self: FrameLike, axis: Optional[Axis]=None, inplace: bool_type=False, limit: Optional[int]=None) -> FrameLike: "\n Synonym for `DataFrame.fillna()` or `Series.fillna()` with ``method=`ffill```.\n\n .. note:: the current implementation of 'ffill' uses Spark's Window\n without specifying partition specification. This leads to move all data into\n single partition in single machine and could cause serious\n performance degradation. Avoid this method against very large dataset.\n\n Parameters\n ----------\n axis : {0 or `index`}\n 1 and `columns` are not supported.\n inplace : boolean, default False\n Fill in place (do not create a new object)\n limit : int, default None\n If method is specified, this is the maximum number of consecutive NaN values to\n forward/backward fill. In other words, if there is a gap with more than this number of\n consecutive NaNs, it will only be partially filled. If method is not specified,\n this is the maximum number of entries along the entire axis where NaNs will be filled.\n Must be greater than 0 if not None\n\n Returns\n -------\n DataFrame or Series\n DataFrame or Series with NA entries filled.\n\n Examples\n --------\n >>> psdf = ps.DataFrame({\n ... 'A': [None, 3, None, None],\n ... 'B': [2, 4, None, 3],\n ... 'C': [None, None, None, 1],\n ... 'D': [0, 1, 5, 4]\n ... },\n ... columns=['A', 'B', 'C', 'D'])\n >>> psdf\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 NaN NaN NaN 5\n 3 NaN 3.0 1.0 4\n\n Propagate non-null values forward.\n\n >>> psdf.ffill()\n A B C D\n 0 NaN 2.0 NaN 0\n 1 3.0 4.0 NaN 1\n 2 3.0 4.0 NaN 5\n 3 3.0 3.0 1.0 4\n\n For Series\n\n >>> psser = ps.Series([2, 4, None, 3])\n >>> psser\n 0 2.0\n 1 4.0\n 2 NaN\n 3 3.0\n dtype: float64\n\n >>> psser.ffill()\n 0 2.0\n 1 4.0\n 2 4.0\n 3 3.0\n dtype: float64\n " return self.fillna(method='ffill', axis=axis, inplace=inplace, limit=limit)
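Forward filling honours limit in the same way. A minimal sketch on a Series (values chosen for illustration; output assumes pandas' fill semantics and a running Spark session):

>>> psser = ps.Series([2.0, None, None, 5.0])
>>> psser.ffill(limit=1)  # doctest: +SKIP
0    2.0
1    2.0
2    NaN
3    5.0
dtype: float64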
def clean_slug(slug): 'Clean a possible Slug string to remove dashes and lower case.' return slug.replace('-', '').lower()
8,895,740,661,734,946,000
Clean a possible Slug string to remove dashes and lower case.
src/coolbeans/plugins/sheetsaccount.py
clean_slug
runarp/coolbeans
python
def clean_slug(slug): return slug.replace('-', '').lower()
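A one-line usage sketch of the helper above (the input slug is hypothetical):

>>> clean_slug('Assets-Bank-Checking')
'assetsbankchecking'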
def coolbean_sheets(entries, context): 'Given a set of entries, pull out any slugs and add them to the context' settings = context.setdefault('coolbean-accounts', {}) for entry in entries: if isinstance(entry, data.Open): document = entry.meta.get('document_name', None) tab = entry.meta.get('document_tab', None) slug = entry.meta.get('slug', '') if (document and tab and slug): settings[slug] = {'account': entry.account, 'document': document, 'tab': tab, 'currencies': entry.currencies} elif (document or tab): print(f'Skipping {entry.account}: {document}/{tab}/{slug}') return (entries, [])
5,413,436,553,337,824,000
Given a set of entries, pull out any slugs and add them to the context
src/coolbeans/plugins/sheetsaccount.py
coolbean_sheets
runarp/coolbeans
python
def coolbean_sheets(entries, context): settings = context.setdefault('coolbean-accounts', {}) for entry in entries: if isinstance(entry, data.Open): document = entry.meta.get('document_name', None) tab = entry.meta.get('document_tab', None) slug = entry.meta.get('slug', '') if (document and tab and slug): settings[slug] = {'account': entry.account, 'document': document, 'tab': tab, 'currencies': entry.currencies} elif (document or tab): print(f'Skipping {entry.account}: {document}/{tab}/{slug}') return (entries, [])
def remote_entries(entries, options_map): '\n\n @param entries:\n @param options_map:\n @return:\n ' errors = [] settings = options_map['coolbeans'] secrets_file = get_setting('google-apis', settings) connection = google_connect(secrets_file) new_entries_path = None new_entries_file = get_setting('new-entries-bean', settings) if new_entries_file: new_entries_path = pathlib.Path(new_entries_file) remote_accounts = {} for entry in entries: if (not isinstance(entry, data.Open)): continue document_name = entry.meta.get('document_name', None) default_currency = (entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY) if document_name: options = dict(document_name=document_name, document_tab=entry.meta.get('document_tab', None), reverse_amount=entry.meta.get('reverse', False), default_currency=default_currency, entry=entry, entry_file=new_entries_path) remote_accounts[entry.account] = options new_entries = [] for (account, options) in remote_accounts.items(): try: new_entries += load_remote_account(connection=connection, errors=errors, account=account, options=options) except Exception as exc: logger.error(f'while processing {account}', exc_info=exc) if (new_entries and new_entries_path): from beancount.parser import printer with new_entries_path.open('w') as stream: printer.print_entries(new_entries, file=stream) logger.info(f'Wrote {len(new_entries)} new account(s) to {new_entries_path}.') return ((entries + new_entries), errors)
-2,312,854,070,579,368,400
@param entries: @param options_map: @return:
src/coolbeans/plugins/sheetsaccount.py
remote_entries
runarp/coolbeans
python
def remote_entries(entries, options_map): '\n\n @param entries:\n @param options_map:\n @return:\n ' errors = [] settings = options_map['coolbeans'] secrets_file = get_setting('google-apis', settings) connection = google_connect(secrets_file) new_entries_path = None new_entries_file = get_setting('new-entries-bean', settings) if new_entries_file: new_entries_path = pathlib.Path(new_entries_file) remote_accounts = {} for entry in entries: if (not isinstance(entry, data.Open)): continue document_name = entry.meta.get('document_name', None) default_currency = (entry.currencies[0] if entry.currencies else DEFAULT_CURRENCY) if document_name: options = dict(document_name=document_name, document_tab=entry.meta.get('document_tab', None), reverse_amount=entry.meta.get('reverse', False), default_currency=default_currency, entry=entry, entry_file=new_entries_path) remote_accounts[entry.account] = options new_entries = [] for (account, options) in remote_accounts.items(): try: new_entries += load_remote_account(connection=connection, errors=errors, account=account, options=options) except Exception as exc: logger.error(f'while processing {account}', exc_info=exc) if (new_entries and new_entries_path): from beancount.parser import printer with new_entries_path.open('w') as stream: printer.print_entries(new_entries, file=stream) logger.info(f'Wrote {len(new_entries)} new account(s) to {new_entries_path}.') return ((entries + new_entries), errors)
def clean_record(record: typing.Dict[(str, str)]): "This is a bit of a hack. But using get_all_records doesn't leave many\n options" new_record = {} for (k, v) in record.items(): k = slugify.slugify(k.lower().strip()) v = str(v) for (field, names) in ALIASES.items(): new_record.setdefault(field, '') if (k in names): new_record[field] += (('. ' if new_record[field] else '') + v) k = None break if (k == 'amount'): v = v.replace(',', '') for s in STRIP_SYMOLS: v = v.replace(s, '') if (v and (not v[0].isdecimal()) and (not (v[0] == '-'))): v = v[1:] try: v = decimal.Decimal(v) except decimal.InvalidOperation: v = 0 if k: new_record[k] = v return new_record
-8,839,127,115,597,063,000
This is a bit of a hack. But using get_all_records doesn't leave many options
src/coolbeans/plugins/sheetsaccount.py
clean_record
runarp/coolbeans
python
def clean_record(record: typing.Dict[(str, str)]): "This is a bit of a hack. But using get_all_records doesn't leave many\n    options" new_record = {} for (k, v) in record.items(): k = slugify.slugify(k.lower().strip()) v = str(v) for (field, names) in ALIASES.items(): new_record.setdefault(field, '') if (k in names): new_record[field] += (('. ' if new_record[field] else '') + v) k = None break if (k == 'amount'): v = v.replace(',', '') for s in STRIP_SYMOLS: v = v.replace(s, '') if (v and (not v[0].isdecimal()) and (not (v[0] == '-'))): v = v[1:] try: v = decimal.Decimal(v) except decimal.InvalidOperation: v = 0 if k: new_record[k] = v return new_record
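The amount-normalisation rules buried in clean_record above (drop thousands separators and known currency symbols, strip one leftover leading character, fall back to 0 when Decimal parsing fails) are easier to read in isolation. A standalone sketch; STRIP_SYMOLS is module-level configuration not shown in this record, so the symbol list below is an assumption:

import decimal

# Assumed stand-in for the module-level STRIP_SYMOLS list.
ASSUMED_STRIP_SYMBOLS = ['$', '€', '£']

def clean_amount(raw: str) -> decimal.Decimal:
    value = str(raw).replace(',', '')           # drop thousands separators
    for symbol in ASSUMED_STRIP_SYMBOLS:
        value = value.replace(symbol, '')       # drop known currency symbols
    if value and not value[0].isdecimal() and value[0] != '-':
        value = value[1:]                       # drop one leftover leading symbol
    try:
        return decimal.Decimal(value)
    except decimal.InvalidOperation:
        return decimal.Decimal(0)               # unparseable cells become 0

print(clean_amount('$1,234.50'))   # -> 1234.50
print(clean_amount('(n/a)'))       # -> 0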
def load_remote_account(connection: gspread.Client, errors: list, account: str, options: typing.Dict[(str, str)]): 'Try to Load Entries from URL into Account.\n\n options include:\n - document_name -- the Actual Google Doc name\n - document_tab -- the Tab name on the Doc\n - default_currency - the entry currency if None is provided\n - reverse_amount - if true, assume positive entries are credits\n\n ' entries = [] document_name = options['document_name'] document_tab = (options.get('document_tab', 0) or 0) default_currency = options['default_currency'] reverse_amount = options.get('reverse_amount', False) if (not document_name): return m = ((- 1) if reverse_amount else 1) logger.info(f'Attempting to download entries for {account} from {document_name}.{document_tab}') workbook = connection.open(document_name) sheet = None try: document_tab = int(document_tab) sheet = workbook.get_worksheet(document_tab) except ValueError: pass if (sheet is None): sheet = workbook.worksheet(document_tab) records = sheet.get_all_records() import re row = 0 for record in records: row += 1 record = clean_record(record) if (('date' not in record) or (not record['date'])): continue if (('amount' not in record) or (not record['amount'])): continue narration = record.pop('narration', None) payee = record.pop('payee', None) tagstr = record.pop('tags', '') tags = (set(re.split('\\W+', tagstr)) if tagstr else set()) date = dateparser.parse(record.pop('date')) if date: date = datetime.date(year=date.year, month=date.month, day=date.day) linkstr = record.pop('links', '') links = (set(re.split('\\W+', linkstr)) if linkstr else set()) meta = {'filename': str(options['entry_file']), 'lineno': 0, 'document-sheet-row': f'{document_name}/{document_tab}/{(row + 1)}'} amount = (decimal.Decimal(record.pop('amount')) * m) currency = record.pop('currency', default_currency) entry_account = record.pop('account') for (k, v) in record.items(): if v: meta[k] = v try: if (not entry_account): errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}") logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}") continue entry = data.Transaction(date=date, narration=narration, payee=payee, tags=tags, meta=meta, links=links, flag='*', postings=[data.Posting(account=account, units=data.Amount(amount, currency), cost=None, price=None, flag='*', meta={}), data.Posting(account=entry_account, units=data.Amount((- amount), currency), cost=None, price=None, flag='*', meta={})]) entries.append(entry) except Exception as exc: logger.error(f'Error while parsing {record}', exc_info=exc) errors.append(str(exc)) logger.info(f'Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}') return entries
-5,359,603,005,264,168,000
Try to Load Entries from URL into Account. options include: - document_name -- the Actual Google Doc name - document_tab -- the Tab name on the Doc - default_currency - the entry currency if None is provided - reverse_amount - if true, assume positive entries are credits
src/coolbeans/plugins/sheetsaccount.py
load_remote_account
runarp/coolbeans
python
def load_remote_account(connection: gspread.Client, errors: list, account: str, options: typing.Dict[(str, str)]): 'Try to Load Entries from URL into Account.\n\n options include:\n - document_name -- the Actual Google Doc name\n - document_tab -- the Tab name on the Doc\n - default_currency - the entry currency if None is provided\n - reverse_amount - if true, assume positive entries are credits\n\n ' entries = [] document_name = options['document_name'] document_tab = (options.get('document_tab', 0) or 0) default_currency = options['default_currency'] reverse_amount = options.get('reverse_amount', False) if (not document_name): return m = ((- 1) if reverse_amount else 1) logger.info(f'Attempting to download entries for {account} from {document_name}.{document_tab}') workbook = connection.open(document_name) sheet = None try: document_tab = int(document_tab) sheet = workbook.get_worksheet(document_tab) except ValueError: pass if (sheet is None): sheet = workbook.worksheet(document_tab) records = sheet.get_all_records() import re row = 0 for record in records: row += 1 record = clean_record(record) if (('date' not in record) or (not record['date'])): continue if (('amount' not in record) or (not record['amount'])): continue narration = record.pop('narration', None) payee = record.pop('payee', None) tagstr = record.pop('tags', '') tags = (set(re.split('\\W+', tagstr)) if tagstr else set()) date = dateparser.parse(record.pop('date')) if date: date = datetime.date(year=date.year, month=date.month, day=date.day) linkstr = record.pop('links', '') links = (set(re.split('\\W+', linkstr)) if linkstr else set()) meta = {'filename': str(options['entry_file']), 'lineno': 0, 'document-sheet-row': f'{document_name}/{document_tab}/{(row + 1)}'} amount = (decimal.Decimal(record.pop('amount')) * m) currency = record.pop('currency', default_currency) entry_account = record.pop('account') for (k, v) in record.items(): if v: meta[k] = v try: if (not entry_account): errors.append(f"Skipping Record with Blank Account: {meta['document-sheet-row']}") logger.warning(f"Skipping Record with Blank Account: {meta['document-sheet-row']}") continue entry = data.Transaction(date=date, narration=narration, payee=payee, tags=tags, meta=meta, links=links, flag='*', postings=[data.Posting(account=account, units=data.Amount(amount, currency), cost=None, price=None, flag='*', meta={}), data.Posting(account=entry_account, units=data.Amount((- amount), currency), cost=None, price=None, flag='*', meta={})]) entries.append(entry) except Exception as exc: logger.error(f'Error while parsing {record}', exc_info=exc) errors.append(str(exc)) logger.info(f'Loaded {len(entries)} entries for {account} from {document_name}.{document_tab}') return entries
def setUp(self): 'Setup' super(TestPropertyOnlyOne, self).setUp() self.collection.register(OnlyOne())
-8,261,507,296,794,374,000
Setup
test/unit/rules/resources/properties/test_onlyone.py
setUp
awkspace/cfn-python-lint
python
def setUp(self): super(TestPropertyOnlyOne, self).setUp() self.collection.register(OnlyOne())
def test_file_positive(self): 'Test Positive' self.helper_file_positive()
-1,556,978,985,838,885,400
Test Positive
test/unit/rules/resources/properties/test_onlyone.py
test_file_positive
awkspace/cfn-python-lint
python
def test_file_positive(self): self.helper_file_positive()
def test_file_negative(self): 'Test failure' self.helper_file_negative('test/fixtures/templates/bad/resources/properties/onlyone.yaml', 5)
4,632,762,795,947,568,000
Test failure
test/unit/rules/resources/properties/test_onlyone.py
test_file_negative
awkspace/cfn-python-lint
python
def test_file_negative(self): self.helper_file_negative('test/fixtures/templates/bad/resources/properties/onlyone.yaml', 5)
def testWhileTypeErrors(self): 'Test typing error messages for while.' tuple_treedef = tree_util.tree_structure((1.0, 1.0)) leaf_treedef = tree_util.tree_structure(0.0) with self.assertRaisesRegex(TypeError, re.escape(f'cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.')): lax.while_loop((lambda c: (1.0, 1.0)), (lambda c: c), 0.0) with self.assertRaisesRegex(TypeError, re.escape('cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].')): lax.while_loop((lambda c: np.float32(1.0)), (lambda c: c), np.float32(0.0)) with self.assertRaisesRegex(TypeError, re.escape(f'body_fun output and input must have same type structure, got {tuple_treedef} and {leaf_treedef}.')): lax.while_loop((lambda c: True), (lambda c: (1.0, 1.0)), 0.0) with self.assertRaisesWithLiteralMatch(TypeError, 'body_fun output and input must have identical types, got\nShapedArray(bool[], weak_type=True)\nand\nShapedArray(float32[]).'): lax.while_loop((lambda c: True), (lambda c: True), np.float32(0.0))
-5,920,863,230,374,971,000
Test typing error messages for while.
tests/lax_control_flow_test.py
testWhileTypeErrors
cdfreeman-google/jax
python
def testWhileTypeErrors(self): tuple_treedef = tree_util.tree_structure((1.0, 1.0)) leaf_treedef = tree_util.tree_structure(0.0) with self.assertRaisesRegex(TypeError, re.escape(f'cond_fun must return a boolean scalar, but got pytree {tuple_treedef}.')): lax.while_loop((lambda c: (1.0, 1.0)), (lambda c: c), 0.0) with self.assertRaisesRegex(TypeError, re.escape('cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].')): lax.while_loop((lambda c: np.float32(1.0)), (lambda c: c), np.float32(0.0)) with self.assertRaisesRegex(TypeError, re.escape(f'body_fun output and input must have same type structure, got {tuple_treedef} and {leaf_treedef}.')): lax.while_loop((lambda c: True), (lambda c: (1.0, 1.0)), 0.0) with self.assertRaisesWithLiteralMatch(TypeError, 'body_fun output and input must have identical types, got\nShapedArray(bool[], weak_type=True)\nand\nShapedArray(float32[]).'): lax.while_loop((lambda c: True), (lambda c: True), np.float32(0.0))
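For contrast with the ill-typed calls the test above exercises, a minimal well-typed lax.while_loop: cond_fun returns a boolean scalar and body_fun returns exactly the carry's type (a sketch, not part of the test suite):

>>> import jax.numpy as jnp
>>> from jax import lax
>>> float(lax.while_loop(lambda c: c < 10.0, lambda c: c + 2.0, jnp.float32(0.0)))
10.0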
def testForiLoopErrors(self): 'Test typing error messages for while.' with self.assertRaisesRegex(TypeError, 'arguments to fori_loop must have equal types'): lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
-5,943,806,257,004,347,000
Test typing error messages for while.
tests/lax_control_flow_test.py
testForiLoopErrors
cdfreeman-google/jax
python
def testForiLoopErrors(self): with self.assertRaisesRegex(TypeError, 'arguments to fori_loop must have equal types'): lax.fori_loop(np.int16(0), jnp.int32(10), (lambda i, c: c), jnp.float32(7))
def testCondTypeErrors(self): 'Test typing error messages for cond.' with self.assertRaisesRegex(TypeError, re.escape('Pred type must be either boolean or number, got <function')): lax.cond((lambda x: True), (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape("Pred must be a scalar, got foo of type <class 'str'>")): lax.cond('foo', (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")): lax.cond((1.0, 1.0), (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape(f'true_fun and false_fun output must have same type structure, got {tree_util.tree_structure(2.0)} and {tree_util.tree_structure((3.0, 3.0))}.')): lax.cond(True, (lambda top: 2.0), (lambda fop: (3.0, 3.0)), 1.0) with self.assertRaisesRegex(TypeError, textwrap.dedent('\n true_fun and false_fun output must have identical types, got\n ShapedArray\\(float32\\[1\\]\\)\n and\n ShapedArray\\(float32\\[\\].*\\).').strip()): lax.cond(True, (lambda top: jnp.array([1.0], jnp.float32)), (lambda fop: jnp.float32(1.0)), 1.0)
-5,686,292,944,912,370,000
Test typing error messages for cond.
tests/lax_control_flow_test.py
testCondTypeErrors
cdfreeman-google/jax
python
def testCondTypeErrors(self): with self.assertRaisesRegex(TypeError, re.escape('Pred type must be either boolean or number, got <function')): lax.cond((lambda x: True), (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape("Pred must be a scalar, got foo of type <class 'str'>")): lax.cond('foo', (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape("Pred must be a scalar, got (1.0, 1.0) of type <class 'tuple'>")): lax.cond((1.0, 1.0), (lambda top: 2.0), (lambda fop: 3.0), 1.0) with self.assertRaisesRegex(TypeError, re.escape(f'true_fun and false_fun output must have same type structure, got {tree_util.tree_structure(2.0)} and {tree_util.tree_structure((3.0, 3.0))}.')): lax.cond(True, (lambda top: 2.0), (lambda fop: (3.0, 3.0)), 1.0) with self.assertRaisesRegex(TypeError, textwrap.dedent('\n true_fun and false_fun output must have identical types, got\n ShapedArray\\(float32\\[1\\]\\)\n and\n ShapedArray\\(float32\\[\\].*\\).').strip()): lax.cond(True, (lambda top: jnp.array([1.0], jnp.float32)), (lambda fop: jnp.float32(1.0)), 1.0)
def testSwitchErrors(self): 'Test typing error messages for switch.' with self.assertRaisesRegex(TypeError, re.escape('Index type must be an integer, got <function')): lax.switch((lambda x: True), [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(TypeError, re.escape('Index type must be an integer, got foo.')): lax.switch('foo', [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(TypeError, re.escape('Branch index must be scalar, got (1.0, 1.0) of shape (2,).')): lax.switch((1.0, 1.0), [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(ValueError, re.escape('Empty branch sequence')): lax.switch(0, [], 1.0) with self.assertRaisesRegex(TypeError, re.escape(f'branch 0 and 1 outputs must have same type structure, got {tree_util.tree_structure(2.0)} and {tree_util.tree_structure((3.0, 3.0))}.')): lax.switch(1, [(lambda _: 2.0), (lambda _: (3.0, 3.0))], 1.0) with self.assertRaisesRegex(TypeError, textwrap.dedent('\n branch 0 and 1 outputs must have identical types, got\n ShapedArray\\(float32\\[1\\]\\)\n and\n ShapedArray\\(float32\\[\\].*\\).').strip()): lax.switch(1, [(lambda _: jnp.array([1.0], jnp.float32)), (lambda _: jnp.float32(1.0))], 1.0)
-1,112,016,817,928,494,600
Test typing error messages for switch.
tests/lax_control_flow_test.py
testSwitchErrors
cdfreeman-google/jax
python
def testSwitchErrors(self): with self.assertRaisesRegex(TypeError, re.escape('Index type must be an integer, got <function')): lax.switch((lambda x: True), [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(TypeError, re.escape('Index type must be an integer, got foo.')): lax.switch('foo', [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(TypeError, re.escape('Branch index must be scalar, got (1.0, 1.0) of shape (2,).')): lax.switch((1.0, 1.0), [(lambda _: 2.0), (lambda _: 3.0)], 1.0) with self.assertRaisesRegex(ValueError, re.escape('Empty branch sequence')): lax.switch(0, [], 1.0) with self.assertRaisesRegex(TypeError, re.escape(f'branch 0 and 1 outputs must have same type structure, got {tree_util.tree_structure(2.0)} and {tree_util.tree_structure((3.0, 3.0))}.')): lax.switch(1, [(lambda _: 2.0), (lambda _: (3.0, 3.0))], 1.0) with self.assertRaisesRegex(TypeError, textwrap.dedent('\n branch 0 and 1 outputs must have identical types, got\n ShapedArray\\(float32\\[1\\]\\)\n and\n ShapedArray\\(float32\\[\\].*\\).').strip()): lax.switch(1, [(lambda _: jnp.array([1.0], jnp.float32)), (lambda _: jnp.float32(1.0))], 1.0)
def testScanTypeErrors(self): 'Test typing error messages for scan.' a = jnp.arange(5) with self.assertRaisesRegex(TypeError, re.escape('scan body output must be a pair, got ShapedArray(float32[]).')): lax.scan((lambda c, x: np.float32(0.0)), 0, a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure((0, 0, 0))} and {tree_util.tree_structure((1, (2, 3)))}')): lax.scan((lambda c, x: ((0, 0, 0), x)), (1, (2, 3)), a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.')): lax.scan((lambda c, x: (0, x)), None, a) with self.assertRaisesWithLiteralMatch(TypeError, 'scan carry output and input must have identical types, got\nShapedArray(int32[])\nand\nShapedArray(float32[]).'): lax.scan((lambda c, x: (np.int32(0), x)), np.float32(1.0), a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.')): lax.scan((lambda c, x: (0, x)), (1, 2), a)
-7,095,610,930,942,497,000
Test typing error messages for scan.
tests/lax_control_flow_test.py
testScanTypeErrors
cdfreeman-google/jax
python
def testScanTypeErrors(self): a = jnp.arange(5) with self.assertRaisesRegex(TypeError, re.escape('scan body output must be a pair, got ShapedArray(float32[]).')): lax.scan((lambda c, x: np.float32(0.0)), 0, a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure((0, 0, 0))} and {tree_util.tree_structure((1, (2, 3)))}')): lax.scan((lambda c, x: ((0, 0, 0), x)), (1, (2, 3)), a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure(a)} and {tree_util.tree_structure(None)}.')): lax.scan((lambda c, x: (0, x)), None, a) with self.assertRaisesWithLiteralMatch(TypeError, 'scan carry output and input must have identical types, got\nShapedArray(int32[])\nand\nShapedArray(float32[]).'): lax.scan((lambda c, x: (np.int32(0), x)), np.float32(1.0), a) with self.assertRaisesRegex(TypeError, re.escape(f'scan carry output and input must have same type structure, got {tree_util.tree_structure(a)} and {tree_util.tree_structure((1, 2))}.')): lax.scan((lambda c, x: (0, x)), (1, 2), a)
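Similarly, a well-typed lax.scan counterpart to the failing calls above: the body returns a (carry, output) pair and the carry keeps its int32 type across steps (a cumulative sum, purely for illustration):

>>> import jax.numpy as jnp
>>> from jax import lax
>>> def cumsum_step(carry, x):
...     carry = carry + x        # carry keeps the same dtype and structure
...     return carry, carry      # (new carry, per-step output) pair
>>> total, ys = lax.scan(cumsum_step, jnp.int32(0), jnp.arange(5, dtype=jnp.int32))
>>> int(total), [int(y) for y in ys]
(10, [0, 1, 3, 6, 10])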
@jtu.skip_on_flag('jax_skip_slow_tests', True) def test_custom_linear_solve_pytree(self): 'Test custom linear solve with inputs and outputs that are pytrees.' def unrolled_matvec(mat, x): 'Apply a Python list of lists of scalars to a list of scalars.' result = [] for i in range(len(mat)): v = 0 for j in range(len(x)): if (mat[i][j] is not None): v += (mat[i][j] * x[j]) result.append(v) return result def unrolled_substitution_solve(matvec, b, lower_tri): 'Solve a triangular unrolled system with fwd/back substitution.' zero = jnp.zeros(()) one = jnp.ones(()) x = [zero for _ in b] ordering = (range(len(b)) if lower_tri else range((len(b) - 1), (- 1), (- 1))) for i in ordering: residual = (b[i] - matvec(x)[i]) diagonal = matvec([(one if (i == j) else zero) for j in range(len(b))])[i] x[i] = (residual / diagonal) return x def custom_unrolled_lower_tri_solve(mat, b): return lax.custom_linear_solve(partial(unrolled_matvec, mat), b, partial(unrolled_substitution_solve, lower_tri=True), partial(unrolled_substitution_solve, lower_tri=False)) mat = [[1.0, None, None, None, None, None, None], [1.0, 1.0, None, None, None, None, None], [None, 1.0, 1.0, None, None, None, None], [None, None, 1.0, 1.0, None, None, None], [None, None, None, 1.0, 1.0, None, None], [None, None, None, None, None, 2.0, None], [None, None, None, None, None, 4.0, 3.0]] rng = np.random.RandomState(0) b = list(rng.randn(7)) jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2, rtol={jnp.float32: 0.02}) b_bat = list(b) b_bat[3] = rng.randn(3) jtu.check_grads(api.vmap(custom_unrolled_lower_tri_solve, in_axes=(None, [None, None, None, 0, None, None, None]), out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat), order=2, rtol={jnp.float32: 0.01}) mat[2][1] = rng.randn(3) mat_axis_tree = [[(0 if ((i == 2) and (j == 1)) else None) for j in range(7)] for i in range(7)] jtu.check_grads(api.vmap(custom_unrolled_lower_tri_solve, in_axes=(mat_axis_tree, None), out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b), order=2)
-7,709,107,645,537,896,000
Test custom linear solve with inputs and outputs that are pytrees.
tests/lax_control_flow_test.py
test_custom_linear_solve_pytree
cdfreeman-google/jax
python
@jtu.skip_on_flag('jax_skip_slow_tests', True) def test_custom_linear_solve_pytree(self): def unrolled_matvec(mat, x): 'Apply a Python list of lists of scalars to a list of scalars.' result = [] for i in range(len(mat)): v = 0 for j in range(len(x)): if (mat[i][j] is not None): v += (mat[i][j] * x[j]) result.append(v) return result def unrolled_substitution_solve(matvec, b, lower_tri): 'Solve a triangular unrolled system with fwd/back substitution.' zero = jnp.zeros(()) one = jnp.ones(()) x = [zero for _ in b] ordering = (range(len(b)) if lower_tri else range((len(b) - 1), (- 1), (- 1))) for i in ordering: residual = (b[i] - matvec(x)[i]) diagonal = matvec([(one if (i == j) else zero) for j in range(len(b))])[i] x[i] = (residual / diagonal) return x def custom_unrolled_lower_tri_solve(mat, b): return lax.custom_linear_solve(partial(unrolled_matvec, mat), b, partial(unrolled_substitution_solve, lower_tri=True), partial(unrolled_substitution_solve, lower_tri=False)) mat = [[1.0, None, None, None, None, None, None], [1.0, 1.0, None, None, None, None, None], [None, 1.0, 1.0, None, None, None, None], [None, None, 1.0, 1.0, None, None, None], [None, None, None, 1.0, 1.0, None, None], [None, None, None, None, None, 2.0, None], [None, None, None, None, None, 4.0, 3.0]] rng = np.random.RandomState(0) b = list(rng.randn(7)) jtu.check_grads(custom_unrolled_lower_tri_solve, (mat, b), order=2, rtol={jnp.float32: 0.02}) b_bat = list(b) b_bat[3] = rng.randn(3) jtu.check_grads(api.vmap(custom_unrolled_lower_tri_solve, in_axes=(None, [None, None, None, 0, None, None, None]), out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b_bat), order=2, rtol={jnp.float32: 0.01}) mat[2][1] = rng.randn(3) mat_axis_tree = [[(0 if ((i == 2) and (j == 1)) else None) for j in range(7)] for i in range(7)] jtu.check_grads(api.vmap(custom_unrolled_lower_tri_solve, in_axes=(mat_axis_tree, None), out_axes=[0, 0, 0, 0, 0, None, None]), (mat, b), order=2)
def unrolled_matvec(mat, x): 'Apply a Python list of lists of scalars to a list of scalars.' result = [] for i in range(len(mat)): v = 0 for j in range(len(x)): if (mat[i][j] is not None): v += (mat[i][j] * x[j]) result.append(v) return result
-1,598,892,281,679,112,400
Apply a Python list of lists of scalars to a list of scalars.
tests/lax_control_flow_test.py
unrolled_matvec
cdfreeman-google/jax
python
def unrolled_matvec(mat, x): result = [] for i in range(len(mat)): v = 0 for j in range(len(x)): if (mat[i][j] is not None): v += (mat[i][j] * x[j]) result.append(v) return result
def unrolled_substitution_solve(matvec, b, lower_tri): 'Solve a triangular unrolled system with fwd/back substitution.' zero = jnp.zeros(()) one = jnp.ones(()) x = [zero for _ in b] ordering = (range(len(b)) if lower_tri else range((len(b) - 1), (- 1), (- 1))) for i in ordering: residual = (b[i] - matvec(x)[i]) diagonal = matvec([(one if (i == j) else zero) for j in range(len(b))])[i] x[i] = (residual / diagonal) return x
146,545,813,991,085,980
Solve a triangular unrolled system with fwd/back substitution.
tests/lax_control_flow_test.py
unrolled_substitution_solve
cdfreeman-google/jax
python
def unrolled_substitution_solve(matvec, b, lower_tri): zero = jnp.zeros(()) one = jnp.ones(()) x = [zero for _ in b] ordering = (range(len(b)) if lower_tri else range((len(b) - 1), (- 1), (- 1))) for i in ordering: residual = (b[i] - matvec(x)[i]) diagonal = matvec([(one if (i == j) else zero) for j in range(len(b))])[i] x[i] = (residual / diagonal) return x
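The substitution solver above is easier to sanity-check outside of jax. A standalone numpy sketch of plain forward substitution on an assumed 2x2 lower-triangular system (values are hypothetical, not taken from the test):

import numpy as np

L = np.array([[2.0, 0.0],
              [3.0, 4.0]])
b = np.array([2.0, 7.0])

x = np.zeros_like(b)
for i in range(len(b)):
    # subtract the already-solved terms, then divide by the diagonal entry
    x[i] = (b[i] - np.dot(L[i, :i], x[:i])) / L[i, i]

print(x)                      # [1. 1.]
print(np.allclose(L @ x, b))  # True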
def nodeset(self): 'set of all node idxs' raise NotImplementedError()
-6,226,151,121,687,879,000
set of all node idxs
LocalMercurial/mercurial/dagutil.py
nodeset
l2dy/machg
python
def nodeset(self): raise NotImplementedError()
def heads(self): 'list of head ixs' raise NotImplementedError()
-2,098,944,108,082,554,600
list of head ixs
LocalMercurial/mercurial/dagutil.py
heads
l2dy/machg
python
def heads(self): raise NotImplementedError()
def parents(self, ix): 'list of parents ixs of ix' raise NotImplementedError()
2,204,374,004,237,189,400
list of parents ixs of ix
LocalMercurial/mercurial/dagutil.py
parents
l2dy/machg
python
def parents(self, ix): raise NotImplementedError()
def inverse(self): 'inverse DAG, where parents becomes children, etc.' raise NotImplementedError()
1,920,263,474,415,640,000
inverse DAG, where parents becomes children, etc.
LocalMercurial/mercurial/dagutil.py
inverse
l2dy/machg
python
def inverse(self): raise NotImplementedError()
def ancestorset(self, starts, stops=None): '\n set of all ancestors of starts (incl), but stop walk at stops (excl)\n ' raise NotImplementedError()
5,984,893,086,663,339,000
set of all ancestors of starts (incl), but stop walk at stops (excl)
LocalMercurial/mercurial/dagutil.py
ancestorset
l2dy/machg
python
def ancestorset(self, starts, stops=None): raise NotImplementedError()
def descendantset(self, starts, stops=None): '\n set of all descendants of starts (incl), but stop walk at stops (excl)\n ' return self.inverse().ancestorset(starts, stops)
8,106,918,935,212,782,000
set of all descendants of starts (incl), but stop walk at stops (excl)
LocalMercurial/mercurial/dagutil.py
descendantset
l2dy/machg
python
def descendantset(self, starts, stops=None): return self.inverse().ancestorset(starts, stops)
def headsetofconnecteds(self, ixs): '\n subset of connected list of ixs so that no node has a descendant in it\n\n By "connected list" we mean that if an ancestor and a descendant are in\n the list, then so is at least one path connecting them.\n ' raise NotImplementedError()
3,914,432,140,549,609,500
subset of connected list of ixs so that no node has a descendant in it By "connected list" we mean that if an ancestor and a descendant are in the list, then so is at least one path connecting them.
LocalMercurial/mercurial/dagutil.py
headsetofconnecteds
l2dy/machg
python
def headsetofconnecteds(self, ixs): '\n subset of connected list of ixs so that no node has a descendant in it\n\n By "connected list" we mean that if an ancestor and a descendant are in\n the list, then so is at least one path connecting them.\n ' raise NotImplementedError()
def externalize(self, ix): 'return a list of (or set if given a set) of node ids' return self._externalize(ix)
-5,374,513,761,426,537,000
return a list of (or set if given a set) of node ids
LocalMercurial/mercurial/dagutil.py
externalize
l2dy/machg
python
def externalize(self, ix): return self._externalize(ix)
def externalizeall(self, ixs): 'return a list of (or set if given a set) of node ids' ids = self._externalizeall(ixs) if isinstance(ixs, set): return set(ids) return list(ids)
-807,789,611,213,240,000
return a list of (or set if given a set) of node ids
LocalMercurial/mercurial/dagutil.py
externalizeall
l2dy/machg
python
def externalizeall(self, ixs): ids = self._externalizeall(ixs) if isinstance(ixs, set): return set(ids) return list(ids)
def internalize(self, id): 'return a list of (or set if given a set) of node ixs' return self._internalize(id)
2,039,032,217,749,656,000
return a list of (or set if given a set) of node ixs
LocalMercurial/mercurial/dagutil.py
internalize
l2dy/machg
python
def internalize(self, id): return self._internalize(id)
def internalizeall(self, ids, filterunknown=False): 'return a list of (or set if given a set) of node ids' ixs = self._internalizeall(ids, filterunknown) if isinstance(ids, set): return set(ixs) return list(ixs)
-8,972,105,589,571,481,000
return a list of (or set if given a set) of node ids
LocalMercurial/mercurial/dagutil.py
internalizeall
l2dy/machg
python
def internalizeall(self, ids, filterunknown=False): ixs = self._internalizeall(ids, filterunknown) if isinstance(ids, set): return set(ixs) return list(ixs)
def linearize(self, ixs): 'linearize and topologically sort a list of revisions\n\n The linearization process tries to create long runs of revs where\n a child rev comes immediately after its first parent. This is done by\n visiting the heads of the given revs in inverse topological order,\n and for each visited rev, visiting its second parent, then its first\n parent, then adding the rev itself to the output list.\n ' sorted = [] visit = list(self.headsetofconnecteds(ixs)) visit.sort(reverse=True) finished = set() while visit: cur = visit.pop() if (cur < 0): cur = ((- cur) - 1) if (cur not in finished): sorted.append(cur) finished.add(cur) else: visit.append(((- cur) - 1)) visit += [p for p in self.parents(cur) if ((p in ixs) and (p not in finished))] assert (len(sorted) == len(ixs)) return sorted
-5,283,142,161,144,543,000
linearize and topologically sort a list of revisions The linearization process tries to create long runs of revs where a child rev comes immediately after its first parent. This is done by visiting the heads of the given revs in inverse topological order, and for each visited rev, visiting its second parent, then its first parent, then adding the rev itself to the output list.
LocalMercurial/mercurial/dagutil.py
linearize
l2dy/machg
python
def linearize(self, ixs): 'linearize and topologically sort a list of revisions\n\n The linearization process tries to create long runs of revs where\n a child rev comes immediately after its first parent. This is done by\n visiting the heads of the given revs in inverse topological order,\n and for each visited rev, visiting its second parent, then its first\n parent, then adding the rev itself to the output list.\n ' sorted = [] visit = list(self.headsetofconnecteds(ixs)) visit.sort(reverse=True) finished = set() while visit: cur = visit.pop() if (cur < 0): cur = ((- cur) - 1) if (cur not in finished): sorted.append(cur) finished.add(cur) else: visit.append(((- cur) - 1)) visit += [p for p in self.parents(cur) if ((p in ixs) and (p not in finished))] assert (len(sorted) == len(ixs)) return sorted
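The walk above is easier to follow on a concrete graph. A standalone sketch of the same marker-based traversal over a toy parents mapping (hypothetical revision numbers; not tied to Mercurial's revlog or the abstract DAG class):

# Each revision maps to its parent revisions.
PARENTS = {0: [], 1: [0], 2: [0], 3: [1, 2], 4: [3]}

def toy_linearize(ixs, parents):
    # heads: members of ixs that no other member lists as a parent
    non_heads = {p for ix in ixs for p in parents[ix] if p in ixs}
    visit = sorted(ixs - non_heads, reverse=True)
    ordered, finished = [], set()
    while visit:
        cur = visit.pop()
        if cur < 0:                     # marker: emit this rev after its parents
            cur = -cur - 1
            if cur not in finished:
                ordered.append(cur)
                finished.add(cur)
        else:
            visit.append(-cur - 1)      # re-queue cur behind its unfinished parents
            visit += [p for p in parents[cur] if p in ixs and p not in finished]
    return ordered

print(toy_linearize({0, 1, 2, 3, 4}, PARENTS))   # [0, 2, 1, 3, 4]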
def test_login_required_to_view_ingredients(self): 'Test that authentication is needed to view the ingredients.' res = self.client.get(INGREDIENTS_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
1,475,239,166,932,663,800
Test that authentication is needed to view the ingredients.
app/recipe_app/tests/test_ingredients_api.py
test_login_required_to_view_ingredients
oyekanmiayo/recipe-app-api
python
def test_login_required_to_view_ingredients(self): res = self.client.get(INGREDIENTS_URL) self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_retrieve_ingredients_is_successful(self): 'Test retrieve ingredients' Ingredient.objects.create(user=self.user, name='Carrot') Ingredient.objects.create(user=self.user, name='Lemon') res = self.client.get(INGREDIENTS_URL) ingredients = Ingredient.objects.all().order_by('-name') serializer = IngredientSerializer(ingredients, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)
6,252,634,421,779,660,000
Test retrieve ingredients
app/recipe_app/tests/test_ingredients_api.py
test_retrieve_ingredients_is_successful
oyekanmiayo/recipe-app-api
python
def test_retrieve_ingredients_is_successful(self): Ingredient.objects.create(user=self.user, name='Carrot') Ingredient.objects.create(user=self.user, name='Lemon') res = self.client.get(INGREDIENTS_URL) ingredients = Ingredient.objects.all().order_by('-name') serializer = IngredientSerializer(ingredients, many=True) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(res.data, serializer.data)
def test_retrieved_ingredients_limited_to_user(self): "Tests that only the user's ingredients are retrieved" user2 = create_user(fname='Test2', lname='User2', email='[email protected]', password='test2pass') Ingredient.objects.create(user=user2, name='Carrot') ingredient = Ingredient.objects.create(user=self.user, name='Lemon') res = self.client.get(INGREDIENTS_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data[0]['name'], ingredient.name)
6,069,663,110,617,207,000
Tests that only the user's ingredients are retrieved
app/recipe_app/tests/test_ingredients_api.py
test_retrieved_ingredients_limited_to_user
oyekanmiayo/recipe-app-api
python
def test_retrieved_ingredients_limited_to_user(self): user2 = create_user(fname='Test2', lname='User2', email='[email protected]', password='test2pass') Ingredient.objects.create(user=user2, name='Carrot') ingredient = Ingredient.objects.create(user=self.user, name='Lemon') res = self.client.get(INGREDIENTS_URL) self.assertEqual(res.status_code, status.HTTP_200_OK) self.assertEqual(len(res.data), 1) self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_is_successful(self): 'Test that creating a new ingredient is successful.' payload = {'name': 'Lemon'} self.client.post(INGREDIENTS_URL, payload) exists = Ingredient.objects.filter(user=self.user, name=payload['name']).exists() self.assertTrue(exists)
-6,702,414,564,749,636,000
Test that creating a new ingredient is successful.
app/recipe_app/tests/test_ingredients_api.py
test_create_ingredient_is_successful
oyekanmiayo/recipe-app-api
python
def test_create_ingredient_is_successful(self): payload = {'name': 'Lemon'} self.client.post(INGREDIENTS_URL, payload) exists = Ingredient.objects.filter(user=self.user, name=payload['name']).exists() self.assertTrue(exists)
def test_create_ingredient_with_invalid_details_invalid(self): 'Test that ingredients is not created with invalid details' payload = {'name': ''} res = self.client.post(INGREDIENTS_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
-1,150,530,564,363,053,600
Test that an ingredient is not created with invalid details
app/recipe_app/tests/test_ingredients_api.py
test_create_ingredient_with_invalid_details_invalid
oyekanmiayo/recipe-app-api
python
def test_create_ingredient_with_invalid_details_invalid(self): payload = {'name': ''} res = self.client.post(INGREDIENTS_URL, payload) self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
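The ingredient tests above rely on module-level helpers that are not shown in this dump (INGREDIENTS_URL, create_user). A plausible sketch of them, assuming standard DRF reverse() routing and a custom user model (the route name here is an assumption, not taken from the repository):

from django.contrib.auth import get_user_model
from django.urls import reverse

INGREDIENTS_URL = reverse('recipe_app:ingredient-list')   # assumed route name

def create_user(**params):
    # Thin wrapper over the project's user manager (assumed signature).
    return get_user_model().objects.create_user(**params)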
@property def priority(self): 'Priority.' try: return self._priority except Exception as e: raise e
4,155,562,068,715,867,000
Priority.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
priority
HanseMerkur/nitro-python
python
@property def priority(self): try: return self._priority except Exception as e: raise e
@priority.setter def priority(self, priority): 'Priority.\n\n :param priority: \n\n ' try: self._priority = priority except Exception as e: raise e
4,503,601,667,460,624,400
Priority. :param priority:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
priority
HanseMerkur/nitro-python
python
@priority.setter def priority(self, priority): 'Priority.\n\n :param priority: \n\n ' try: self._priority = priority except Exception as e: raise e
@property def gotopriorityexpression(self): 'Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.' try: return self._gotopriorityexpression except Exception as e: raise e
8,059,722,376,498,583,000
Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
gotopriorityexpression
HanseMerkur/nitro-python
python
@property def gotopriorityexpression(self): try: return self._gotopriorityexpression except Exception as e: raise e
@gotopriorityexpression.setter def gotopriorityexpression(self, gotopriorityexpression): 'Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\n :param gotopriorityexpression: \n\n ' try: self._gotopriorityexpression = gotopriorityexpression except Exception as e: raise e
-6,898,664,913,910,547,000
Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE. :param gotopriorityexpression:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
gotopriorityexpression
HanseMerkur/nitro-python
python
@gotopriorityexpression.setter def gotopriorityexpression(self, gotopriorityexpression): 'Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.\n\n :param gotopriorityexpression: \n\n ' try: self._gotopriorityexpression = gotopriorityexpression except Exception as e: raise e
@property def policyname(self): 'Name of the policy bound to the LB vserver.' try: return self._policyname except Exception as e: raise e
-5,365,522,352,492,252,000
Name of the policy bound to the LB vserver.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
policyname
HanseMerkur/nitro-python
python
@property def policyname(self): try: return self._policyname except Exception as e: raise e
@policyname.setter def policyname(self, policyname): 'Name of the policy bound to the LB vserver.\n\n :param policyname: \n\n ' try: self._policyname = policyname except Exception as e: raise e
-1,122,037,315,034,263,200
Name of the policy bound to the LB vserver. :param policyname:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
policyname
HanseMerkur/nitro-python
python
@policyname.setter def policyname(self, policyname): 'Name of the policy bound to the LB vserver.\n\n :param policyname: \n\n ' try: self._policyname = policyname except Exception as e: raise e
@property def name(self): 'Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.\n CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or \'my vserver\'). .<br/>Minimum length = 1.\n\n\n ' try: return self._name except Exception as e: raise e
6,946,818,036,726,950,000
Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created. CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
name
HanseMerkur/nitro-python
python
@property def name(self): 'Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.\n CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or \'my vserver\'). .<br/>Minimum length = 1.\n\n\n ' try: return self._name except Exception as e: raise e
@name.setter def name(self, name): 'Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.\n CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or \'my vserver\'). .<br/>Minimum length = 1\n\n :param name: \n\n ' try: self._name = name except Exception as e: raise e
4,977,305,799,432,753,000
Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created. CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or 'my vserver'). .<br/>Minimum length = 1 :param name:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
name
HanseMerkur/nitro-python
python
@name.setter def name(self, name): 'Name for the virtual server. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at sign (@), equal sign (=), and hyphen (-) characters. Can be changed after the virtual server is created.\n CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my vserver" or \'my vserver\'). .<br/>Minimum length = 1\n\n :param name: \n\n ' try: self._name = name except Exception as e: raise e
@property def bindpoint(self): 'The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.' try: return self._bindpoint except Exception as e: raise e
-4,716,883,532,547,503,000
The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
bindpoint
HanseMerkur/nitro-python
python
@property def bindpoint(self): try: return self._bindpoint except Exception as e: raise e
@bindpoint.setter def bindpoint(self, bindpoint): 'The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE\n\n :param bindpoint: \n\n ' try: self._bindpoint = bindpoint except Exception as e: raise e
-1,073,388,628,172,473,000
The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE :param bindpoint:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
bindpoint
HanseMerkur/nitro-python
python
@bindpoint.setter def bindpoint(self, bindpoint): 'The bindpoint to which the policy is bound.<br/>Possible values = REQUEST, RESPONSE\n\n :param bindpoint: \n\n ' try: self._bindpoint = bindpoint except Exception as e: raise e
@property def labeltype(self): 'The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.' try: return self._labeltype except Exception as e: raise e
3,900,809,274,961,790,000
The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
labeltype
HanseMerkur/nitro-python
python
@property def labeltype(self): try: return self._labeltype except Exception as e: raise e
@labeltype.setter def labeltype(self, labeltype): 'The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel\n\n :param labeltype: \n\n ' try: self._labeltype = labeltype except Exception as e: raise e
4,802,778,948,420,752,000
The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel :param labeltype:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
labeltype
HanseMerkur/nitro-python
python
@labeltype.setter def labeltype(self, labeltype): 'The invocation type.<br/>Possible values = reqvserver, resvserver, policylabel\n\n :param labeltype: \n\n ' try: self._labeltype = labeltype except Exception as e: raise e
@property def labelname(self): 'Name of the label invoked.' try: return self._labelname except Exception as e: raise e
-1,357,503,408,469,624,600
Name of the label invoked.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
labelname
HanseMerkur/nitro-python
python
@property def labelname(self): try: return self._labelname except Exception as e: raise e
@labelname.setter def labelname(self, labelname): 'Name of the label invoked.\n\n :param labelname: \n\n ' try: self._labelname = labelname except Exception as e: raise e
8,556,602,361,713,529,000
Name of the label invoked. :param labelname:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
labelname
HanseMerkur/nitro-python
python
@labelname.setter def labelname(self, labelname): 'Name of the label invoked.\n\n :param labelname: \n\n ' try: self._labelname = labelname except Exception as e: raise e
@property def invoke(self): 'Invoke policies bound to a virtual server or policy label.' try: return self._invoke except Exception as e: raise e
-2,666,846,950,919,318,000
Invoke policies bound to a virtual server or policy label.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
invoke
HanseMerkur/nitro-python
python
@property def invoke(self): try: return self._invoke except Exception as e: raise e
@invoke.setter def invoke(self, invoke): 'Invoke policies bound to a virtual server or policy label.\n\n :param invoke: \n\n ' try: self._invoke = invoke except Exception as e: raise e
1,051,209,353,183,409,900
Invoke policies bound to a virtual server or policy label. :param invoke:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
invoke
HanseMerkur/nitro-python
python
@invoke.setter def invoke(self, invoke): 'Invoke policies bound to a virtual server or policy label.\n\n :param invoke: \n\n ' try: self._invoke = invoke except Exception as e: raise e
@property def sc(self): 'Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.' try: return self._sc except Exception as e: raise e
-6,716,229,637,475,839,000
Use SureConnect on the virtual server.<br/>Default value: OFF<br/>Possible values = ON, OFF.
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
sc
HanseMerkur/nitro-python
python
@property def sc(self): try: return self._sc except Exception as e: raise e
def _get_nitro_response(self, service, response): 'converts nitro response into object and returns the object array in case of get request.\n\n :param service: \n :param response: \n\n ' try: result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__) if (result.errorcode != 0): if (result.errorcode == 444): service.clear_session(self) if result.severity: if (result.severity == 'ERROR'): raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else: raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.lbvserver_appfwpolicy_binding except Exception as e: raise e
-8,598,073,818,403,330,000
converts nitro response into object and returns the object array in case of get request. :param service: :param response:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
_get_nitro_response
HanseMerkur/nitro-python
python
def _get_nitro_response(self, service, response): 'converts nitro response into object and returns the object array in case of get request.\n\n :param service: \n :param response: \n\n ' try: result = service.payload_formatter.string_to_resource(lbvserver_appfwpolicy_binding_response, response, self.__class__.__name__) if (result.errorcode != 0): if (result.errorcode == 444): service.clear_session(self) if result.severity: if (result.severity == 'ERROR'): raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else: raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.lbvserver_appfwpolicy_binding except Exception as e: raise e
def _get_object_name(self): 'Returns the value of object identifier argument' try: if (self.name is not None): return str(self.name) return None except Exception as e: raise e
2,555,744,638,475,687,000
Returns the value of object identifier argument
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
_get_object_name
HanseMerkur/nitro-python
python
def _get_object_name(self): try: if (self.name is not None): return str(self.name) return None except Exception as e: raise e
@classmethod def add(cls, client, resource): '\n\n :param client: \n :param resource: \n\n ' try: if (resource and (type(resource) is not list)): updateresource = lbvserver_appfwpolicy_binding() updateresource.name = resource.name updateresource.policyname = resource.policyname updateresource.priority = resource.priority updateresource.gotopriorityexpression = resource.gotopriorityexpression updateresource.bindpoint = resource.bindpoint updateresource.invoke = resource.invoke updateresource.labeltype = resource.labeltype updateresource.labelname = resource.labelname return updateresource.update_resource(client) else: if (resource and (len(resource) > 0)): updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)): updateresources[i].name = resource[i].name updateresources[i].policyname = resource[i].policyname updateresources[i].priority = resource[i].priority updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression updateresources[i].bindpoint = resource[i].bindpoint updateresources[i].invoke = resource[i].invoke updateresources[i].labeltype = resource[i].labeltype updateresources[i].labelname = resource[i].labelname return cls.update_bulk_request(client, updateresources) except Exception as e: raise e
-1,235,520,822,178,636,500
:param client: :param resource:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
add
HanseMerkur/nitro-python
python
@classmethod def add(cls, client, resource): '\n\n :param client: \n :param resource: \n\n ' try: if (resource and (type(resource) is not list)): updateresource = lbvserver_appfwpolicy_binding() updateresource.name = resource.name updateresource.policyname = resource.policyname updateresource.priority = resource.priority updateresource.gotopriorityexpression = resource.gotopriorityexpression updateresource.bindpoint = resource.bindpoint updateresource.invoke = resource.invoke updateresource.labeltype = resource.labeltype updateresource.labelname = resource.labelname return updateresource.update_resource(client) else: if (resource and (len(resource) > 0)): updateresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)): updateresources[i].name = resource[i].name updateresources[i].policyname = resource[i].policyname updateresources[i].priority = resource[i].priority updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression updateresources[i].bindpoint = resource[i].bindpoint updateresources[i].invoke = resource[i].invoke updateresources[i].labeltype = resource[i].labeltype updateresources[i].labelname = resource[i].labelname return cls.update_bulk_request(client, updateresources) except Exception as e: raise e
@classmethod def delete(cls, client, resource): '\n\n :param client: \n :param resource: \n\n ' try: if (resource and (type(resource) is not list)): deleteresource = lbvserver_appfwpolicy_binding() deleteresource.name = resource.name deleteresource.policyname = resource.policyname deleteresource.bindpoint = resource.bindpoint deleteresource.priority = resource.priority return deleteresource.delete_resource(client) else: if (resource and (len(resource) > 0)): deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)): deleteresources[i].name = resource[i].name deleteresources[i].policyname = resource[i].policyname deleteresources[i].bindpoint = resource[i].bindpoint deleteresources[i].priority = resource[i].priority return cls.delete_bulk_request(client, deleteresources) except Exception as e: raise e
5,475,910,030,271,163,000
:param client: :param resource:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
delete
HanseMerkur/nitro-python
python
@classmethod def delete(cls, client, resource): '\n\n :param client: \n :param resource: \n\n ' try: if (resource and (type(resource) is not list)): deleteresource = lbvserver_appfwpolicy_binding() deleteresource.name = resource.name deleteresource.policyname = resource.policyname deleteresource.bindpoint = resource.bindpoint deleteresource.priority = resource.priority return deleteresource.delete_resource(client) else: if (resource and (len(resource) > 0)): deleteresources = [lbvserver_appfwpolicy_binding() for _ in range(len(resource))] for i in range(len(resource)): deleteresources[i].name = resource[i].name deleteresources[i].policyname = resource[i].policyname deleteresources[i].bindpoint = resource[i].bindpoint deleteresources[i].priority = resource[i].priority return cls.delete_bulk_request(client, deleteresources) except Exception as e: raise e
@classmethod def get(cls, service, name): 'Use this API to fetch lbvserver_appfwpolicy_binding resources.\n\n :param service: \n :param name: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e
3,176,071,130,034,916,400
Use this API to fetch lbvserver_appfwpolicy_binding resources. :param service: :param name:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
get
HanseMerkur/nitro-python
python
@classmethod def get(cls, service, name): 'Use this API to fetch lbvserver_appfwpolicy_binding resources.\n\n :param service: \n :param name: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name response = obj.get_resources(service) return response except Exception as e: raise e
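A hedged usage sketch for the binding class documented above; the vserver and policy names are placeholders, and client is assumed to be an already-authenticated nitro_service session:

from nitro.resource.config.lb.lbvserver_appfwpolicy_binding import lbvserver_appfwpolicy_binding

binding = lbvserver_appfwpolicy_binding()
binding.name = 'lb_vs1'                 # LB vserver to bind to (placeholder)
binding.policyname = 'appfw_pol1'       # AppFW policy name (placeholder)
binding.priority = 100
binding.bindpoint = 'REQUEST'

# With a live session these calls create and then list the binding:
# lbvserver_appfwpolicy_binding.add(client, binding)
# existing = lbvserver_appfwpolicy_binding.get(client, 'lb_vs1')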
@classmethod def get_filtered(cls, service, name, filter_): 'Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.\n Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\n :param service: \n :param name: \n :param filter_: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e
-2,542,339,016,633,254,000
Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param service: :param name: :param filter_:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
get_filtered
HanseMerkur/nitro-python
python
@classmethod def get_filtered(cls, service, name, filter_): 'Use this API to fetch filtered set of lbvserver_appfwpolicy_binding resources.\n Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\n :param service: \n :param name: \n :param filter_: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.filter = filter_ response = obj.getfiltered(service, option_) return response except Exception as e: raise e
@classmethod def count(cls, service, name): 'Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.\n\n :param service: \n :param name: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response: return response[0].__dict__['___count'] return 0 except Exception as e: raise e
7,837,158,330,813,566,000
Use this API to count lbvserver_appfwpolicy_binding resources configured on NetScaler. :param service: :param name:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
count
HanseMerkur/nitro-python
python
@classmethod def count(cls, service, name): 'Use this API to count lbvserver_appfwpolicy_binding resources configued on NetScaler.\n\n :param service: \n :param name: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.count = True response = obj.get_resources(service, option_) if response: return response[0].__dict__['___count'] return 0 except Exception as e: raise e
@classmethod def count_filtered(cls, service, name, filter_): 'Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.\n Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\n :param service: \n :param name: \n :param filter_: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response: return response[0].__dict__['___count'] return 0 except Exception as e: raise e
7,667,054,189,601,754,000
Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources. Filter string should be in JSON format.eg: "port:80,servicetype:HTTP". :param service: :param name: :param filter_:
nitro/resource/config/lb/lbvserver_appfwpolicy_binding.py
count_filtered
HanseMerkur/nitro-python
python
@classmethod def count_filtered(cls, service, name, filter_): 'Use this API to count the filtered set of lbvserver_appfwpolicy_binding resources.\n Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".\n\n :param service: \n :param name: \n :param filter_: \n\n ' try: obj = lbvserver_appfwpolicy_binding() obj.name = name option_ = options() option_.count = True option_.filter = filter_ response = obj.getfiltered(service, option_) if response: return response[0].__dict__['___count'] return 0 except Exception as e: raise e
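For the filtered variants above, the filter string is a comma-separated list of attribute:value pairs; continuing the placeholder names from the earlier sketch:

filter_ = 'policyname:appfw_pol1'       # attribute:value pairs, comma-separated
# n = lbvserver_appfwpolicy_binding.count_filtered(client, 'lb_vs1', filter_)
# rows = lbvserver_appfwpolicy_binding.get_filtered(client, 'lb_vs1', filter_)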
async def ping(self, ctx): '\n Args:\n ctx: FContext\n ' pass
2,692,111,709,897,499,000
Args: ctx: FContext
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
ping
trevorackerman-wk/frugal
python
async def ping(self, ctx): '\n Args:\n ctx: FContext\n ' pass
def __init__(self, provider, middleware=None): '\n Create a new Client with an FServiceProvider containing a transport\n and protocol factory.\n\n Args:\n provider: FServiceProvider\n middleware: ServiceMiddleware or list of ServiceMiddleware\n ' middleware = (middleware or []) if (middleware and (not isinstance(middleware, list))): middleware = [middleware] super(Client, self).__init__(provider, middleware=middleware) middleware += provider.get_middleware() self._methods.update({'ping': Method(self._ping, middleware)})
6,234,326,604,996,853,000
Create a new Client with an FServiceProvider containing a transport and protocol factory. Args: provider: FServiceProvider middleware: ServiceMiddleware or list of ServiceMiddleware
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
__init__
trevorackerman-wk/frugal
python
def __init__(self, provider, middleware=None): '\n Create a new Client with an FServiceProvider containing a transport\n and protocol factory.\n\n Args:\n provider: FServiceProvider\n middleware: ServiceMiddleware or list of ServiceMiddleware\n ' middleware = (middleware or []) if (middleware and (not isinstance(middleware, list))): middleware = [middleware] super(Client, self).__init__(provider, middleware=middleware) middleware += provider.get_middleware() self._methods.update({'ping': Method(self._ping, middleware)})
async def ping(self, ctx): '\n Args:\n ctx: FContext\n ' return (await self._methods['ping']([ctx]))
-7,926,762,067,287,352,000
Args: ctx: FContext
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
ping
trevorackerman-wk/frugal
python
async def ping(self, ctx): '\n Args:\n ctx: FContext\n ' return (await self._methods['ping']([ctx]))
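A hedged usage sketch for the generated asyncio client above; constructing the FServiceProvider (transport plus protocol factory) is elided, and the FContext import path follows frugal's usual Python layout (an assumption):

from frugal.context import FContext   # assumed import path

async def ping_once(provider):
    # provider: an FServiceProvider already wired to a transport/protocol factory.
    client = Client(provider)          # Client generated in f_Pinger above
    await client.ping(FContext())

# e.g. asyncio.run(ping_once(provider)) once a provider has been constructed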
def __init__(self, handler, middleware=None): '\n Create a new Processor.\n\n Args:\n handler: Iface\n ' if (middleware and (not isinstance(middleware, list))): middleware = [middleware] super(Processor, self).__init__(handler, middleware=middleware) self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
-6,148,350,140,761,615,000
Create a new Processor. Args: handler: Iface
test/expected/python.asyncio/service_extension_same_file/f_Pinger.py
__init__
trevorackerman-wk/frugal
python
def __init__(self, handler, middleware=None): '\n Create a new Processor.\n\n Args:\n handler: Iface\n ' if (middleware and (not isinstance(middleware, list))): middleware = [middleware] super(Processor, self).__init__(handler, middleware=middleware) self.add_to_processor_map('ping', _ping(Method(handler.ping, middleware), self.get_write_lock()))
def final_eos_is_already_included(header_block: Union[(UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock)], blocks: BlockchainInterface, sub_slot_iters: uint64) -> bool: '\n Args:\n header_block: An overflow block, with potentially missing information about the new sub slot\n blocks: all blocks that have been included before header_block\n sub_slot_iters: sub_slot_iters at the header_block\n\n Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub\n slot was not included yet, and therefore it is the responsibility of this block to include it\n\n ' if (len(header_block.finished_sub_slots) > 0): return False curr: BlockRecord = blocks.block_record(header_block.prev_header_hash) seen_overflow_block = (curr.overflow and ((header_block.total_iters - curr.total_iters) < (sub_slot_iters // 2))) while ((not curr.first_in_sub_slot) and (not (curr.height == 0))): if (curr.overflow and ((header_block.total_iters - curr.total_iters) < (sub_slot_iters // 2))): seen_overflow_block = True curr = blocks.block_record(curr.prev_hash) if (curr.first_in_sub_slot and seen_overflow_block): return True return False
1,981,469,205,373,613,600
Args: header_block: An overflow block, with potentially missing information about the new sub slot blocks: all blocks that have been included before header_block sub_slot_iters: sub_slot_iters at the header_block Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub slot was not included yet, and therefore it is the responsibility of this block to include it
cactus/consensus/get_block_challenge.py
final_eos_is_already_included
Cactus-Network/cactus-blockchain
python
def final_eos_is_already_included(header_block: Union[(UnfinishedHeaderBlock, UnfinishedBlock, HeaderBlock, FullBlock)], blocks: BlockchainInterface, sub_slot_iters: uint64) -> bool: '\n Args:\n header_block: An overflow block, with potentially missing information about the new sub slot\n blocks: all blocks that have been included before header_block\n sub_slot_iters: sub_slot_iters at the header_block\n\n Returns: True iff the missing sub slot was already included in a previous block. Returns False if the sub\n slot was not included yet, and therefore it is the responsibility of this block to include it\n\n ' if (len(header_block.finished_sub_slots) > 0): return False curr: BlockRecord = blocks.block_record(header_block.prev_header_hash) seen_overflow_block = (curr.overflow and ((header_block.total_iters - curr.total_iters) < (sub_slot_iters // 2))) while ((not curr.first_in_sub_slot) and (not (curr.height == 0))): if (curr.overflow and ((header_block.total_iters - curr.total_iters) < (sub_slot_iters // 2))): seen_overflow_block = True curr = blocks.block_record(curr.prev_hash) if (curr.first_in_sub_slot and seen_overflow_block): return True return False
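A purely numeric illustration of the overflow check applied while walking back through prior blocks above (no chia types; the iteration counts are made up): an earlier overflow block is flagged as seen only if it was infused within half a sub-slot of the current block.

sub_slot_iters = 1000
header_total_iters = 5000     # total_iters of the current overflow block
prev_total_iters = 4600       # total_iters of an earlier overflow block

close_enough = (header_total_iters - prev_total_iters) < sub_slot_iters // 2
print(close_enough)           # True: 400 < 500, so it counts as seen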
@property def color(self): "\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n\n Returns\n -------\n str\n " return self['color']
-6,704,044,870,786,772,000
The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: aliceblue, antiquewhite, aqua, aquamarine, azure, beige, bisque, black, blanchedalmond, blue, blueviolet, brown, burlywood, cadetblue, chartreuse, chocolate, coral, cornflowerblue, cornsilk, crimson, cyan, darkblue, darkcyan, darkgoldenrod, darkgray, darkgrey, darkgreen, darkkhaki, darkmagenta, darkolivegreen, darkorange, darkorchid, darkred, darksalmon, darkseagreen, darkslateblue, darkslategray, darkslategrey, darkturquoise, darkviolet, deeppink, deepskyblue, dimgray, dimgrey, dodgerblue, firebrick, floralwhite, forestgreen, fuchsia, gainsboro, ghostwhite, gold, goldenrod, gray, grey, green, greenyellow, honeydew, hotpink, indianred, indigo, ivory, khaki, lavender, lavenderblush, lawngreen, lemonchiffon, lightblue, lightcoral, lightcyan, lightgoldenrodyellow, lightgray, lightgrey, lightgreen, lightpink, lightsalmon, lightseagreen, lightskyblue, lightslategray, lightslategrey, lightsteelblue, lightyellow, lime, limegreen, linen, magenta, maroon, mediumaquamarine, mediumblue, mediumorchid, mediumpurple, mediumseagreen, mediumslateblue, mediumspringgreen, mediumturquoise, mediumvioletred, midnightblue, mintcream, mistyrose, moccasin, navajowhite, navy, oldlace, olive, olivedrab, orange, orangered, orchid, palegoldenrod, palegreen, paleturquoise, palevioletred, papayawhip, peachpuff, peru, pink, plum, powderblue, purple, red, rosybrown, royalblue, rebeccapurple, saddlebrown, salmon, sandybrown, seagreen, seashell, sienna, silver, skyblue, slateblue, slategray, slategrey, snow, springgreen, steelblue, tan, teal, thistle, tomato, turquoise, violet, wheat, white, whitesmoke, yellow, yellowgreen Returns ------- str
packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
color
1abner1/plotly.py
python
@property def color(self): "\n The 'color' property is a color and may be specified as:\n - A hex string (e.g. '#ff0000')\n - An rgb/rgba string (e.g. 'rgb(255,0,0)')\n - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')\n - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')\n - A named CSS color:\n aliceblue, antiquewhite, aqua, aquamarine, azure,\n beige, bisque, black, blanchedalmond, blue,\n blueviolet, brown, burlywood, cadetblue,\n chartreuse, chocolate, coral, cornflowerblue,\n cornsilk, crimson, cyan, darkblue, darkcyan,\n darkgoldenrod, darkgray, darkgrey, darkgreen,\n darkkhaki, darkmagenta, darkolivegreen, darkorange,\n darkorchid, darkred, darksalmon, darkseagreen,\n darkslateblue, darkslategray, darkslategrey,\n darkturquoise, darkviolet, deeppink, deepskyblue,\n dimgray, dimgrey, dodgerblue, firebrick,\n floralwhite, forestgreen, fuchsia, gainsboro,\n ghostwhite, gold, goldenrod, gray, grey, green,\n greenyellow, honeydew, hotpink, indianred, indigo,\n ivory, khaki, lavender, lavenderblush, lawngreen,\n lemonchiffon, lightblue, lightcoral, lightcyan,\n lightgoldenrodyellow, lightgray, lightgrey,\n lightgreen, lightpink, lightsalmon, lightseagreen,\n lightskyblue, lightslategray, lightslategrey,\n lightsteelblue, lightyellow, lime, limegreen,\n linen, magenta, maroon, mediumaquamarine,\n mediumblue, mediumorchid, mediumpurple,\n mediumseagreen, mediumslateblue, mediumspringgreen,\n mediumturquoise, mediumvioletred, midnightblue,\n mintcream, mistyrose, moccasin, navajowhite, navy,\n oldlace, olive, olivedrab, orange, orangered,\n orchid, palegoldenrod, palegreen, paleturquoise,\n palevioletred, papayawhip, peachpuff, peru, pink,\n plum, powderblue, purple, red, rosybrown,\n royalblue, rebeccapurple, saddlebrown, salmon,\n sandybrown, seagreen, seashell, sienna, silver,\n skyblue, slateblue, slategray, slategrey, snow,\n springgreen, steelblue, tan, teal, thistle, tomato,\n turquoise, violet, wheat, white, whitesmoke,\n yellow, yellowgreen\n\n Returns\n -------\n str\n " return self['color']
@property def family(self): '\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The Chart Studio Cloud (at https://chart-\n studio.plotly.com or on-premise) generates images on a server,\n where only a select number of fonts are installed and\n supported. These include "Arial", "Balto", "Courier New",\n "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n\n Returns\n -------\n str\n ' return self['family']
3,791,649,582,837,001,000
HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart- studio.plotly.com or on-premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str
packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
family
1abner1/plotly.py
python
@property def family(self): '\n HTML font family - the typeface that will be applied by the web\n browser. The web browser will only be able to apply a font if\n it is available on the system which it operates. Provide\n multiple font families, separated by commas, to indicate the\n preference in which to apply fonts if they aren\'t available on\n the system. The Chart Studio Cloud (at https://chart-\n studio.plotly.com or on-premise) generates images on a server,\n where only a select number of fonts are installed and\n supported. These include "Arial", "Balto", "Courier New",\n "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans\n Narrow", "Raleway", "Times New Roman".\n \n The \'family\' property is a string and must be specified as:\n - A non-empty string\n\n Returns\n -------\n str\n ' return self['family']
@property def size(self): "\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n\n Returns\n -------\n int|float\n " return self['size']
4,214,108,177,685,330,000
The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float
packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
size
1abner1/plotly.py
python
@property def size(self): "\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [1, inf]\n\n Returns\n -------\n int|float\n " return self['size']
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs): '\n Construct a new Tickfont object\n \n Sets the color bar\'s tick label font\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of :class:`plotly.graph_objs.scatterternary\n .marker.colorbar.Tickfont`\n color\n\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The Chart\n Studio Cloud (at https://chart-studio.plotly.com or on-\n premise) generates images on a server, where only a\n select number of fonts are installed and supported.\n These include "Arial", "Balto", "Courier New", "Droid\n Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT\n Sans Narrow", "Raleway", "Times New Roman".\n size\n\n\n Returns\n -------\n Tickfont\n ' super(Tickfont, self).__init__('tickfont') if ('_parent' in kwargs): self._parent = kwargs['_parent'] return if (arg is None): arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError('The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`') self._skip_invalid = kwargs.pop('skip_invalid', False) self._validate = kwargs.pop('_validate', True) _v = arg.pop('color', None) _v = (color if (color is not None) else _v) if (_v is not None): self['color'] = _v _v = arg.pop('family', None) _v = (family if (family is not None) else _v) if (_v is not None): self['family'] = _v _v = arg.pop('size', None) _v = (size if (size is not None) else _v) if (_v is not None): self['size'] = _v self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
7,054,196,890,296,316,000
Construct a new Tickfont object Sets the color bar's tick label font Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.scatterternary .marker.colorbar.Tickfont` color family HTML font family - the typeface that will be applied by the web browser. The web browser will only be able to apply a font if it is available on the system which it operates. Provide multiple font families, separated by commas, to indicate the preference in which to apply fonts if they aren't available on the system. The Chart Studio Cloud (at https://chart-studio.plotly.com or on- premise) generates images on a server, where only a select number of fonts are installed and supported. These include "Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New Roman". size Returns ------- Tickfont
packages/python/plotly/plotly/graph_objs/scatterternary/marker/colorbar/_tickfont.py
__init__
1abner1/plotly.py
python
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs): '\n Construct a new Tickfont object\n \n Sets the color bar\'s tick label font\n\n Parameters\n ----------\n arg\n dict of properties compatible with this constructor or\n an instance of :class:`plotly.graph_objs.scatterternary\n .marker.colorbar.Tickfont`\n color\n\n family\n HTML font family - the typeface that will be applied by\n the web browser. The web browser will only be able to\n apply a font if it is available on the system which it\n operates. Provide multiple font families, separated by\n commas, to indicate the preference in which to apply\n fonts if they aren\'t available on the system. The Chart\n Studio Cloud (at https://chart-studio.plotly.com or on-\n premise) generates images on a server, where only a\n select number of fonts are installed and supported.\n These include "Arial", "Balto", "Courier New", "Droid\n Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas\n One", "Old Standard TT", "Open Sans", "Overpass", "PT\n Sans Narrow", "Raleway", "Times New Roman".\n size\n\n\n Returns\n -------\n Tickfont\n ' super(Tickfont, self).__init__('tickfont') if ('_parent' in kwargs): self._parent = kwargs['_parent'] return if (arg is None): arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError('The first argument to the plotly.graph_objs.scatterternary.marker.colorbar.Tickfont \nconstructor must be a dict or \nan instance of :class:`plotly.graph_objs.scatterternary.marker.colorbar.Tickfont`') self._skip_invalid = kwargs.pop('skip_invalid', False) self._validate = kwargs.pop('_validate', True) _v = arg.pop('color', None) _v = (color if (color is not None) else _v) if (_v is not None): self['color'] = _v _v = arg.pop('family', None) _v = (family if (family is not None) else _v) if (_v is not None): self['family'] = _v _v = arg.pop('size', None) _v = (size if (size is not None) else _v) if (_v is not None): self['size'] = _v self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
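A hedged usage sketch of the tickfont settings documented above on a scatterternary trace; the data values and font choices are placeholders:

import plotly.graph_objects as go

fig = go.Figure(go.Scatterternary(
    a=[0.2, 0.5, 0.3], b=[0.3, 0.2, 0.5], c=[0.5, 0.3, 0.2],
    mode='markers',
    marker=dict(
        color=[1, 2, 3], showscale=True,
        colorbar=dict(tickfont=dict(family='Courier New, monospace',
                                    size=14, color='crimson')),
    ),
))
# fig.show()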
def print_localconfvalue(name): 'Syntax: [storm localconfvalue conf-name]\n\n Prints out the value for conf-name in the local Storm configs.\n The local Storm configs are the ones in ~/.storm/storm.yaml merged\n in with the configs in defaults.yaml.\n ' print(((name + ': ') + confvalue(name, [USER_CONF_DIR])))
4,783,880,202,942,562,000
Syntax: [storm localconfvalue conf-name] Prints out the value for conf-name in the local Storm configs. The local Storm configs are the ones in ~/.storm/storm.yaml merged in with the configs in defaults.yaml.
bin/storm.py
print_localconfvalue
JamiesZhang/Storm
python
def print_localconfvalue(name): 'Syntax: [storm localconfvalue conf-name]\n\n Prints out the value for conf-name in the local Storm configs.\n The local Storm configs are the ones in ~/.storm/storm.yaml merged\n in with the configs in defaults.yaml.\n ' print(((name + ': ') + confvalue(name, [USER_CONF_DIR])))
def print_remoteconfvalue(name): "Syntax: [storm remoteconfvalue conf-name]\n\n Prints out the value for conf-name in the cluster's Storm configs.\n The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml\n merged in with the configs in defaults.yaml.\n\n This command must be run on a cluster machine.\n " print(((name + ': ') + confvalue(name, [CLUSTER_CONF_DIR])))
5,267,290,543,717,283,000
Syntax: [storm remoteconfvalue conf-name] Prints out the value for conf-name in the cluster's Storm configs. The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml merged in with the configs in defaults.yaml. This command must be run on a cluster machine.
bin/storm.py
print_remoteconfvalue
JamiesZhang/Storm
python
def print_remoteconfvalue(name): "Syntax: [storm remoteconfvalue conf-name]\n\n Prints out the value for conf-name in the cluster's Storm configs.\n The cluster's Storm configs are the ones in $STORM-PATH/conf/storm.yaml\n merged in with the configs in defaults.yaml.\n\n This command must be run on a cluster machine.\n " print(((name + ': ') + confvalue(name, [CLUSTER_CONF_DIR])))
def parse_args(string): 'Takes a string of whitespace-separated tokens and parses it into a list.\n Whitespace inside tokens may be quoted with single quotes, double quotes or\n backslash (similar to command-line arguments in bash).\n\n >>> parse_args(r\'\'\'"a a" \'b b\' c\\ c "d\'d" \'e"e\' \'f\'f\' "g"g" "i""i" \'j\'\'j\' k" "k l\' l\' mm n\\n\'\'\')\n [\'a a\', \'b b\', \'c c\', "d\'d", \'e"e\', "f\'f", \'g"g\', \'ii\', \'jj\', \'k k\', \'l l\', \'mm\', r\'n\n\']\n ' re_split = re.compile('((?:\n [^\\s"\'\\\\] |\n "(?: [^"\\\\] | \\\\.)*" |\n \'(?: [^\'\\\\] | \\\\.)*\' |\n \\\\.\n )+)', re.VERBOSE) args = re_split.split(string)[1::2] args = [re.compile('"((?:[^"\\\\]|\\\\.)*)"').sub('\\1', x) for x in args] args = [re.compile("'((?:[^'\\\\]|\\\\.)*)'").sub('\\1', x) for x in args] return [re.compile('\\\\(.)').sub('\\1', x) for x in args]
-1,373,043,317,218,771,500
Takes a string of whitespace-separated tokens and parses it into a list. Whitespace inside tokens may be quoted with single quotes, double quotes or backslash (similar to command-line arguments in bash). >>> parse_args(r'''"a a" 'b b' c\ c "d'd" 'e"e' 'f'f' "g"g" "i""i" 'j''j' k" "k l' l' mm n\n''') ['a a', 'b b', 'c c', "d'd", 'e"e', "f'f", 'g"g', 'ii', 'jj', 'k k', 'l l', 'mm', r'n ']
bin/storm.py
parse_args
JamiesZhang/Storm
python
def parse_args(string): 'Takes a string of whitespace-separated tokens and parses it into a list.\n Whitespace inside tokens may be quoted with single quotes, double quotes or\n backslash (similar to command-line arguments in bash).\n\n >>> parse_args(r\'\'\'"a a" \'b b\' c\\ c "d\'d" \'e"e\' \'f\'f\' "g"g" "ii" \'j\'\'j\' k" "k l\' l\' mm n\\n\'\'\')\n [\'a a\', \'b b\', \'c c\', "d\'d", \'e"e\', "f\'f", \'g"g\', \'ii\', \'jj\', \'k k\', \'l l\', \'mm\', r\'n\n\']\n ' re_split = re.compile('((?:\n [^\\s"\'\\\\] |\n "(?: [^"\\\\] | \\\\.)*" |\n \'(?: [^\'\\\\] | \\\\.)*\' |\n \\\\.\n )+)', re.VERBOSE) args = re_split.split(string)[1::2] args = [re.compile('"((?:[^"\\\\]|\\\\.)*)"').sub('\\1', x) for x in args] args = [re.compile("'((?:[^'\\\\]|\\\\.)*)'").sub('\\1', x) for x in args] return [re.compile('\\\\(.)').sub('\\1', x) for x in args]
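A quick demonstration of the quoting rules parse_args documents above, assuming the function has been imported from bin/storm.py (in practice the script is normally executed rather than imported):

print(parse_args(r'''"a a" 'b b' c\ c'''))     # ['a a', 'b b', 'c c']
print(parse_args('k" "k mm'))                  # ['k k', 'mm']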
def local(jarfile, klass, *args): 'Syntax: [storm local topology-jar-path class ...]\n\n Runs the main method of class with the specified arguments but pointing to a local cluster\n The storm jars and configs in ~/.storm are put on the classpath.\n The process is configured so that StormSubmitter\n (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)\n and others will interact with a local cluster instead of the one configured by default.\n\n Most options should work just like with the storm jar command.\n\n local also adds in the option --local-ttl which sets the number of seconds the\n local cluster will run for before it shuts down.\n\n --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK\n --java-debug transport=dt_socket,address=localhost:8000\n will open up a debugging server on port 8000.\n ' [ttl, debug_args, args] = parse_local_opts(args) extrajvmopts = [('-Dstorm.local.sleeptime=' + ttl)] if (debug_args != None): extrajvmopts = (extrajvmopts + [('-agentlib:jdwp=' + debug_args)]) run_client_jar(jarfile, 'org.apache.storm.LocalCluster', ([klass] + list(args)), client=False, daemon=False, extrajvmopts=extrajvmopts)
-86,530,798,587,278,830
Syntax: [storm local topology-jar-path class ...] Runs the main method of class with the specified arguments but pointing to a local cluster The storm jars and configs in ~/.storm are put on the classpath. The process is configured so that StormSubmitter (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html) and others will interact with a local cluster instead of the one configured by default. Most options should work just like with the storm jar command. local also adds in the option --local-ttl which sets the number of seconds the local cluster will run for before it shuts down. --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK --java-debug transport=dt_socket,address=localhost:8000 will open up a debugging server on port 8000.
bin/storm.py
local
JamiesZhang/Storm
python
def local(jarfile, klass, *args): 'Syntax: [storm local topology-jar-path class ...]\n\n Runs the main method of class with the specified arguments but pointing to a local cluster\n The storm jars and configs in ~/.storm are put on the classpath.\n The process is configured so that StormSubmitter\n (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)\n and others will interact with a local cluster instead of the one configured by default.\n\n Most options should work just like with the storm jar command.\n\n local also adds in the option --local-ttl which sets the number of seconds the\n local cluster will run for before it shuts down.\n\n --java-debug lets you turn on java debugging and set the parameters passed to -agentlib:jdwp on the JDK\n --java-debug transport=dt_socket,address=localhost:8000\n will open up a debugging server on port 8000.\n ' [ttl, debug_args, args] = parse_local_opts(args) extrajvmopts = [('-Dstorm.local.sleeptime=' + ttl)] if (debug_args != None): extrajvmopts = (extrajvmopts + [('-agentlib:jdwp=' + debug_args)]) run_client_jar(jarfile, 'org.apache.storm.LocalCluster', ([klass] + list(args)), client=False, daemon=False, extrajvmopts=extrajvmopts)
def jar(jarfile, klass, *args): 'Syntax: [storm jar topology-jar-path class ...]\n\n Runs the main method of class with the specified arguments.\n The storm worker dependencies and configs in ~/.storm are put on the classpath.\n The process is configured so that StormSubmitter\n (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)\n will upload the jar at topology-jar-path when the topology is submitted.\n\n When you want to ship other jars which is not included to application jar, you can pass them to --jars option with comma-separated string.\n For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.\n And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.\n You can also exclude some dependencies like what you\'re doing in maven pom.\n Please add exclusion artifacts with \'^\' separated string after the artifact.\n For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.\n\n When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.\n Repository format is "<name>^<url>". \'^\' is taken as separator because URL allows various characters.\n For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.\n You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don\'t have \'.m2/repository\' directory in home directory, because CWD is sometimes non-deterministic (fragile).\n\n You can also provide proxy information to let dependency resolver utilizing proxy if needed. There\'re three parameters for proxy:\n --proxyUrl: URL representation of proxy (\'http://host:port\')\n --proxyUsername: username of proxy if it requires basic auth\n --proxyPassword: password of proxy if it requires basic auth\n\n Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`\n\n When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.\n\n If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.\n ' [server_class_path, args] = parse_jar_opts(args) run_client_jar(jarfile, klass, list(args), client=(not server_class_path), daemon=False)
-5,195,932,568,094,356,000
Syntax: [storm jar topology-jar-path class ...] Runs the main method of class with the specified arguments. The storm worker dependencies and configs in ~/.storm are put on the classpath. The process is configured so that StormSubmitter (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html) will upload the jar at topology-jar-path when the topology is submitted. When you want to ship other jars that are not included in the application jar, pass them to the --jars option as a comma-separated string. For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar. When you want to ship Maven artifacts and their transitive dependencies, pass them to --artifacts as a comma-separated string. You can also exclude some dependencies, as you would in a Maven POM, by appending a '^'-separated list of exclusions to the artifact. For example, --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load the jedis and kafka-clients artifacts and all of their transitive dependencies, but exclude slf4j-api from kafka. When you need to pull artifacts from repositories other than Maven Central, pass the remote repositories to the --artifactRepositories option as a comma-separated string. The repository format is "<name>^<url>"; '^' is used as the separator because a URL may contain many other characters. For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add the JBoss and HDP repositories for the dependency resolver. You can provide a local Maven repository directory via --mavenLocalRepositoryDirectory if you would like to use a specific directory. This can help when you don't have an '.m2/repository' directory in your home directory, because the CWD is sometimes non-deterministic (fragile). You can also provide proxy information so the dependency resolver uses a proxy if needed. There are three proxy parameters: --proxyUrl: URL representation of the proxy ('http://host:port') --proxyUsername: username for the proxy if it requires basic auth --proxyPassword: password for the proxy if it requires basic auth A complete example of these options is: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"` When you pass the jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included in the classpath of both the process that runs the class and the workers for that topology. If for some reason you need the full storm classpath, not just the one for the worker, you may include the command line option `--storm-server-classpath`. Be careful: this adds things to the classpath that will not be on the worker classpath and could result in the worker not running.
bin/storm.py
jar
JamiesZhang/Storm
python
def jar(jarfile, klass, *args): 'Syntax: [storm jar topology-jar-path class ...]\n\n Runs the main method of class with the specified arguments.\n The storm worker dependencies and configs in ~/.storm are put on the classpath.\n The process is configured so that StormSubmitter\n (http://storm.apache.org/releases/current/javadocs/org/apache/storm/StormSubmitter.html)\n will upload the jar at topology-jar-path when the topology is submitted.\n\n When you want to ship other jars which is not included to application jar, you can pass them to --jars option with comma-separated string.\n For example, --jars "your-local-jar.jar,your-local-jar2.jar" will load your-local-jar.jar and your-local-jar2.jar.\n And when you want to ship maven artifacts and its transitive dependencies, you can pass them to --artifacts with comma-separated string.\n You can also exclude some dependencies like what you\'re doing in maven pom.\n Please add exclusion artifacts with \'^\' separated string after the artifact.\n For example, -artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" will load jedis and kafka-clients artifact and all of transitive dependencies but exclude slf4j-api from kafka.\n\n When you need to pull the artifacts from other than Maven Central, you can pass remote repositories to --artifactRepositories option with comma-separated string.\n Repository format is "<name>^<url>". \'^\' is taken as separator because URL allows various characters.\n For example, --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/" will add JBoss and HDP repositories for dependency resolver.\n You can provide local maven repository directory via --mavenLocalRepositoryDirectory if you would like to use specific directory. It might help when you don\'t have \'.m2/repository\' directory in home directory, because CWD is sometimes non-deterministic (fragile).\n\n You can also provide proxy information to let dependency resolver utilizing proxy if needed. There\'re three parameters for proxy:\n --proxyUrl: URL representation of proxy (\'http://host:port\')\n --proxyUsername: username of proxy if it requires basic auth\n --proxyPassword: password of proxy if it requires basic auth\n\n Complete example of options is here: `./bin/storm jar example/storm-starter/storm-starter-topologies-*.jar org.apache.storm.starter.RollingTopWords blobstore-remote2 remote --jars "./external/storm-redis/storm-redis-1.1.0.jar,./external/storm-kafka-client/storm-kafka-client-1.1.0.jar" --artifacts "redis.clients:jedis:2.9.0,org.apache.kafka:kafka-clients:1.0.0^org.slf4j:slf4j-api" --artifactRepositories "jboss-repository^http://repository.jboss.com/maven2,HDPRepo^http://repo.hortonworks.com/content/groups/public/"`\n\n When you pass jars and/or artifacts options, StormSubmitter will upload them when the topology is submitted, and they will be included to classpath of both the process which runs the class, and also workers for that topology.\n\n If for some reason you need to have the full storm classpath, not just the one for the worker you may include the command line option `--storm-server-classpath`. Please be careful because this will add things to the classpath that will not be on the worker classpath and could result in the worker not running.\n ' [server_class_path, args] = parse_jar_opts(args) run_client_jar(jarfile, klass, list(args), client=(not server_class_path), daemon=False)
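A sketch of driving jar() programmatically instead of via the CLI; it assumes execution inside bin/storm.py's own process, uses illustrative jar/class/topology names, and assumes parse_jar_opts accepts --storm-server-classpath anywhere in the trailing arguments. Per the body above, that flag flips the client flag passed to run_client_jar.

# Alternative A: normal submission, worker dependencies on the classpath,
# i.e. run_client_jar(..., client=True).
jar('target/my-topology.jar', 'org.example.MyTopology', 'my-topo')

# Alternative B: with --storm-server-classpath the full storm classpath is used
# instead (client=False); the docstring warns this may not match the worker classpath.
jar('target/my-topology.jar', 'org.example.MyTopology', '--storm-server-classpath', 'my-topo')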
def sql(sql_file, topology_name): 'Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode\n\n Compiles the SQL statements into a Trident topology and submits it to Storm.\n If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.\n\n --jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.\n Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.\n You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.\n ' global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD local_jars = DEP_JARS_OPTS artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD) sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, 'sql', 'runtime')) local_jars.extend(sql_runtime_jars) extrajars = [USER_CONF_DIR, STORM_BIN_DIR] extrajars.extend(local_jars) extrajars.extend(artifact_to_file_jars.values()) sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, 'sql', 'core')) extrajars.extend(sql_core_jars) if (topology_name == '--explain'): args = ['--file', sql_file, '--explain'] else: args = ['--file', sql_file, '--topology', topology_name] exec_storm_class('org.apache.storm.sql.StormSqlRunner', jvmtype='-client', extrajars=extrajars, args=args, daemon=False, jvmopts=([('-Dstorm.dependency.jars=' + ','.join(local_jars))] + [('-Dstorm.dependency.artifacts=' + json.dumps(artifact_to_file_jars))]))
4,051,998,150,311,554,000
Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] to activate explain mode Compiles the SQL statements into a Trident topology and submits it to Storm. If explain mode is activated, the SQL Runner analyzes each query statement and shows its query plan instead of submitting a topology. The --jars, --artifacts, --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, and --proxyPassword options available for the jar command also apply to the sql command. Please refer to "help jar" to see how to use these options. You normally want to pass them, since your SQL usually needs a data source, which in many cases is an external storage.
bin/storm.py
sql
JamiesZhang/Storm
python
def sql(sql_file, topology_name): 'Syntax: [storm sql sql-file topology-name], or [storm sql sql-file --explain] when activating explain mode\n\n Compiles the SQL statements into a Trident topology and submits it to Storm.\n If user activates explain mode, SQL Runner analyzes each query statement and shows query plan instead of submitting topology.\n\n --jars and --artifacts, and --artifactRepositories, --mavenLocalRepositoryDirectory, --proxyUrl, --proxyUsername, --proxyPassword options available for jar are also applied to sql command.\n Please refer "help jar" to see how to use --jars and --artifacts, and --artifactRepositories, --proxyUrl, --proxyUsername, --proxyPassword options.\n You normally want to pass these options since you need to set data source to your sql which is an external storage in many cases.\n ' global DEP_JARS_OPTS, DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD local_jars = DEP_JARS_OPTS artifact_to_file_jars = resolve_dependencies(DEP_ARTIFACTS_OPTS, DEP_ARTIFACTS_REPOSITORIES_OPTS, DEP_MAVEN_LOCAL_REPOSITORY_DIRECTORY, DEP_PROXY_URL, DEP_PROXY_USERNAME, DEP_PROXY_PASSWORD) sql_runtime_jars = get_jars_full(os.path.join(STORM_TOOLS_LIB_DIR, 'sql', 'runtime')) local_jars.extend(sql_runtime_jars) extrajars = [USER_CONF_DIR, STORM_BIN_DIR] extrajars.extend(local_jars) extrajars.extend(artifact_to_file_jars.values()) sql_core_jars = get_wildcard_dir(os.path.join(STORM_TOOLS_LIB_DIR, 'sql', 'core')) extrajars.extend(sql_core_jars) if (topology_name == '--explain'): args = ['--file', sql_file, '--explain'] else: args = ['--file', sql_file, '--topology', topology_name] exec_storm_class('org.apache.storm.sql.StormSqlRunner', jvmtype='-client', extrajars=extrajars, args=args, daemon=False, jvmopts=([('-Dstorm.dependency.jars=' + ','.join(local_jars))] + [('-Dstorm.dependency.artifacts=' + json.dumps(artifact_to_file_jars))]))
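A small sketch of the two modes handled by sql() above; the file and topology names are illustrative, and the calls assume execution inside bin/storm.py.

# Mode 1, submit: compile the statements in the file into a Trident topology.
sql('queries/orders.sql', 'orders-topology')

# Mode 2, explain: pass the literal '--explain' in place of a topology name so
# StormSqlRunner only prints the query plans instead of submitting anything.
sql('queries/orders.sql', '--explain')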
def kill(*args): "Syntax: [storm kill topology-name [-w wait-time-secs]]\n\n Kills the topology with the name topology-name. Storm will\n first deactivate the topology's spouts for the duration of\n the topology's message timeout to allow all messages currently\n being processed to finish processing. Storm will then shutdown\n the workers and clean up their state. You can override the length\n of time Storm waits between deactivation and shutdown with the -w flag.\n " if (not args): print_usage(command='kill') sys.exit(2) exec_storm_class('org.apache.storm.command.KillTopology', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
7,946,931,335,110,760,000
Syntax: [storm kill topology-name [-w wait-time-secs]] Kills the topology with the name topology-name. Storm will first deactivate the topology's spouts for the duration of the topology's message timeout to allow all messages currently being processed to finish processing. Storm will then shut down the workers and clean up their state. You can override the length of time Storm waits between deactivation and shutdown with the -w flag.
bin/storm.py
kill
JamiesZhang/Storm
python
def kill(*args): "Syntax: [storm kill topology-name [-w wait-time-secs]]\n\n Kills the topology with the name topology-name. Storm will\n first deactivate the topology's spouts for the duration of\n the topology's message timeout to allow all messages currently\n being processed to finish processing. Storm will then shutdown\n the workers and clean up their state. You can override the length\n of time Storm waits between deactivation and shutdown with the -w flag.\n " if (not args): print_usage(command='kill') sys.exit(2) exec_storm_class('org.apache.storm.command.KillTopology', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
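For illustration (the topology name is hypothetical), the programmatic equivalent of `storm kill my-topo -w 30`, assuming it runs inside bin/storm.py:

# Deactivate the spouts, wait 30 seconds instead of the topology's message timeout,
# then shut the workers down and clean up their state.
kill('my-topo', '-w', '30')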
def upload_credentials(*args): 'Syntax: [storm upload-credentials topology-name [credkey credvalue]*]\n\n Uploads a new set of credentials to a running topology\n ' if (not args): print_usage(command='upload-credentials') sys.exit(2) exec_storm_class('org.apache.storm.command.UploadCredentials', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
3,213,165,739,736,285,000
Syntax: [storm upload-credentials topology-name [credkey credvalue]*] Uploads a new set of credentials to a running topology
bin/storm.py
upload_credentials
JamiesZhang/Storm
python
def upload_credentials(*args): 'Syntax: [storm upload-credentials topology-name [credkey credvalue]*]\n\n Uploads a new set of credentials to a running topology\n ' if (not args): print_usage(command='upload-credentials') sys.exit(2) exec_storm_class('org.apache.storm.command.UploadCredentials', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
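A hedged sketch of refreshing credentials on a running topology; the topology name and the credkey/credvalue pair are purely illustrative, and the call assumes execution inside bin/storm.py.

# Equivalent to `storm upload-credentials my-topo kafka.password s3cr3t`.
upload_credentials('my-topo', 'kafka.password', 's3cr3t')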
def blobstore(*args): 'Syntax: [storm blobstore cmd]\n\n list [KEY...] - lists blobs currently in the blob store\n cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).\n create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE\n or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.\n update [-f FILE] KEY - update the contents of a blob. Contents comes from\n a FILE or STDIN (requires write access).\n delete KEY - delete an entry from the blob store (requires write access).\n set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma\n separated list (requires admin access).\n replication --read KEY - Used to read the replication factor of the blob.\n replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the\n replication factor of a blob.\n For example, the following would create a mytopo:data.tgz key using the data\n stored in data.tgz. User alice would have full access, bob would have\n read/write access and everyone else would have read access.\n storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r\n ' exec_storm_class('org.apache.storm.command.Blobstore', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
-7,622,729,085,902,317,000
Syntax: [storm blobstore cmd] list [KEY...] - lists blobs currently in the blob store cat [-f FILE] KEY - read a blob and then either write it to a file or STDOUT (requires read access). create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents come from a FILE or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list. update [-f FILE] KEY - update the contents of a blob. Contents come from a FILE or STDIN (requires write access). delete KEY - delete an entry from the blob store (requires write access). set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] and can be a comma-separated list (requires admin access). replication --read KEY - read the replication factor of the blob. replication --update --replication-factor NUMBER KEY, where NUMBER > 0 - update the replication factor of a blob. For example, the following would create a mytopo:data.tgz key using the data stored in data.tgz. User alice would have full access, bob would have read/write access, and everyone else would have read access. storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r
bin/storm.py
blobstore
JamiesZhang/Storm
python
def blobstore(*args): 'Syntax: [storm blobstore cmd]\n\n list [KEY...] - lists blobs currently in the blob store\n cat [-f FILE] KEY - read a blob and then either write it to a file, or STDOUT (requires read access).\n create [-f FILE] [-a ACL ...] [--replication-factor NUMBER] KEY - create a new blob. Contents comes from a FILE\n or STDIN. ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma separated list.\n update [-f FILE] KEY - update the contents of a blob. Contents comes from\n a FILE or STDIN (requires write access).\n delete KEY - delete an entry from the blob store (requires write access).\n set-acl [-s ACL] KEY - ACL is in the form [uo]:[username]:[r-][w-][a-] can be comma\n separated list (requires admin access).\n replication --read KEY - Used to read the replication factor of the blob.\n replication --update --replication-factor NUMBER KEY where NUMBER > 0. It is used to update the\n replication factor of a blob.\n For example, the following would create a mytopo:data.tgz key using the data\n stored in data.tgz. User alice would have full access, bob would have\n read/write access and everyone else would have read access.\n storm blobstore create mytopo:data.tgz -f data.tgz -a u:alice:rwa,u:bob:rw,o::r\n ' exec_storm_class('org.apache.storm.command.Blobstore', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
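Building on the docstring's own example, a sketch that creates a blob and later replaces its contents; it assumes the storm launcher is on PATH and reuses the illustrative key, file, and ACL from above.

import subprocess

# Create mytopo:data.tgz with alice = full access, bob = read/write, everyone else = read.
subprocess.run(['storm', 'blobstore', 'create', 'mytopo:data.tgz',
                '-f', 'data.tgz', '-a', 'u:alice:rwa,u:bob:rw,o::r'])

# Later, push new contents under the same key (requires write access on the key).
subprocess.run(['storm', 'blobstore', 'update', 'mytopo:data.tgz', '-f', 'data.tgz'])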
def heartbeats(*args): 'Syntax: [storm heartbeats [cmd]]\n\n list PATH - lists heartbeats nodes under PATH currently in the ClusterState.\n get PATH - Get the heartbeat data at PATH\n ' exec_storm_class('org.apache.storm.command.Heartbeats', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
5,319,599,684,336,833,000
Syntax: [storm heartbeats [cmd]] list PATH - lists heartbeat nodes under PATH currently in the ClusterState. get PATH - Get the heartbeat data at PATH
bin/storm.py
heartbeats
JamiesZhang/Storm
python
def heartbeats(*args): 'Syntax: [storm heartbeats [cmd]]\n\n list PATH - lists heartbeats nodes under PATH currently in the ClusterState.\n get PATH - Get the heartbeat data at PATH\n ' exec_storm_class('org.apache.storm.command.Heartbeats', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
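A short sketch of the two subcommands, assuming execution inside bin/storm.py; the ClusterState paths are made up purely for illustration.

# List heartbeat nodes under an (illustrative) path, then dump one entry.
heartbeats('list', '/workerbeats')
heartbeats('get', '/workerbeats/my-topo-1-1700000000')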
def activate(*args): "Syntax: [storm activate topology-name]\n\n Activates the specified topology's spouts.\n " if (not args): print_usage(command='activate') sys.exit(2) exec_storm_class('org.apache.storm.command.Activate', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
-5,705,921,422,986,034,000
Syntax: [storm activate topology-name] Activates the specified topology's spouts.
bin/storm.py
activate
JamiesZhang/Storm
python
def activate(*args): "Syntax: [storm activate topology-name]\n\n Activates the specified topology's spouts.\n " if (not args): print_usage(command='activate') sys.exit(2) exec_storm_class('org.apache.storm.command.Activate', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def set_log_level(*args): "\n Dynamically change topology log levels\n\n Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]\n where log level is one of:\n ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF\n and timeout is integer seconds.\n\n e.g.\n ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name\n\n Set the root logger's level to DEBUG for 30 seconds\n\n ./bin/storm set_log_level -l com.myapp=WARN topology-name\n\n Set the com.myapp logger's level to WARN for 30 seconds\n\n ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name\n\n Set the com.myapp logger's level to WARN indifinitely, and com.myOtherLogger\n to ERROR for 123 seconds\n\n ./bin/storm set_log_level -r com.myOtherLogger topology-name\n\n Clears settings, resetting back to the original level\n " exec_storm_class('org.apache.storm.command.SetLogLevel', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
1,301,626,724,388,236,500
Dynamically change topology log levels Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name] where log level is one of: ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF and timeout is integer seconds. e.g. ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name Set the root logger's level to DEBUG for 30 seconds ./bin/storm set_log_level -l com.myapp=WARN topology-name Set the com.myapp logger's level to WARN for 30 seconds ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name Set the com.myapp logger's level to WARN indefinitely, and com.myOtherLogger to ERROR for 123 seconds ./bin/storm set_log_level -r com.myOtherLogger topology-name Clears settings, resetting back to the original level
bin/storm.py
set_log_level
JamiesZhang/Storm
python
def set_log_level(*args): "\n Dynamically change topology log levels\n\n Syntax: [storm set_log_level -l [logger name]=[log level][:optional timeout] -r [logger name] topology-name]\n where log level is one of:\n ALL, TRACE, DEBUG, INFO, WARN, ERROR, FATAL, OFF\n and timeout is integer seconds.\n\n e.g.\n ./bin/storm set_log_level -l ROOT=DEBUG:30 topology-name\n\n Set the root logger's level to DEBUG for 30 seconds\n\n ./bin/storm set_log_level -l com.myapp=WARN topology-name\n\n Set the com.myapp logger's level to WARN for 30 seconds\n\n ./bin/storm set_log_level -l com.myapp=WARN -l com.myOtherLogger=ERROR:123 topology-name\n\n Set the com.myapp logger's level to WARN indifinitely, and com.myOtherLogger\n to ERROR for 123 seconds\n\n ./bin/storm set_log_level -r com.myOtherLogger topology-name\n\n Clears settings, resetting back to the original level\n " exec_storm_class('org.apache.storm.command.SetLogLevel', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
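An illustrative pairing of setting and then clearing an override via set_log_level(); the logger name, level, timeout, and topology name are assumptions, the flag format follows the docstring's syntax line, and the calls assume execution inside bin/storm.py.

# Raise com.myapp to DEBUG for 120 seconds while my-topo keeps running.
set_log_level('-l', 'com.myapp=DEBUG:120', 'my-topo')

# Later, clear the override and fall back to the originally configured level.
set_log_level('-r', 'com.myapp', 'my-topo')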
def listtopos(*args): 'Syntax: [storm list]\n\n List the running topologies and their statuses.\n ' exec_storm_class('org.apache.storm.command.ListTopologies', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
715,366,473,802,686,500
Syntax: [storm list] List the running topologies and their statuses.
bin/storm.py
listtopos
JamiesZhang/Storm
python
def listtopos(*args): 'Syntax: [storm list]\n\n List the running topologies and their statuses.\n ' exec_storm_class('org.apache.storm.command.ListTopologies', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
def deactivate(*args): "Syntax: [storm deactivate topology-name]\n\n Deactivates the specified topology's spouts.\n " if (not args): print_usage(command='deactivate') sys.exit(2) exec_storm_class('org.apache.storm.command.Deactivate', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
7,762,466,626,678,783,000
Syntax: [storm deactivate topology-name] Deactivates the specified topology's spouts.
bin/storm.py
deactivate
JamiesZhang/Storm
python
def deactivate(*args): "Syntax: [storm deactivate topology-name]\n\n Deactivates the specified topology's spouts.\n " if (not args): print_usage(command='deactivate') sys.exit(2) exec_storm_class('org.apache.storm.command.Deactivate', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
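deactivate() and the earlier activate() make natural bookends around maintenance work; a tiny sketch with an illustrative topology name, assuming both run inside bin/storm.py.

# Pause the spouts so no new tuples are emitted while dependencies are serviced...
deactivate('my-topo')
# ...then resume emission once the maintenance window is over.
activate('my-topo')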
def rebalance(*args): 'Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r \'{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}\'] [-t \'{"conf1": newValue, *}\']]\n\n Sometimes you may wish to spread out the workers for a running topology.\n For example, let\'s say you have a 10 node cluster running\n 4 workers per node, and then let\'s say you add another 10 nodes to\n the cluster. You may wish to have Storm spread out the workers for the\n running topology so that each node runs 2 workers. One way to do this\n is to kill the topology and resubmit it, but Storm provides a "rebalance"\n command that provides an easier way to do this.\n\n Rebalance will first deactivate the topology for the duration of the\n message timeout (overridable with the -w flag) make requested adjustments to the topology\n and let the scheduler try to find a better scheduling based off of the\n new situation. The topology will then return to its previous state of activation\n (so a deactivated topology will still be deactivated and an activated\n topology will go back to being activated).\n\n Some of what you can change about a topology includes the number of requested workers (-n flag)\n The number of executors for a given component (-e flag) the resources each component is\n requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).\n ' if (not args): print_usage(command='rebalance') sys.exit(2) exec_storm_class('org.apache.storm.command.Rebalance', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
-8,924,590,559,328,843,000
Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r '{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}'] [-t '{"conf1": newValue, *}']] Sometimes you may wish to spread out the workers for a running topology. For example, let's say you have a 10 node cluster running 4 workers per node, and then let's say you add another 10 nodes to the cluster. You may wish to have Storm spread out the workers for the running topology so that each node runs 2 workers. One way to do this is to kill the topology and resubmit it, but Storm provides a "rebalance" command that provides an easier way to do this. Rebalance will first deactivate the topology for the duration of the message timeout (overridable with the -w flag), make the requested adjustments to the topology, and let the scheduler try to find a better scheduling based on the new situation. The topology will then return to its previous state of activation (so a deactivated topology will still be deactivated and an activated topology will go back to being activated). Things you can change about a topology include the number of requested workers (-n flag), the number of executors for a given component (-e flag), the resources each component requests as used by the resource aware scheduler (-r flag), and configs (-t flag).
bin/storm.py
rebalance
JamiesZhang/Storm
python
def rebalance(*args): 'Syntax: [storm rebalance topology-name [-w wait-time-secs] [-n new-num-workers] [-e component=parallelism]* [-r \'{"component1": {"resource1": new_amount, "resource2": new_amount, ... }*}\'] [-t \'{"conf1": newValue, *}\']]\n\n Sometimes you may wish to spread out the workers for a running topology.\n For example, let\'s say you have a 10 node cluster running\n 4 workers per node, and then let\'s say you add another 10 nodes to\n the cluster. You may wish to have Storm spread out the workers for the\n running topology so that each node runs 2 workers. One way to do this\n is to kill the topology and resubmit it, but Storm provides a "rebalance"\n command that provides an easier way to do this.\n\n Rebalance will first deactivate the topology for the duration of the\n message timeout (overridable with the -w flag) make requested adjustments to the topology\n and let the scheduler try to find a better scheduling based off of the\n new situation. The topology will then return to its previous state of activation\n (so a deactivated topology will still be deactivated and an activated\n topology will go back to being activated).\n\n Some of what you can change about a topology includes the number of requested workers (-n flag)\n The number of executors for a given component (-e flag) the resources each component is\n requesting as used by the resource aware scheduler (-r flag) and configs (-t flag).\n ' if (not args): print_usage(command='rebalance') sys.exit(2) exec_storm_class('org.apache.storm.command.Rebalance', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, STORM_BIN_DIR])
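A sketch of a typical rebalance after adding nodes, mirroring the docstring's scenario; the topology name, worker count, component name, and parallelism are illustrative, and the call assumes execution inside bin/storm.py.

# Wait 10 seconds instead of the message timeout, spread my-topo across 6 workers,
# and give the 'split' component 8 executors before the topology is reactivated.
rebalance('my-topo', '-w', '10', '-n', '6', '-e', 'split=8')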
def get_errors(*args): 'Syntax: [storm get-errors topology-name]\n\n Get the latest error from the running topology. The returned result contains\n the key value pairs for component-name and component-error for the components in error.\n The result is returned in json format.\n ' if (not args): print_usage(command='get-errors') sys.exit(2) exec_storm_class('org.apache.storm.command.GetErrors', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')])
-7,079,342,827,459,128,000
Syntax: [storm get-errors topology-name] Get the latest error from the running topology. The returned result contains key-value pairs of component-name and component-error for the components in error. The result is returned in JSON format.
bin/storm.py
get_errors
JamiesZhang/Storm
python
def get_errors(*args): 'Syntax: [storm get-errors topology-name]\n\n Get the latest error from the running topology. The returned result contains\n the key value pairs for component-name and component-error for the components in error.\n The result is returned in json format.\n ' if (not args): print_usage(command='get-errors') sys.exit(2) exec_storm_class('org.apache.storm.command.GetErrors', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')])
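Since the result is JSON, one plausible way to consume it is to capture the command's stdout and parse it; this assumes the storm launcher is on PATH, that only the JSON document is written to stdout, and that the topology name is illustrative.

import json
import subprocess

# Fetch the latest component-name -> component-error pairs for my-topo.
out = subprocess.run(['storm', 'get-errors', 'my-topo'],
                     capture_output=True, text=True, check=True).stdout
errors = json.loads(out)
print(errors)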
def healthcheck(*args): 'Syntax: [storm node-health-check]\n\n Run health checks on the local supervisor.\n ' exec_storm_class('org.apache.storm.command.HealthCheck', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')])
-5,369,723,348,416,531,000
Syntax: [storm node-health-check] Run health checks on the local supervisor.
bin/storm.py
healthcheck
JamiesZhang/Storm
python
def healthcheck(*args): 'Syntax: [storm node-health-check]\n\n Run health checks on the local supervisor.\n ' exec_storm_class('org.apache.storm.command.HealthCheck', args=args, jvmtype='-client', extrajars=[USER_CONF_DIR, os.path.join(STORM_DIR, 'bin')])
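A small wrapper sketch for scripting the node health check; it assumes the storm launcher is on PATH and that a non-zero exit status indicates an unhealthy supervisor (an assumption, since the docstring does not state the exit-code convention).

import subprocess
import sys

# Run the local supervisor's health check scripts and propagate the result.
result = subprocess.run(['storm', 'node-health-check'])
sys.exit(result.returncode)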