Dataset schema (column: type, value range):
body: string, lengths 26 to 98.2k
body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
docstring: string, lengths 1 to 16.8k
path: string, lengths 5 to 230
name: string, lengths 1 to 96
repository_name: string, lengths 7 to 89
lang: string, 1 distinct value
body_without_docstring: string, lengths 20 to 98.2k
def __setstate__(self, state): 'Necessary for making this object picklable' if (not isinstance(state, dict)): raise Exception('invalid pickle state') if (('_categories' not in state) and ('_levels' in state)): state['_categories'] = self.dtype.validate_categories(state.pop('_levels')) if (('_codes' not in state) and ('labels' in state)): state['_codes'] = coerce_indexer_dtype(state.pop('labels'), state['_categories']) if ('_ordered' not in state): if ('ordered' in state): state['_ordered'] = state.pop('ordered') else: state['_ordered'] = False if ('_dtype' not in state): state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered']) for (k, v) in compat.iteritems(state): setattr(self, k, v)
-6,085,031,369,710,524,000
Necessary for making this object picklable
pandas/core/arrays/categorical.py
__setstate__
Adirio/pandas
python
def __setstate__(self, state): if (not isinstance(state, dict)): raise Exception('invalid pickle state') if (('_categories' not in state) and ('_levels' in state)): state['_categories'] = self.dtype.validate_categories(state.pop('_levels')) if (('_codes' not in state) and ('labels' in state)): state['_codes'] = coerce_indexer_dtype(state.pop('labels'), state['_categories']) if ('_ordered' not in state): if ('ordered' in state): state['_ordered'] = state.pop('ordered') else: state['_ordered'] = False if ('_dtype' not in state): state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered']) for (k, v) in compat.iteritems(state): setattr(self, k, v)
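The __setstate__ row above is what lets a Categorical unpickle correctly, translating legacy keys such as _levels and labels into _categories and _codes. A minimal round-trip sketch, assuming only the standard pickle module and pandas imported as pd:

import pickle
import pandas as pd

cat = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b', 'c'], ordered=True)
restored = pickle.loads(pickle.dumps(cat))  # __setstate__ rebuilds _codes, _categories, _dtype
assert restored.equals(cat)                 # same codes against the same categories
assert restored.ordered                     # the ordered flag survives the round-trip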
def memory_usage(self, deep=False): '\n Memory usage of my values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False\n\n See Also\n --------\n numpy.ndarray.nbytes\n ' return (self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep))
-9,068,621,834,873,815,000
Memory usage of my values Parameters ---------- deep : bool Introspect the data deeply, interrogate `object` dtypes for system-level memory consumption Returns ------- bytes used Notes ----- Memory usage does not include memory consumed by elements that are not components of the array if deep=False See Also -------- numpy.ndarray.nbytes
pandas/core/arrays/categorical.py
memory_usage
Adirio/pandas
python
def memory_usage(self, deep=False): '\n Memory usage of my values\n\n Parameters\n ----------\n deep : bool\n Introspect the data deeply, interrogate\n `object` dtypes for system-level memory consumption\n\n Returns\n -------\n bytes used\n\n Notes\n -----\n Memory usage does not include memory consumed by elements that\n are not components of the array if deep=False\n\n See Also\n --------\n numpy.ndarray.nbytes\n ' return (self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep))
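memory_usage, as documented above, is the nbytes of the codes plus the memory of the categories index; deep=True additionally introspects object-dtype categories. A small illustrative sketch (values chosen arbitrarily):

import pandas as pd

cat = pd.Categorical(['low', 'medium', 'high'] * 1000)
shallow = cat.memory_usage()         # codes + categories, no object introspection
deep = cat.memory_usage(deep=True)   # also counts the string payload of the object categories
assert deep >= shallow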
def isna(self): '\n Detect missing values\n\n Missing values (-1 in .codes) are detected.\n\n Returns\n -------\n a boolean array of whether my values are null\n\n See also\n --------\n isna : top-level isna\n isnull : alias of isna\n Categorical.notna : boolean inverse of Categorical.isna\n\n ' ret = (self._codes == (- 1)) return ret
-5,370,868,005,612,063,000
Detect missing values Missing values (-1 in .codes) are detected. Returns ------- a boolean array of whether my values are null See also -------- isna : top-level isna isnull : alias of isna Categorical.notna : boolean inverse of Categorical.isna
pandas/core/arrays/categorical.py
isna
Adirio/pandas
python
def isna(self): '\n Detect missing values\n\n Missing values (-1 in .codes) are detected.\n\n Returns\n -------\n a boolean array of whether my values are null\n\n See also\n --------\n isna : top-level isna\n isnull : alias of isna\n Categorical.notna : boolean inverse of Categorical.isna\n\n ' ret = (self._codes == (- 1)) return ret
def notna(self): '\n Inverse of isna\n\n Both missing values (-1 in .codes) and NA as a category are detected as\n null.\n\n Returns\n -------\n a boolean array of whether my values are not null\n\n See also\n --------\n notna : top-level notna\n notnull : alias of notna\n Categorical.isna : boolean inverse of Categorical.notna\n\n ' return (~ self.isna())
6,422,727,367,209,304,000
Inverse of isna Both missing values (-1 in .codes) and NA as a category are detected as null. Returns ------- a boolean array of whether my values are not null See also -------- notna : top-level notna notnull : alias of notna Categorical.isna : boolean inverse of Categorical.notna
pandas/core/arrays/categorical.py
notna
Adirio/pandas
python
def notna(self): '\n Inverse of isna\n\n Both missing values (-1 in .codes) and NA as a category are detected as\n null.\n\n Returns\n -------\n a boolean array of whether my values are not null\n\n See also\n --------\n notna : top-level notna\n notnull : alias of notna\n Categorical.isna : boolean inverse of Categorical.notna\n\n ' return (~ self.isna())
def put(self, *args, **kwargs): '\n Replace specific elements in the Categorical with given values.\n ' raise NotImplementedError("'put' is not yet implemented for Categorical")
2,810,362,659,536,142,000
Replace specific elements in the Categorical with given values.
pandas/core/arrays/categorical.py
put
Adirio/pandas
python
def put(self, *args, **kwargs): '\n \n ' raise NotImplementedError("'put' is not yet implemented for Categorical")
def dropna(self): '\n Return the Categorical without null values.\n\n Missing values (-1 in .codes) are detected.\n\n Returns\n -------\n valid : Categorical\n ' result = self[self.notna()] return result
7,246,204,869,647,670,000
Return the Categorical without null values. Missing values (-1 in .codes) are detected. Returns ------- valid : Categorical
pandas/core/arrays/categorical.py
dropna
Adirio/pandas
python
def dropna(self): '\n Return the Categorical without null values.\n\n Missing values (-1 in .codes) are detected.\n\n Returns\n -------\n valid : Categorical\n ' result = self[self.notna()] return result
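The isna, notna, and dropna rows above all key off the -1 sentinel in the codes. A brief sketch of the three together, assuming numpy as np and pandas as pd:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', np.nan, 'b', np.nan])
cat.isna()     # array([False,  True, False,  True])
cat.notna()    # the boolean inverse
cat.dropna()   # ['a', 'b'], keeping the same categories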
def value_counts(self, dropna=True): "\n Returns a Series containing counts of each category.\n\n Every category will have an entry, even those with a count of 0.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n\n " from numpy import bincount from pandas import Series, CategoricalIndex (code, cat) = (self._codes, self.categories) (ncat, mask) = (len(cat), (0 <= code)) (ix, clean) = (np.arange(ncat), mask.all()) if (dropna or clean): obs = (code if clean else code[mask]) count = bincount(obs, minlength=(ncat or None)) else: count = bincount(np.where(mask, code, ncat)) ix = np.append(ix, (- 1)) ix = self._constructor(ix, dtype=self.dtype, fastpath=True) return Series(count, index=CategoricalIndex(ix), dtype='int64')
4,811,169,102,183,675,000
Returns a Series containing counts of each category. Every category will have an entry, even those with a count of 0. Parameters ---------- dropna : boolean, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts
pandas/core/arrays/categorical.py
value_counts
Adirio/pandas
python
def value_counts(self, dropna=True): "\n Returns a Series containing counts of each category.\n\n Every category will have an entry, even those with a count of 0.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't include counts of NaN.\n\n Returns\n -------\n counts : Series\n\n See Also\n --------\n Series.value_counts\n\n " from numpy import bincount from pandas import Series, CategoricalIndex (code, cat) = (self._codes, self.categories) (ncat, mask) = (len(cat), (0 <= code)) (ix, clean) = (np.arange(ncat), mask.all()) if (dropna or clean): obs = (code if clean else code[mask]) count = bincount(obs, minlength=(ncat or None)) else: count = bincount(np.where(mask, code, ncat)) ix = np.append(ix, (- 1)) ix = self._constructor(ix, dtype=self.dtype, fastpath=True) return Series(count, index=CategoricalIndex(ix), dtype='int64')
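The value_counts docstring above stresses that every category gets an entry, even with a count of zero, and the dropna=False code path appends a NaN bucket. A hedged usage sketch:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'a', 'c'], categories=['a', 'b', 'c'])
cat.value_counts()                   # Series indexed by category; 'b' appears with count 0
with_nan = pd.Categorical(['a', np.nan, 'a'])
with_nan.value_counts(dropna=False)  # adds a NaN row for the missing values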
def get_values(self): ' Return the values.\n\n For internal compatibility with pandas formatting.\n\n Returns\n -------\n values : numpy array\n A numpy array of the same dtype as categorical.categories.dtype or\n Index if datetime / periods\n ' if is_datetimelike(self.categories): return self.categories.take(self._codes, fill_value=np.nan) return np.array(self)
8,280,021,067,267,941,000
Return the values. For internal compatibility with pandas formatting. Returns ------- values : numpy array A numpy array of the same dtype as categorical.categories.dtype or Index if datetime / periods
pandas/core/arrays/categorical.py
get_values
Adirio/pandas
python
def get_values(self): ' Return the values.\n\n For internal compatibility with pandas formatting.\n\n Returns\n -------\n values : numpy array\n A numpy array of the same dtype as categorical.categories.dtype or\n Index if datetime / periods\n ' if is_datetimelike(self.categories): return self.categories.take(self._codes, fill_value=np.nan) return np.array(self)
def check_for_ordered(self, op): ' assert that we are ordered ' if (not self.ordered): raise TypeError('Categorical is not ordered for operation {op}\nyou can use .as_ordered() to change the Categorical to an ordered one\n'.format(op=op))
-2,233,153,919,381,657,000
assert that we are ordered
pandas/core/arrays/categorical.py
check_for_ordered
Adirio/pandas
python
def check_for_ordered(self, op): ' ' if (not self.ordered): raise TypeError('Categorical is not ordered for operation {op}\nyou can use .as_ordered() to change the Categorical to an ordered one\n'.format(op=op))
def argsort(self, *args, **kwargs): "Return the indices that would sort the Categorical.\n\n Parameters\n ----------\n ascending : bool, default True\n Whether the indices should result in an ascending\n or descending sort.\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n Sorting algorithm.\n *args, **kwargs:\n passed through to :func:`numpy.argsort`.\n\n Returns\n -------\n argsorted : numpy array\n\n See also\n --------\n numpy.ndarray.argsort\n\n Notes\n -----\n While an ordering is applied to the category values, arg-sorting\n in this context refers more to organizing and grouping together\n based on matching category values. Thus, this function can be\n called on an unordered Categorical instance unlike the functions\n 'Categorical.min' and 'Categorical.max'.\n\n Examples\n --------\n >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()\n array([2, 0, 1, 3])\n\n >>> cat = pd.Categorical(['b', 'b', 'a', 'c'],\n ... categories=['c', 'b', 'a'],\n ... ordered=True)\n >>> cat.argsort()\n array([3, 0, 1, 2])\n " return super(Categorical, self).argsort(*args, **kwargs)
4,805,162,541,346,104,000
Return the indices that would sort the Categorical. Parameters ---------- ascending : bool, default True Whether the indices should result in an ascending or descending sort. kind : {'quicksort', 'mergesort', 'heapsort'}, optional Sorting algorithm. *args, **kwargs: passed through to :func:`numpy.argsort`. Returns ------- argsorted : numpy array See also -------- numpy.ndarray.argsort Notes ----- While an ordering is applied to the category values, arg-sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Examples -------- >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort() array([2, 0, 1, 3]) >>> cat = pd.Categorical(['b', 'b', 'a', 'c'], ... categories=['c', 'b', 'a'], ... ordered=True) >>> cat.argsort() array([3, 0, 1, 2])
pandas/core/arrays/categorical.py
argsort
Adirio/pandas
python
def argsort(self, *args, **kwargs): "Return the indices that would sort the Categorical.\n\n Parameters\n ----------\n ascending : bool, default True\n Whether the indices should result in an ascending\n or descending sort.\n kind : {'quicksort', 'mergesort', 'heapsort'}, optional\n Sorting algorithm.\n *args, **kwargs:\n passed through to :func:`numpy.argsort`.\n\n Returns\n -------\n argsorted : numpy array\n\n See also\n --------\n numpy.ndarray.argsort\n\n Notes\n -----\n While an ordering is applied to the category values, arg-sorting\n in this context refers more to organizing and grouping together\n based on matching category values. Thus, this function can be\n called on an unordered Categorical instance unlike the functions\n 'Categorical.min' and 'Categorical.max'.\n\n Examples\n --------\n >>> pd.Categorical(['b', 'b', 'a', 'c']).argsort()\n array([2, 0, 1, 3])\n\n >>> cat = pd.Categorical(['b', 'b', 'a', 'c'],\n ... categories=['c', 'b', 'a'],\n ... ordered=True)\n >>> cat.argsort()\n array([3, 0, 1, 2])\n " return super(Categorical, self).argsort(*args, **kwargs)
def sort_values(self, inplace=False, ascending=True, na_position='last'): " Sorts the Categorical by category value returning a new\n Categorical by default.\n\n While an ordering is applied to the category values, sorting in this\n context refers more to organizing and grouping together based on\n matching category values. Thus, this function can be called on an\n unordered Categorical instance unlike the functions 'Categorical.min'\n and 'Categorical.max'.\n\n Parameters\n ----------\n inplace : boolean, default False\n Do operation in place.\n ascending : boolean, default True\n Order ascending. Passing False orders descending. The\n ordering parameter provides the method by which the\n category values are organized.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Returns\n -------\n y : Categorical or None\n\n See Also\n --------\n Categorical.sort\n Series.sort_values\n\n Examples\n --------\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n >>> c\n [1, 2, 2, 1, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values()\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values(ascending=False)\n [5, 2, 2, 1, 1]\n Categories (3, int64): [1, 2, 5]\n\n Inplace sorting can be done as well:\n\n >>> c.sort_values(inplace=True)\n >>> c\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>>\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n\n 'sort_values' behaviour with NaNs. Note that 'na_position'\n is independent of the 'ascending' parameter:\n\n >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])\n >>> c\n [NaN, 2.0, 2.0, NaN, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values()\n [2.0, 2.0, 5.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False)\n [5.0, 2.0, 2.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(na_position='first')\n [NaN, NaN, 2.0, 2.0, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False, na_position='first')\n [NaN, NaN, 5.0, 2.0, 2.0]\n Categories (2, int64): [2, 5]\n " inplace = validate_bool_kwarg(inplace, 'inplace') if (na_position not in ['last', 'first']): msg = 'invalid na_position: {na_position!r}' raise ValueError(msg.format(na_position=na_position)) codes = np.sort(self._codes) if (not ascending): codes = codes[::(- 1)] na_mask = (codes == (- 1)) if na_mask.any(): n_nans = len(codes[na_mask]) if (na_position == 'first'): new_codes = codes.copy() new_codes[0:n_nans] = (- 1) new_codes[n_nans:] = codes[(~ na_mask)] codes = new_codes elif (na_position == 'last'): new_codes = codes.copy() pos = (len(codes) - n_nans) new_codes[0:pos] = codes[(~ na_mask)] new_codes[pos:] = (- 1) codes = new_codes if inplace: self._codes = codes return else: return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
-4,179,080,878,714,503,700
Sorts the Categorical by category value returning a new Categorical by default. While an ordering is applied to the category values, sorting in this context refers more to organizing and grouping together based on matching category values. Thus, this function can be called on an unordered Categorical instance unlike the functions 'Categorical.min' and 'Categorical.max'. Parameters ---------- inplace : boolean, default False Do operation in place. ascending : boolean, default True Order ascending. Passing False orders descending. The ordering parameter provides the method by which the category values are organized. na_position : {'first', 'last'} (optional, default='last') 'first' puts NaNs at the beginning 'last' puts NaNs at the end Returns ------- y : Categorical or None See Also -------- Categorical.sort Series.sort_values Examples -------- >>> c = pd.Categorical([1, 2, 2, 1, 5]) >>> c [1, 2, 2, 1, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values() [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> c.sort_values(ascending=False) [5, 2, 2, 1, 1] Categories (3, int64): [1, 2, 5] Inplace sorting can be done as well: >>> c.sort_values(inplace=True) >>> c [1, 1, 2, 2, 5] Categories (3, int64): [1, 2, 5] >>> >>> c = pd.Categorical([1, 2, 2, 1, 5]) 'sort_values' behaviour with NaNs. Note that 'na_position' is independent of the 'ascending' parameter: >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) >>> c [NaN, 2.0, 2.0, NaN, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values() [2.0, 2.0, 5.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False) [5.0, 2.0, 2.0, NaN, NaN] Categories (2, int64): [2, 5] >>> c.sort_values(na_position='first') [NaN, NaN, 2.0, 2.0, 5.0] Categories (2, int64): [2, 5] >>> c.sort_values(ascending=False, na_position='first') [NaN, NaN, 5.0, 2.0, 2.0] Categories (2, int64): [2, 5]
pandas/core/arrays/categorical.py
sort_values
Adirio/pandas
python
def sort_values(self, inplace=False, ascending=True, na_position='last'): " Sorts the Categorical by category value returning a new\n Categorical by default.\n\n While an ordering is applied to the category values, sorting in this\n context refers more to organizing and grouping together based on\n matching category values. Thus, this function can be called on an\n unordered Categorical instance unlike the functions 'Categorical.min'\n and 'Categorical.max'.\n\n Parameters\n ----------\n inplace : boolean, default False\n Do operation in place.\n ascending : boolean, default True\n Order ascending. Passing False orders descending. The\n ordering parameter provides the method by which the\n category values are organized.\n na_position : {'first', 'last'} (optional, default='last')\n 'first' puts NaNs at the beginning\n 'last' puts NaNs at the end\n\n Returns\n -------\n y : Categorical or None\n\n See Also\n --------\n Categorical.sort\n Series.sort_values\n\n Examples\n --------\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n >>> c\n [1, 2, 2, 1, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values()\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>> c.sort_values(ascending=False)\n [5, 2, 2, 1, 1]\n Categories (3, int64): [1, 2, 5]\n\n Inplace sorting can be done as well:\n\n >>> c.sort_values(inplace=True)\n >>> c\n [1, 1, 2, 2, 5]\n Categories (3, int64): [1, 2, 5]\n >>>\n >>> c = pd.Categorical([1, 2, 2, 1, 5])\n\n 'sort_values' behaviour with NaNs. Note that 'na_position'\n is independent of the 'ascending' parameter:\n\n >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5])\n >>> c\n [NaN, 2.0, 2.0, NaN, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values()\n [2.0, 2.0, 5.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False)\n [5.0, 2.0, 2.0, NaN, NaN]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(na_position='first')\n [NaN, NaN, 2.0, 2.0, 5.0]\n Categories (2, int64): [2, 5]\n >>> c.sort_values(ascending=False, na_position='first')\n [NaN, NaN, 5.0, 2.0, 2.0]\n Categories (2, int64): [2, 5]\n " inplace = validate_bool_kwarg(inplace, 'inplace') if (na_position not in ['last', 'first']): msg = 'invalid na_position: {na_position!r}' raise ValueError(msg.format(na_position=na_position)) codes = np.sort(self._codes) if (not ascending): codes = codes[::(- 1)] na_mask = (codes == (- 1)) if na_mask.any(): n_nans = len(codes[na_mask]) if (na_position == 'first'): new_codes = codes.copy() new_codes[0:n_nans] = (- 1) new_codes[n_nans:] = codes[(~ na_mask)] codes = new_codes elif (na_position == 'last'): new_codes = codes.copy() pos = (len(codes) - n_nans) new_codes[0:pos] = codes[(~ na_mask)] new_codes[pos:] = (- 1) codes = new_codes if inplace: self._codes = codes return else: return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def _values_for_rank(self): '\n For correctly ranking ordered categorical data. See GH#15420\n\n Ordered categorical data should be ranked on the basis of\n codes with -1 translated to NaN.\n\n Returns\n -------\n numpy array\n\n ' from pandas import Series if self.ordered: values = self.codes mask = (values == (- 1)) if mask.any(): values = values.astype('float64') values[mask] = np.nan elif self.categories.is_numeric(): values = np.array(self) else: values = np.array(self.rename_categories(Series(self.categories).rank().values)) return values
8,110,166,736,292,636,000
For correctly ranking ordered categorical data. See GH#15420 Ordered categorical data should be ranked on the basis of codes with -1 translated to NaN. Returns ------- numpy array
pandas/core/arrays/categorical.py
_values_for_rank
Adirio/pandas
python
def _values_for_rank(self): '\n For correctly ranking ordered categorical data. See GH#15420\n\n Ordered categorical data should be ranked on the basis of\n codes with -1 translated to NaN.\n\n Returns\n -------\n numpy array\n\n ' from pandas import Series if self.ordered: values = self.codes mask = (values == (- 1)) if mask.any(): values = values.astype('float64') values[mask] = np.nan elif self.categories.is_numeric(): values = np.array(self) else: values = np.array(self.rename_categories(Series(self.categories).rank().values)) return values
def ravel(self, order='C'): ' Return a flattened (numpy) array.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n raveled : numpy array\n ' return np.array(self)
2,510,775,633,478,743,600
Return a flattened (numpy) array. For internal compatibility with numpy arrays. Returns ------- raveled : numpy array
pandas/core/arrays/categorical.py
ravel
Adirio/pandas
python
def ravel(self, order='C'): ' Return a flattened (numpy) array.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n raveled : numpy array\n ' return np.array(self)
def view(self): 'Return a view of myself.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n view : Categorical\n Returns `self`!\n ' return self
7,402,810,746,540,229,000
Return a view of myself. For internal compatibility with numpy arrays. Returns ------- view : Categorical Returns `self`!
pandas/core/arrays/categorical.py
view
Adirio/pandas
python
def view(self): 'Return a view of myself.\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n view : Categorical\n Returns `self`!\n ' return self
def to_dense(self): "Return my 'dense' representation\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n dense : array\n " return np.asarray(self)
-296,838,486,681,540,900
Return my 'dense' representation For internal compatibility with numpy arrays. Returns ------- dense : array
pandas/core/arrays/categorical.py
to_dense
Adirio/pandas
python
def to_dense(self): "Return my 'dense' representation\n\n For internal compatibility with numpy arrays.\n\n Returns\n -------\n dense : array\n " return np.asarray(self)
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') def fillna(self, value=None, method=None, limit=None): " Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, a Series or dict can be used to fill in different\n values for each index. The value should not be a list. The\n value(s) passed should either be in the categories or should be\n NaN.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n (Not implemented yet for Categorical!)\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : Categorical with NA/NaN filled\n " (value, method) = validate_fillna_kwargs(value, method, validate_scalar_dict_value=False) if (value is None): value = np.nan if (limit is not None): raise NotImplementedError('specifying a limit for fillna has not been implemented yet') codes = self._codes if (method is not None): values = self.to_dense().reshape((- 1), len(self)) values = interpolate_2d(values, method, 0, None, value).astype(self.categories.dtype)[0] codes = _get_codes_for_values(values, self.categories) elif isinstance(value, ABCSeries): if (not value[(~ value.isin(self.categories))].isna().all()): raise ValueError('fill value must be in categories') values_codes = _get_codes_for_values(value, self.categories) indexer = np.where((values_codes != (- 1))) codes[indexer] = values_codes[(values_codes != (- 1))] elif is_hashable(value): if ((not isna(value)) and (value not in self.categories)): raise ValueError('fill value must be in categories') mask = (codes == (- 1)) if mask.any(): codes = codes.copy() if isna(value): codes[mask] = (- 1) else: codes[mask] = self.categories.get_loc(value) else: raise TypeError('"value" parameter must be a scalar, dict or Series, but you passed a "{0}"'.format(type(value).__name__)) return self._constructor(codes, dtype=self.dtype, fastpath=True)
-1,140,899,440,471,338,100
Fill NA/NaN values using the specified method. Parameters ---------- value : scalar, dict, Series If a scalar value is passed it is used to fill all missing values. Alternatively, a Series or dict can be used to fill in different values for each index. The value should not be a list. The value(s) passed should either be in the categories or should be NaN. method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None Method to use for filling holes in reindexed Series pad / ffill: propagate last valid observation forward to next valid backfill / bfill: use NEXT valid observation to fill gap limit : int, default None (Not implemented yet for Categorical!) If method is specified, this is the maximum number of consecutive NaN values to forward/backward fill. In other words, if there is a gap with more than this number of consecutive NaNs, it will only be partially filled. If method is not specified, this is the maximum number of entries along the entire axis where NaNs will be filled. Returns ------- filled : Categorical with NA/NaN filled
pandas/core/arrays/categorical.py
fillna
Adirio/pandas
python
@deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') def fillna(self, value=None, method=None, limit=None): " Fill NA/NaN values using the specified method.\n\n Parameters\n ----------\n value : scalar, dict, Series\n If a scalar value is passed it is used to fill all missing values.\n Alternatively, a Series or dict can be used to fill in different\n values for each index. The value should not be a list. The\n value(s) passed should either be in the categories or should be\n NaN.\n method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None\n Method to use for filling holes in reindexed Series\n pad / ffill: propagate last valid observation forward to next valid\n backfill / bfill: use NEXT valid observation to fill gap\n limit : int, default None\n (Not implemented yet for Categorical!)\n If method is specified, this is the maximum number of consecutive\n NaN values to forward/backward fill. In other words, if there is\n a gap with more than this number of consecutive NaNs, it will only\n be partially filled. If method is not specified, this is the\n maximum number of entries along the entire axis where NaNs will be\n filled.\n\n Returns\n -------\n filled : Categorical with NA/NaN filled\n " (value, method) = validate_fillna_kwargs(value, method, validate_scalar_dict_value=False) if (value is None): value = np.nan if (limit is not None): raise NotImplementedError('specifying a limit for fillna has not been implemented yet') codes = self._codes if (method is not None): values = self.to_dense().reshape((- 1), len(self)) values = interpolate_2d(values, method, 0, None, value).astype(self.categories.dtype)[0] codes = _get_codes_for_values(values, self.categories) elif isinstance(value, ABCSeries): if (not value[(~ value.isin(self.categories))].isna().all()): raise ValueError('fill value must be in categories') values_codes = _get_codes_for_values(value, self.categories) indexer = np.where((values_codes != (- 1))) codes[indexer] = values_codes[(values_codes != (- 1))] elif is_hashable(value): if ((not isna(value)) and (value not in self.categories)): raise ValueError('fill value must be in categories') mask = (codes == (- 1)) if mask.any(): codes = codes.copy() if isna(value): codes[mask] = (- 1) else: codes[mask] = self.categories.get_loc(value) else: raise TypeError('"value" parameter must be a scalar, dict or Series, but you passed a "{0}"'.format(type(value).__name__)) return self._constructor(codes, dtype=self.dtype, fastpath=True)
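fillna, per the row above, only accepts fill values that are already categories (or NaN), supports pad/backfill methods, and has not yet implemented limit. A hedged sketch:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b', 'c'])
cat.fillna('c')              # fine: 'c' is an existing category
cat.fillna(method='ffill')   # propagate the last valid observation forward
# cat.fillna('z')            # would raise ValueError: fill value must be in categories
# cat.fillna('c', limit=1)   # would raise NotImplementedError in this version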
def take_nd(self, indexer, allow_fill=None, fill_value=None): '\n Take elements from the Categorical.\n\n Parameters\n ----------\n indexer : sequence of integers\n allow_fill : bool, default None.\n How to handle negative values in `indexer`.\n\n * False: negative values in `indices` indicate positional indices\n from the right. This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate missing values\n (the default). These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n .. versionchanged:: 0.23.0\n\n Deprecated the default value of `allow_fill`. The deprecated\n default is ``True``. In the future, this will change to\n ``False``.\n\n Returns\n -------\n Categorical\n This Categorical will have the same categories and ordered as\n `self`.\n ' indexer = np.asarray(indexer, dtype=np.intp) if (allow_fill is None): if (indexer < 0).any(): warn(_take_msg, FutureWarning, stacklevel=2) allow_fill = True if isna(fill_value): fill_value = (- 1) codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) result = self._constructor(codes, dtype=self.dtype, fastpath=True) return result
-7,154,992,052,181,375,000
Take elements from the Categorical. Parameters ---------- indexer : sequence of integers allow_fill : bool, default None. How to handle negative values in `indexer`. * False: negative values in `indices` indicate positional indices from the right. This is similar to :func:`numpy.take`. * True: negative values in `indices` indicate missing values (the default). These values are set to `fill_value`. Any other other negative values raise a ``ValueError``. .. versionchanged:: 0.23.0 Deprecated the default value of `allow_fill`. The deprecated default is ``True``. In the future, this will change to ``False``. Returns ------- Categorical This Categorical will have the same categories and ordered as `self`.
pandas/core/arrays/categorical.py
take_nd
Adirio/pandas
python
def take_nd(self, indexer, allow_fill=None, fill_value=None): '\n Take elements from the Categorical.\n\n Parameters\n ----------\n indexer : sequence of integers\n allow_fill : bool, default None.\n How to handle negative values in `indexer`.\n\n * False: negative values in `indices` indicate positional indices\n from the right. This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate missing values\n (the default). These values are set to `fill_value`. Any other\n other negative values raise a ``ValueError``.\n\n .. versionchanged:: 0.23.0\n\n Deprecated the default value of `allow_fill`. The deprecated\n default is ``True``. In the future, this will change to\n ``False``.\n\n Returns\n -------\n Categorical\n This Categorical will have the same categories and ordered as\n `self`.\n ' indexer = np.asarray(indexer, dtype=np.intp) if (allow_fill is None): if (indexer < 0).any(): warn(_take_msg, FutureWarning, stacklevel=2) allow_fill = True if isna(fill_value): fill_value = (- 1) codes = take(self._codes, indexer, allow_fill=allow_fill, fill_value=fill_value) result = self._constructor(codes, dtype=self.dtype, fastpath=True) return result
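take_nd above is the indexing workhorse; in this pandas source it is expected to also back the public .take, so the sketch below uses .take and treats that aliasing as an assumption. Negative indices become missing slots once allow_fill=True:

import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'])
cat.take([0, 2, 2])                  # positional take: ['a', 'c', 'c']
cat.take([0, -1], allow_fill=True)   # -1 marks a missing slot, filled with NaN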
def _slice(self, slicer): ' Return a slice of myself.\n\n For internal compatibility with numpy arrays.\n ' if (isinstance(slicer, tuple) and (len(slicer) == 2)): if (not com.is_null_slice(slicer[0])): raise AssertionError('invalid slicing for a 1-ndim categorical') slicer = slicer[1] codes = self._codes[slicer] return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
-1,092,373,934,511,431,700
Return a slice of myself. For internal compatibility with numpy arrays.
pandas/core/arrays/categorical.py
_slice
Adirio/pandas
python
def _slice(self, slicer): ' Return a slice of myself.\n\n For internal compatibility with numpy arrays.\n ' if (isinstance(slicer, tuple) and (len(slicer) == 2)): if (not com.is_null_slice(slicer[0])): raise AssertionError('invalid slicing for a 1-ndim categorical') slicer = slicer[1] codes = self._codes[slicer] return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
def __len__(self): 'The length of this Categorical.' return len(self._codes)
8,220,366,926,502,292,000
The length of this Categorical.
pandas/core/arrays/categorical.py
__len__
Adirio/pandas
python
def __len__(self): return len(self._codes)
def __iter__(self): 'Returns an Iterator over the values of this Categorical.' return iter(self.get_values().tolist())
-8,865,596,715,025,023,000
Returns an Iterator over the values of this Categorical.
pandas/core/arrays/categorical.py
__iter__
Adirio/pandas
python
def __iter__(self): return iter(self.get_values().tolist())
def __contains__(self, key): 'Returns True if `key` is in this Categorical.' if isna(key): return self.isna().any() return contains(self, key, container=self._codes)
8,849,127,086,134,680,000
Returns True if `key` is in this Categorical.
pandas/core/arrays/categorical.py
__contains__
Adirio/pandas
python
def __contains__(self, key): if isna(key): return self.isna().any() return contains(self, key, container=self._codes)
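__contains__ above checks the codes, so membership reflects the stored values rather than the declared categories, and NaN is reported present whenever any code is -1. A short sketch:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', np.nan, 'b'], categories=['a', 'b', 'c'])
'a' in cat       # True: 'a' occurs in the values
'c' in cat       # False: declared as a category but never used
np.nan in cat    # True: the array holds a missing value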
def _tidy_repr(self, max_vals=10, footer=True): ' a short repr displaying only max_vals and an optional (but default\n footer)\n ' num = (max_vals // 2) head = self[:num]._get_repr(length=False, footer=False) tail = self[(- (max_vals - num)):]._get_repr(length=False, footer=False) result = u('{head}, ..., {tail}').format(head=head[:(- 1)], tail=tail[1:]) if footer: result = u('{result}\n{footer}').format(result=result, footer=self._repr_footer()) return compat.text_type(result)
-7,886,453,035,888,673,000
a short repr displaying only max_vals and an optional (but default footer)
pandas/core/arrays/categorical.py
_tidy_repr
Adirio/pandas
python
def _tidy_repr(self, max_vals=10, footer=True): ' a short repr displaying only max_vals and an optional (but default\n footer)\n ' num = (max_vals // 2) head = self[:num]._get_repr(length=False, footer=False) tail = self[(- (max_vals - num)):]._get_repr(length=False, footer=False) result = u('{head}, ..., {tail}').format(head=head[:(- 1)], tail=tail[1:]) if footer: result = u('{result}\n{footer}').format(result=result, footer=self._repr_footer()) return compat.text_type(result)
def _repr_categories(self): ' return the base repr for the categories ' max_categories = (10 if (get_option('display.max_categories') == 0) else get_option('display.max_categories')) from pandas.io.formats import format as fmt if (len(self.categories) > max_categories): num = (max_categories // 2) head = fmt.format_array(self.categories[:num], None) tail = fmt.format_array(self.categories[(- num):], None) category_strs = ((head + ['...']) + tail) else: category_strs = fmt.format_array(self.categories, None) category_strs = [x.strip() for x in category_strs] return category_strs
2,356,138,557,134,862,300
return the base repr for the categories
pandas/core/arrays/categorical.py
_repr_categories
Adirio/pandas
python
def _repr_categories(self): ' ' max_categories = (10 if (get_option('display.max_categories') == 0) else get_option('display.max_categories')) from pandas.io.formats import format as fmt if (len(self.categories) > max_categories): num = (max_categories // 2) head = fmt.format_array(self.categories[:num], None) tail = fmt.format_array(self.categories[(- num):], None) category_strs = ((head + ['...']) + tail) else: category_strs = fmt.format_array(self.categories, None) category_strs = [x.strip() for x in category_strs] return category_strs
def _repr_categories_info(self): ' Returns a string representation of the footer.' category_strs = self._repr_categories() dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype)) levheader = 'Categories ({length}, {dtype}): '.format(length=len(self.categories), dtype=dtype) (width, height) = get_terminal_size() max_width = (get_option('display.width') or width) if console.in_ipython_frontend(): max_width = 0 levstring = '' start = True cur_col_len = len(levheader) (sep_len, sep) = ((3, ' < ') if self.ordered else (2, ', ')) linesep = (sep.rstrip() + '\n') for val in category_strs: if ((max_width != 0) and (((cur_col_len + sep_len) + len(val)) > max_width)): levstring += (linesep + (' ' * (len(levheader) + 1))) cur_col_len = (len(levheader) + 1) elif (not start): levstring += sep cur_col_len += len(val) levstring += val start = False return (((levheader + '[') + levstring.replace(' < ... < ', ' ... ')) + ']')
-1,056,543,306,580,598,300
Returns a string representation of the footer.
pandas/core/arrays/categorical.py
_repr_categories_info
Adirio/pandas
python
def _repr_categories_info(self): ' ' category_strs = self._repr_categories() dtype = getattr(self.categories, 'dtype_str', str(self.categories.dtype)) levheader = 'Categories ({length}, {dtype}): '.format(length=len(self.categories), dtype=dtype) (width, height) = get_terminal_size() max_width = (get_option('display.width') or width) if console.in_ipython_frontend(): max_width = 0 levstring = '' start = True cur_col_len = len(levheader) (sep_len, sep) = ((3, ' < ') if self.ordered else (2, ', ')) linesep = (sep.rstrip() + '\n') for val in category_strs: if ((max_width != 0) and (((cur_col_len + sep_len) + len(val)) > max_width)): levstring += (linesep + (' ' * (len(levheader) + 1))) cur_col_len = (len(levheader) + 1) elif (not start): levstring += sep cur_col_len += len(val) levstring += val start = False return (((levheader + '[') + levstring.replace(' < ... < ', ' ... ')) + ']')
def __unicode__(self): ' Unicode representation. ' _maxlen = 10 if (len(self._codes) > _maxlen): result = self._tidy_repr(_maxlen) elif (len(self._codes) > 0): result = self._get_repr(length=(len(self) > _maxlen)) else: msg = self._get_repr(length=False, footer=True).replace('\n', ', ') result = '[], {repr_msg}'.format(repr_msg=msg) return result
-2,285,762,338,194,706,700
Unicode representation.
pandas/core/arrays/categorical.py
__unicode__
Adirio/pandas
python
def __unicode__(self): ' ' _maxlen = 10 if (len(self._codes) > _maxlen): result = self._tidy_repr(_maxlen) elif (len(self._codes) > 0): result = self._get_repr(length=(len(self) > _maxlen)) else: msg = self._get_repr(length=False, footer=True).replace('\n', ', ') result = '[], {repr_msg}'.format(repr_msg=msg) return result
def _maybe_coerce_indexer(self, indexer): ' return an indexer coerced to the codes dtype ' if (isinstance(indexer, np.ndarray) and (indexer.dtype.kind == 'i')): indexer = indexer.astype(self._codes.dtype) return indexer
-8,060,042,278,688,720,000
return an indexer coerced to the codes dtype
pandas/core/arrays/categorical.py
_maybe_coerce_indexer
Adirio/pandas
python
def _maybe_coerce_indexer(self, indexer): ' ' if (isinstance(indexer, np.ndarray) and (indexer.dtype.kind == 'i')): indexer = indexer.astype(self._codes.dtype) return indexer
def __getitem__(self, key): ' Return an item. ' if isinstance(key, (int, np.integer)): i = self._codes[key] if (i == (- 1)): return np.nan else: return self.categories[i] else: return self._constructor(values=self._codes[key], dtype=self.dtype, fastpath=True)
7,442,828,030,867,244,000
Return an item.
pandas/core/arrays/categorical.py
__getitem__
Adirio/pandas
python
def __getitem__(self, key): ' ' if isinstance(key, (int, np.integer)): i = self._codes[key] if (i == (- 1)): return np.nan else: return self.categories[i] else: return self._constructor(values=self._codes[key], dtype=self.dtype, fastpath=True)
def __setitem__(self, key, value): ' Item assignment.\n\n\n Raises\n ------\n ValueError\n If (one or more) Value is not in categories or if a assigned\n `Categorical` does not have the same categories\n ' if isinstance(value, Categorical): if (not value.categories.equals(self.categories)): raise ValueError('Cannot set a Categorical with another, without identical categories') rvalue = (value if is_list_like(value) else [value]) from pandas import Index to_add = Index(rvalue).difference(self.categories) if (len(to_add) and (not isna(to_add).all())): raise ValueError('Cannot setitem on a Categorical with a new category, set the categories first') if isinstance(key, (int, np.integer)): pass elif isinstance(key, tuple): if (len(key) == 2): if (not com.is_null_slice(key[0])): raise AssertionError('invalid slicing for a 1-ndim categorical') key = key[1] elif (len(key) == 1): key = key[0] else: raise AssertionError('invalid slicing for a 1-ndim categorical') elif isinstance(key, slice): pass else: key = np.asarray(key) lindexer = self.categories.get_indexer(rvalue) lindexer = self._maybe_coerce_indexer(lindexer) self._codes[key] = lindexer
4,775,883,318,009,241,000
Item assignment. Raises ------ ValueError If (one or more) Value is not in categories or if a assigned `Categorical` does not have the same categories
pandas/core/arrays/categorical.py
__setitem__
Adirio/pandas
python
def __setitem__(self, key, value): ' Item assignment.\n\n\n Raises\n ------\n ValueError\n If (one or more) Value is not in categories or if a assigned\n `Categorical` does not have the same categories\n ' if isinstance(value, Categorical): if (not value.categories.equals(self.categories)): raise ValueError('Cannot set a Categorical with another, without identical categories') rvalue = (value if is_list_like(value) else [value]) from pandas import Index to_add = Index(rvalue).difference(self.categories) if (len(to_add) and (not isna(to_add).all())): raise ValueError('Cannot setitem on a Categorical with a new category, set the categories first') if isinstance(key, (int, np.integer)): pass elif isinstance(key, tuple): if (len(key) == 2): if (not com.is_null_slice(key[0])): raise AssertionError('invalid slicing for a 1-ndim categorical') key = key[1] elif (len(key) == 1): key = key[0] else: raise AssertionError('invalid slicing for a 1-ndim categorical') elif isinstance(key, slice): pass else: key = np.asarray(key) lindexer = self.categories.get_indexer(rvalue) lindexer = self._maybe_coerce_indexer(lindexer) self._codes[key] = lindexer
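Item access returns NaN for a -1 code, and item assignment (per the __setitem__ row above) only accepts values that are already categories or NaN. A hedged sketch:

import numpy as np
import pandas as pd

cat = pd.Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c'])
cat[0]            # 'a'
cat[0] = 'b'      # fine: 'b' is an existing category
cat[1] = np.nan   # fine: NaN is always assignable (the code becomes -1)
# cat[2] = 'z'    # would raise ValueError: set the categories first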
def _reverse_indexer(self): "\n Compute the inverse of a categorical, returning\n a dict of categories -> indexers.\n\n *This is an internal function*\n\n Returns\n -------\n dict of categories -> indexers\n\n Example\n -------\n In [1]: c = pd.Categorical(list('aabca'))\n\n In [2]: c\n Out[2]:\n [a, a, b, c, a]\n Categories (3, object): [a, b, c]\n\n In [3]: c.categories\n Out[3]: Index([u'a', u'b', u'c'], dtype='object')\n\n In [4]: c.codes\n Out[4]: array([0, 0, 1, 2, 0], dtype=int8)\n\n In [5]: c._reverse_indexer()\n Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}\n\n " categories = self.categories (r, counts) = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size) counts = counts.cumsum() result = [r[counts[indexer]:counts[(indexer + 1)]] for indexer in range((len(counts) - 1))] result = dict(zip(categories, result)) return result
-6,168,884,411,218,855,000
Compute the inverse of a categorical, returning a dict of categories -> indexers. *This is an internal function* Returns ------- dict of categories -> indexers Example ------- In [1]: c = pd.Categorical(list('aabca')) In [2]: c Out[2]: [a, a, b, c, a] Categories (3, object): [a, b, c] In [3]: c.categories Out[3]: Index([u'a', u'b', u'c'], dtype='object') In [4]: c.codes Out[4]: array([0, 0, 1, 2, 0], dtype=int8) In [5]: c._reverse_indexer() Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}
pandas/core/arrays/categorical.py
_reverse_indexer
Adirio/pandas
python
def _reverse_indexer(self): "\n Compute the inverse of a categorical, returning\n a dict of categories -> indexers.\n\n *This is an internal function*\n\n Returns\n -------\n dict of categories -> indexers\n\n Example\n -------\n In [1]: c = pd.Categorical(list('aabca'))\n\n In [2]: c\n Out[2]:\n [a, a, b, c, a]\n Categories (3, object): [a, b, c]\n\n In [3]: c.categories\n Out[3]: Index([u'a', u'b', u'c'], dtype='object')\n\n In [4]: c.codes\n Out[4]: array([0, 0, 1, 2, 0], dtype=int8)\n\n In [5]: c._reverse_indexer()\n Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])}\n\n " categories = self.categories (r, counts) = libalgos.groupsort_indexer(self.codes.astype('int64'), categories.size) counts = counts.cumsum() result = [r[counts[indexer]:counts[(indexer + 1)]] for indexer in range((len(counts) - 1))] result = dict(zip(categories, result)) return result
def min(self, numeric_only=None, **kwargs): ' The minimum value of the object.\n\n Only ordered `Categoricals` have a minimum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n min : the minimum of this `Categorical`\n ' self.check_for_ordered('min') if numeric_only: good = (self._codes != (- 1)) pointer = self._codes[good].min(**kwargs) else: pointer = self._codes.min(**kwargs) if (pointer == (- 1)): return np.nan else: return self.categories[pointer]
3,181,977,709,918,372,000
The minimum value of the object. Only ordered `Categoricals` have a minimum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- min : the minimum of this `Categorical`
pandas/core/arrays/categorical.py
min
Adirio/pandas
python
def min(self, numeric_only=None, **kwargs): ' The minimum value of the object.\n\n Only ordered `Categoricals` have a minimum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n min : the minimum of this `Categorical`\n ' self.check_for_ordered('min') if numeric_only: good = (self._codes != (- 1)) pointer = self._codes[good].min(**kwargs) else: pointer = self._codes.min(**kwargs) if (pointer == (- 1)): return np.nan else: return self.categories[pointer]
def max(self, numeric_only=None, **kwargs): ' The maximum value of the object.\n\n Only ordered `Categoricals` have a maximum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n max : the maximum of this `Categorical`\n ' self.check_for_ordered('max') if numeric_only: good = (self._codes != (- 1)) pointer = self._codes[good].max(**kwargs) else: pointer = self._codes.max(**kwargs) if (pointer == (- 1)): return np.nan else: return self.categories[pointer]
2,410,826,779,364,321,000
The maximum value of the object. Only ordered `Categoricals` have a maximum! Raises ------ TypeError If the `Categorical` is not `ordered`. Returns ------- max : the maximum of this `Categorical`
pandas/core/arrays/categorical.py
max
Adirio/pandas
python
def max(self, numeric_only=None, **kwargs): ' The maximum value of the object.\n\n Only ordered `Categoricals` have a maximum!\n\n Raises\n ------\n TypeError\n If the `Categorical` is not `ordered`.\n\n Returns\n -------\n max : the maximum of this `Categorical`\n ' self.check_for_ordered('max') if numeric_only: good = (self._codes != (- 1)) pointer = self._codes[good].max(**kwargs) else: pointer = self._codes.max(**kwargs) if (pointer == (- 1)): return np.nan else: return self.categories[pointer]
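min and max, as both rows above note, are only defined for ordered categoricals. A minimal sketch:

import pandas as pd

cat = pd.Categorical(['b', 'a', 'c'], categories=['a', 'b', 'c'], ordered=True)
cat.min(), cat.max()   # ('a', 'c') under the declared ordering

unordered = pd.Categorical(['b', 'a', 'c'])
# unordered.min()      # raises TypeError; call .as_ordered() first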
def mode(self, dropna=True): "\n Returns the mode(s) of the Categorical.\n\n Always returns `Categorical` even if only one value.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n modes : `Categorical` (sorted)\n " import pandas._libs.hashtable as htable codes = self._codes if dropna: good = (self._codes != (- 1)) codes = self._codes[good] codes = sorted(htable.mode_int64(ensure_int64(codes), dropna)) return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
2,182,157,549,901,241,900
Returns the mode(s) of the Categorical. Always returns `Categorical` even if only one value. Parameters ---------- dropna : boolean, default True Don't consider counts of NaN/NaT. .. versionadded:: 0.24.0 Returns ------- modes : `Categorical` (sorted)
pandas/core/arrays/categorical.py
mode
Adirio/pandas
python
def mode(self, dropna=True): "\n Returns the mode(s) of the Categorical.\n\n Always returns `Categorical` even if only one value.\n\n Parameters\n ----------\n dropna : boolean, default True\n Don't consider counts of NaN/NaT.\n\n .. versionadded:: 0.24.0\n\n Returns\n -------\n modes : `Categorical` (sorted)\n " import pandas._libs.hashtable as htable codes = self._codes if dropna: good = (self._codes != (- 1)) codes = self._codes[good] codes = sorted(htable.mode_int64(ensure_int64(codes), dropna)) return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
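mode above returns every modal value, sorted by code, as a Categorical; per its docstring the dropna keyword arrived in 0.24. A short sketch:

import pandas as pd

cat = pd.Categorical(['a', 'a', 'b', 'b', 'c'])
cat.mode()   # ['a', 'b'] — both tied modes, still with categories ['a', 'b', 'c']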
def unique(self): "\n Return the ``Categorical`` which ``categories`` and ``codes`` are\n unique. Unused categories are NOT returned.\n\n - unordered category: values and categories are sorted by appearance\n order.\n - ordered category: values are sorted by appearance order, categories\n keeps existing order.\n\n Returns\n -------\n unique values : ``Categorical``\n\n Examples\n --------\n An unordered Categorical will return categories in the\n order of appearance.\n\n >>> pd.Categorical(list('baabc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n >>> pd.Categorical(list('baabc'), categories=list('abc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n An ordered Categorical preserves the category ordering.\n\n >>> pd.Categorical(list('baabc'),\n ... categories=list('abc'),\n ... ordered=True)\n [b, a, c]\n Categories (3, object): [a < b < c]\n\n See Also\n --------\n unique\n CategoricalIndex.unique\n Series.unique\n\n " unique_codes = unique1d(self.codes) cat = self.copy() cat._codes = unique_codes take_codes = unique_codes[(unique_codes != (- 1))] if self.ordered: take_codes = np.sort(take_codes) return cat.set_categories(cat.categories.take(take_codes))
1,525,540,672,474,470,100
Return the ``Categorical`` which ``categories`` and ``codes`` are unique. Unused categories are NOT returned. - unordered category: values and categories are sorted by appearance order. - ordered category: values are sorted by appearance order, categories keeps existing order. Returns ------- unique values : ``Categorical`` Examples -------- An unordered Categorical will return categories in the order of appearance. >>> pd.Categorical(list('baabc')) [b, a, c] Categories (3, object): [b, a, c] >>> pd.Categorical(list('baabc'), categories=list('abc')) [b, a, c] Categories (3, object): [b, a, c] An ordered Categorical preserves the category ordering. >>> pd.Categorical(list('baabc'), ... categories=list('abc'), ... ordered=True) [b, a, c] Categories (3, object): [a < b < c] See Also -------- unique CategoricalIndex.unique Series.unique
pandas/core/arrays/categorical.py
unique
Adirio/pandas
python
def unique(self): "\n Return the ``Categorical`` which ``categories`` and ``codes`` are\n unique. Unused categories are NOT returned.\n\n - unordered category: values and categories are sorted by appearance\n order.\n - ordered category: values are sorted by appearance order, categories\n keeps existing order.\n\n Returns\n -------\n unique values : ``Categorical``\n\n Examples\n --------\n An unordered Categorical will return categories in the\n order of appearance.\n\n >>> pd.Categorical(list('baabc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n >>> pd.Categorical(list('baabc'), categories=list('abc'))\n [b, a, c]\n Categories (3, object): [b, a, c]\n\n An ordered Categorical preserves the category ordering.\n\n >>> pd.Categorical(list('baabc'),\n ... categories=list('abc'),\n ... ordered=True)\n [b, a, c]\n Categories (3, object): [a < b < c]\n\n See Also\n --------\n unique\n CategoricalIndex.unique\n Series.unique\n\n " unique_codes = unique1d(self.codes) cat = self.copy() cat._codes = unique_codes take_codes = unique_codes[(unique_codes != (- 1))] if self.ordered: take_codes = np.sort(take_codes) return cat.set_categories(cat.categories.take(take_codes))
def equals(self, other): '\n Returns True if categorical arrays are equal.\n\n Parameters\n ----------\n other : `Categorical`\n\n Returns\n -------\n are_equal : boolean\n ' if self.is_dtype_equal(other): if self.categories.equals(other.categories): other_codes = other._codes else: other_codes = _recode_for_categories(other.codes, other.categories, self.categories) return np.array_equal(self._codes, other_codes) return False
2,929,335,446,468,426,000
Returns True if categorical arrays are equal. Parameters ---------- other : `Categorical` Returns ------- are_equal : boolean
pandas/core/arrays/categorical.py
equals
Adirio/pandas
python
def equals(self, other): '\n Returns True if categorical arrays are equal.\n\n Parameters\n ----------\n other : `Categorical`\n\n Returns\n -------\n are_equal : boolean\n ' if self.is_dtype_equal(other): if self.categories.equals(other.categories): other_codes = other._codes else: other_codes = _recode_for_categories(other.codes, other.categories, self.categories) return np.array_equal(self._codes, other_codes) return False
def is_dtype_equal(self, other): '\n Returns True if categoricals are the same dtype\n same categories, and same ordered\n\n Parameters\n ----------\n other : Categorical\n\n Returns\n -------\n are_equal : boolean\n ' try: return (hash(self.dtype) == hash(other.dtype)) except (AttributeError, TypeError): return False
-2,454,401,285,141,445,600
Returns True if categoricals are the same dtype same categories, and same ordered Parameters ---------- other : Categorical Returns ------- are_equal : boolean
pandas/core/arrays/categorical.py
is_dtype_equal
Adirio/pandas
python
def is_dtype_equal(self, other): '\n Returns True if categoricals are the same dtype\n same categories, and same ordered\n\n Parameters\n ----------\n other : Categorical\n\n Returns\n -------\n are_equal : boolean\n ' try: return (hash(self.dtype) == hash(other.dtype)) except (AttributeError, TypeError): return False
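equals above first requires is_dtype_equal and then recodes the other array's codes when the categories merely differ in order; is_dtype_equal itself compares dtype hashes, which for unordered categoricals appear to ignore category order. A sketch of that interplay (the hash behaviour is my reading of the code, treat it as an assumption):

import pandas as pd

c1 = pd.Categorical(['a', 'b'], categories=['a', 'b'])
c2 = pd.Categorical(['a', 'b'], categories=['b', 'a'])
c1.is_dtype_equal(c2)                  # True: same unordered category set
c1.equals(c2)                          # True: codes are recoded before comparing
c1.equals(pd.Categorical(['b', 'a']))  # False: same dtype, different values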
def describe(self): ' Describes this Categorical\n\n Returns\n -------\n description: `DataFrame`\n A dataframe with frequency and counts by category.\n ' counts = self.value_counts(dropna=False) freqs = (counts / float(counts.sum())) from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = ['counts', 'freqs'] result.index.name = 'categories' return result
4,132,264,890,513,181,000
Describes this Categorical Returns ------- description: `DataFrame` A dataframe with frequency and counts by category.
pandas/core/arrays/categorical.py
describe
Adirio/pandas
python
def describe(self): ' Describes this Categorical\n\n Returns\n -------\n description: `DataFrame`\n A dataframe with frequency and counts by category.\n ' counts = self.value_counts(dropna=False) freqs = (counts / float(counts.sum())) from pandas.core.reshape.concat import concat result = concat([counts, freqs], axis=1) result.columns = ['counts', 'freqs'] result.index.name = 'categories' return result
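describe above stacks value_counts(dropna=False) with relative frequencies into a small DataFrame indexed by category. A sketch:

import pandas as pd

cat = pd.Categorical(['a', 'a', 'b'], categories=['a', 'b', 'c'])
cat.describe()   # DataFrame with 'counts' and 'freqs' columns, index named 'categories';
                 # the unused 'c' shows up with count 0 and freq 0.0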
def repeat(self, repeats, *args, **kwargs): '\n Repeat elements of a Categorical.\n\n See also\n --------\n numpy.ndarray.repeat\n\n ' nv.validate_repeat(args, kwargs) codes = self._codes.repeat(repeats) return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
1,980,116,856,874,389,800
Repeat elements of a Categorical. See also -------- numpy.ndarray.repeat
pandas/core/arrays/categorical.py
repeat
Adirio/pandas
python
def repeat(self, repeats, *args, **kwargs): '\n Repeat elements of a Categorical.\n\n See also\n --------\n numpy.ndarray.repeat\n\n ' nv.validate_repeat(args, kwargs) codes = self._codes.repeat(repeats) return self._constructor(values=codes, dtype=self.dtype, fastpath=True)
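A quick sketch of `repeat` (standard pandas assumed); unused categories survive the repeat because only the codes are repeated:

```python
import pandas as pd

cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
print(cat.repeat(2))       # expected: [a, a, b, b], Categories (3, object): [a, b, c]
print(cat.repeat([1, 3]))  # per-element counts are forwarded to ndarray.repeat as well
```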
def isin(self, values): "\n Check whether `values` are contained in Categorical.\n\n Return a boolean NumPy Array showing whether each element in\n the Categorical matches an element in the passed sequence of\n `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. Instead, turn a single string into a\n list of one element.\n\n Returns\n -------\n isin : numpy.ndarray (bool dtype)\n\n Raises\n ------\n TypeError\n * If `values` is not a set or list-like\n\n See Also\n --------\n pandas.Series.isin : equivalent method on Series\n\n Examples\n --------\n\n >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'])\n >>> s.isin(['cow', 'lama'])\n array([ True, True, True, False, True, False])\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n array([ True, False, True, False, True, False])\n " from pandas.core.series import _sanitize_array if (not is_list_like(values)): raise TypeError('only list-like objects are allowed to be passed to isin(), you passed a [{values_type}]'.format(values_type=type(values).__name__)) values = _sanitize_array(values, None, None) null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer(values) code_values = code_values[(null_mask | (code_values >= 0))] return algorithms.isin(self.codes, code_values)
7,898,877,785,712,441,000
Check whether `values` are contained in Categorical. Return a boolean NumPy Array showing whether each element in the Categorical matches an element in the passed sequence of `values` exactly. Parameters ---------- values : set or list-like The sequence of values to test. Passing in a single string will raise a ``TypeError``. Instead, turn a single string into a list of one element. Returns ------- isin : numpy.ndarray (bool dtype) Raises ------ TypeError * If `values` is not a set or list-like See Also -------- pandas.Series.isin : equivalent method on Series Examples -------- >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama', ... 'hippo']) >>> s.isin(['cow', 'lama']) array([ True, True, True, False, True, False]) Passing a single string as ``s.isin('lama')`` will raise an error. Use a list of one element instead: >>> s.isin(['lama']) array([ True, False, True, False, True, False])
pandas/core/arrays/categorical.py
isin
Adirio/pandas
python
def isin(self, values): "\n Check whether `values` are contained in Categorical.\n\n Return a boolean NumPy Array showing whether each element in\n the Categorical matches an element in the passed sequence of\n `values` exactly.\n\n Parameters\n ----------\n values : set or list-like\n The sequence of values to test. Passing in a single string will\n raise a ``TypeError``. Instead, turn a single string into a\n list of one element.\n\n Returns\n -------\n isin : numpy.ndarray (bool dtype)\n\n Raises\n ------\n TypeError\n * If `values` is not a set or list-like\n\n See Also\n --------\n pandas.Series.isin : equivalent method on Series\n\n Examples\n --------\n\n >>> s = pd.Categorical(['lama', 'cow', 'lama', 'beetle', 'lama',\n ... 'hippo'])\n >>> s.isin(['cow', 'lama'])\n array([ True, True, True, False, True, False])\n\n Passing a single string as ``s.isin('lama')`` will raise an error. Use\n a list of one element instead:\n\n >>> s.isin(['lama'])\n array([ True, False, True, False, True, False])\n " from pandas.core.series import _sanitize_array if (not is_list_like(values)): raise TypeError('only list-like objects are allowed to be passed to isin(), you passed a [{values_type}]'.format(values_type=type(values).__name__)) values = _sanitize_array(values, None, None) null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer(values) code_values = code_values[(null_mask | (code_values >= 0))] return algorithms.isin(self.codes, code_values)
def submit(self, request, transaction, mobile: str=None, valid_card_number: str=None, callback: str=None): "Submits a transaction to Pay.ir.\n\n When called, the method submits the necessary information about the transaction to Pay.ir and returns a\n HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an\n error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.\n\n :param request: The WSGIRequest object passed to the view.\n :param transaction: A transaction object (or a similar class) that's already been saved to the database.\n :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.\n :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.\n :param callback: (Optional) Overrides the default callback of the gateway.\n " payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, (callback or self.default_callback)) response = requests.post(self.submission_url, data=payload) data = response.json() if response: transaction.token = data['token'] transaction.save() return redirect(f'https://pay.ir/pg/{transaction.token}') raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
-8,579,580,414,340,304,000
Submits a transaction to Pay.ir. When called, the method submits the necessary information about the transaction to Pay.ir and returns a HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir. :param request: The WSGIRequest object passed to the view. :param transaction: A transaction object (or a similar class) that's already been saved to the database. :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway. :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction. :param callback: (Optional) Overrides the default callback of the gateway.
payir/models.py
submit
farahmand-m/django-payir
python
def submit(self, request, transaction, mobile: str=None, valid_card_number: str=None, callback: str=None): "Submits a transaction to Pay.ir.\n\n When called, the method submits the necessary information about the transaction to Pay.ir and returns a\n HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an\n error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.\n\n :param request: The WSGIRequest object passed to the view.\n :param transaction: A transaction object (or a similar class) that's already been saved to the database.\n :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.\n :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.\n :param callback: (Optional) Overrides the default callback of the gateway.\n " payload = self._prepare_submission_payload(request, transaction, mobile, valid_card_number, (callback or self.default_callback)) response = requests.post(self.submission_url, data=payload) data = response.json() if response: transaction.token = data['token'] transaction.save() return redirect(f'https://pay.ir/pg/{transaction.token}') raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
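The `submit` record returns a redirect to the Pay.ir gateway. The sketch below is a hedged Django view showing one way it might be wired up; the `Gateway` model name, its manager call and the `Transaction` lookup are assumptions made for illustration, and only `submit()`'s signature comes from the record:

```python
from django.shortcuts import get_object_or_404

from payir.models import Gateway, Transaction  # assumed import locations


def start_payment(request, transaction_id):
    gateway = Gateway.objects.first()  # assumed: a single configured gateway
    transaction = get_object_or_404(Transaction, pk=transaction_id)
    # Redirects the payer to https://pay.ir/pg/<token>, or raises GatewayError on failure.
    return gateway.submit(request, transaction, mobile='09120000000')
```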
def create_and_submit(self, request, account, amount: int, mobile: str=None, valid_card_number: str=None, callback: str=None): "Creates a transaction object and submits the transaction to Pay.ir.\n\n When called, the method submits the necessary information about the transaction to Pay.ir and returns a\n HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an\n error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.\n\n :param request: The WSGIRequest object passed to the view.\n :param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.\n :param amount: The amount of the transaction in IRR. The amount has to be more than 1000.\n :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.\n :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.\n :param callback: (Optional) Overrides the default callback of the gateway.\n " transaction = Transaction(account=account, amount=amount) transaction.save() return self.submit(request, transaction, mobile, valid_card_number, callback)
-2,672,328,821,681,613,000
Creates a transaction object and submits the transaction to Pay.ir. When called, the method submits the necessary information about the transaction to Pay.ir and returns a HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir. :param request: The WSGIRequest object passed to the view. :param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey. :param amount: The amount of the transaction in IRR. The amount has to be more than 1000. :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway. :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction. :param callback: (Optional) Overrides the default callback of the gateway.
payir/models.py
create_and_submit
farahmand-m/django-payir
python
def create_and_submit(self, request, account, amount: int, mobile: str=None, valid_card_number: str=None, callback: str=None): "Creates a transaction object and submits the transaction to Pay.ir.\n\n When called, the method submits the necessary information about the transaction to Pay.ir and returns a\n HttpResponseRedirect object that can redirect the user to the gateway, if nothing goes wrong. In case of an\n error, a GatewayError is raised, containing the error_code and error_message reported by Pay.ir.\n\n :param request: The WSGIRequest object passed to the view.\n :param account: Payer's account object. The account will be assigned to the transaction through a ForeignKey.\n :param amount: The amount of the transaction in IRR. The amount has to be more than 1000.\n :param mobile: (Optional) Phone number of the payer. If provided, payer's saved card numbers will be listed for them in the gateway.\n :param valid_card_number: (Optional) Specifies a single card number as the only one that can complete the transaction.\n :param callback: (Optional) Overrides the default callback of the gateway.\n " transaction = Transaction(account=account, amount=amount) transaction.save() return self.submit(request, transaction, mobile, valid_card_number, callback)
def verify(self, transaction): "Verifies the transaction with Pay.ir.\n\n When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to\n the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.\n The boolean value would be True if the `verified` flag of the transaction was switched to True. If the\n `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying\n to confirm a payment for a second time.\n\n :param transaction: The transaction object corresponding to the specified token in request.GET.\n " payload = {'api': self.api_key, 'token': transaction.token} response = requests.post(self.verification_url, data=payload) data = response.json() if response: if (not transaction.verified): transaction.gateway = self transaction.verified = True transaction.verified_at = timezone.now() transaction.save() return (transaction, True) else: return (transaction, False) raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
-1,236,591,773,615,743,000
Verifies the transaction with Pay.ir. When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value. The boolean value would be True if the `verified` flag of the transaction was switched to True. If the `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying to confirm a payment for a second time. :param transaction: The transaction object corresponding to the specified token in request.GET.
payir/models.py
verify
farahmand-m/django-payir
python
def verify(self, transaction): "Verifies the transaction with Pay.ir.\n\n When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to\n the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.\n The boolean value would be True if the `verified` flag of the transaction was switched to True. If the\n `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying\n to confirm a payment for a second time.\n\n :param transaction: The transaction object corresponding to the specified token in request.GET.\n " payload = {'api': self.api_key, 'token': transaction.token} response = requests.post(self.verification_url, data=payload) data = response.json() if response: if (not transaction.verified): transaction.gateway = self transaction.verified = True transaction.verified_at = timezone.now() transaction.save() return (transaction, True) else: return (transaction, False) raise exceptions.GatewayError(error_code=data['errorCode'], error_message=data['errorMessage'])
def find_and_verify(self, token: str): "Finds a transaction with a matching token value and verifies it with Pay.ir.\n\n When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to\n the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.\n The boolean value would be True if the `verified` flag of the transaction was switched to True. If the\n `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying\n to confirm a payment for a second time.\n\n :param token: The token of the transaction, which can be found in request.GET. The method will look for a\n transaction object with the same token and return it as the first argument.\n " transaction = Transaction.objects.get(token=token) return self.verify(transaction)
7,657,918,661,398,567,000
Finds a transaction with a matching token value and verifies it with Pay.ir. When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value. The boolean value would be True if the `verified` flag of the transaction was switched to True. If the `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying to confirm a payment for a second time. :param token: The token of the transaction, which can be found in request.GET. The method will look for a transaction object with the same token and return it as the first argument.
payir/models.py
find_and_verify
farahmand-m/django-payir
python
def find_and_verify(self, token: str): "Finds a transaction with a matching token value and verifies it with Pay.ir.\n\n When a transaction returns with status '1', it must be verified with Pay.ir. Otherwise, it will be returned to\n the payer's bank account in 30 minutes. The method returns the updated transaction object and a boolean value.\n The boolean value would be True if the `verified` flag of the transaction was switched to True. If the\n `verified` attribute of transaction object and the returned boolean value do not match, the user might be trying\n to confirm a payment for a second time.\n\n :param token: The token of the transaction, which can be found in request.GET. The method will look for a\n transaction object with the same token and return it as the first argument.\n " transaction = Transaction.objects.get(token=token) return self.verify(transaction)
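`find_and_verify` pairs naturally with a callback view. Another hedged sketch; the `status`/`token` query parameters follow Pay.ir's callback convention, while the view itself and its responses are illustrative assumptions:

```python
from django.http import HttpResponse

from payir.models import Gateway  # assumed import location


def payment_callback(request):
    gateway = Gateway.objects.first()  # assumed: a single configured gateway
    if request.GET.get('status') == '1':
        transaction, verified_now = gateway.find_and_verify(request.GET['token'])
        if verified_now:
            return HttpResponse('Payment verified.')
        return HttpResponse('This payment was already verified.')  # possible double submission
    return HttpResponse('Payment failed or was cancelled.', status=400)
```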
async def test_hls_stream(hass, hass_client, stream_worker_sync): '\n Test hls stream.\n\n Purposefully not mocking anything here to test full\n integration with the stream component.\n ' (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) stream.add_provider('hls') url = request_stream(hass, source) http_client = (await hass_client()) parsed_url = urlparse(url) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) playlist = (await playlist_response.text()) playlist_url = '/'.join(parsed_url.path.split('/')[:(- 1)]) init_url = (playlist_url + '/init.mp4') init_response = (await http_client.get(init_url)) assert (init_response.status == 200) playlist = (await playlist_response.text()) playlist_url = '/'.join(parsed_url.path.split('/')[:(- 1)]) segment_url = ((playlist_url + '/') + playlist.splitlines()[(- 1)]) segment_response = (await http_client.get(segment_url)) assert (segment_response.status == 200) stream_worker_sync.resume() stream.stop() fail_response = (await http_client.get(parsed_url.path)) assert (fail_response.status == HTTP_NOT_FOUND)
8,384,524,558,263,135,000
Test hls stream. Purposefully not mocking anything here to test full integration with the stream component.
tests/components/stream/test_hls.py
test_hls_stream
BoresXP/core
python
async def test_hls_stream(hass, hass_client, stream_worker_sync): '\n Test hls stream.\n\n Purposefully not mocking anything here to test full\n integration with the stream component.\n ' (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) stream.add_provider('hls') url = request_stream(hass, source) http_client = (await hass_client()) parsed_url = urlparse(url) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) playlist = (await playlist_response.text()) playlist_url = '/'.join(parsed_url.path.split('/')[:(- 1)]) init_url = (playlist_url + '/init.mp4') init_response = (await http_client.get(init_url)) assert (init_response.status == 200) playlist = (await playlist_response.text()) playlist_url = '/'.join(parsed_url.path.split('/')[:(- 1)]) segment_url = ((playlist_url + '/') + playlist.splitlines()[(- 1)]) segment_response = (await http_client.get(segment_url)) assert (segment_response.status == 200) stream_worker_sync.resume() stream.stop() fail_response = (await http_client.get(parsed_url.path)) assert (fail_response.status == HTTP_NOT_FOUND)
async def test_stream_timeout(hass, hass_client, stream_worker_sync): 'Test hls stream timeout.' (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) stream.add_provider('hls') url = request_stream(hass, source) http_client = (await hass_client()) parsed_url = urlparse(url) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) future = (dt_util.utcnow() + timedelta(minutes=1)) async_fire_time_changed(hass, future) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) stream_worker_sync.resume() future = (dt_util.utcnow() + timedelta(minutes=5)) async_fire_time_changed(hass, future) fail_response = (await http_client.get(parsed_url.path)) assert (fail_response.status == HTTP_NOT_FOUND)
1,161,223,324,562,560,800
Test hls stream timeout.
tests/components/stream/test_hls.py
test_stream_timeout
BoresXP/core
python
async def test_stream_timeout(hass, hass_client, stream_worker_sync): (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) stream.add_provider('hls') url = request_stream(hass, source) http_client = (await hass_client()) parsed_url = urlparse(url) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) future = (dt_util.utcnow() + timedelta(minutes=1)) async_fire_time_changed(hass, future) playlist_response = (await http_client.get(parsed_url.path)) assert (playlist_response.status == 200) stream_worker_sync.resume() future = (dt_util.utcnow() + timedelta(minutes=5)) async_fire_time_changed(hass, future) fail_response = (await http_client.get(parsed_url.path)) assert (fail_response.status == HTTP_NOT_FOUND)
async def test_stream_ended(hass, stream_worker_sync): 'Test hls stream packets ended.' (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) track = stream.add_provider('hls') request_stream(hass, source) while True: segment = (await track.recv()) if (segment is None): break segments = segment.sequence if (segments > 1): stream_worker_sync.resume() assert (segments > 1) assert (not track.get_segment()) stream.stop()
-5,946,003,191,077,505,000
Test hls stream packets ended.
tests/components/stream/test_hls.py
test_stream_ended
BoresXP/core
python
async def test_stream_ended(hass, stream_worker_sync): (await async_setup_component(hass, 'stream', {'stream': {}})) stream_worker_sync.pause() source = generate_h264_video() stream = preload_stream(hass, source) track = stream.add_provider('hls') request_stream(hass, source) while True: segment = (await track.recv()) if (segment is None): break segments = segment.sequence if (segments > 1): stream_worker_sync.resume() assert (segments > 1) assert (not track.get_segment()) stream.stop()
async def test_stream_keepalive(hass): 'Test hls stream retries the stream when keepalive=True.' (await async_setup_component(hass, 'stream', {'stream': {}})) source = 'test_stream_keepalive_source' stream = preload_stream(hass, source) track = stream.add_provider('hls') track.num_segments = 2 cur_time = 0 def time_side_effect(): nonlocal cur_time if (cur_time >= 80): stream.keepalive = False cur_time += 40 return cur_time with patch('av.open') as av_open, patch('homeassistant.components.stream.worker.time') as mock_time, patch('homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT', 0): av_open.side_effect = av.error.InvalidDataError((- 2), 'error') mock_time.time.side_effect = time_side_effect request_stream(hass, source, keepalive=True) stream._thread.join() stream._thread = None assert (av_open.call_count == 2) stream.stop()
5,628,805,682,028,231,000
Test hls stream retries the stream when keepalive=True.
tests/components/stream/test_hls.py
test_stream_keepalive
BoresXP/core
python
async def test_stream_keepalive(hass): (await async_setup_component(hass, 'stream', {'stream': {}})) source = 'test_stream_keepalive_source' stream = preload_stream(hass, source) track = stream.add_provider('hls') track.num_segments = 2 cur_time = 0 def time_side_effect(): nonlocal cur_time if (cur_time >= 80): stream.keepalive = False cur_time += 40 return cur_time with patch('av.open') as av_open, patch('homeassistant.components.stream.worker.time') as mock_time, patch('homeassistant.components.stream.worker.STREAM_RESTART_INCREMENT', 0): av_open.side_effect = av.error.InvalidDataError((- 2), 'error') mock_time.time.side_effect = time_side_effect request_stream(hass, source, keepalive=True) stream._thread.join() stream._thread = None assert (av_open.call_count == 2) stream.stop()
def split(self, batch_size, data): 'TODO: Annotation\n params:\n batch_size: int\n data: [B, x]\n ' if self.memory_net.use_rnn: data = tf.reshape(data, [batch_size, (- 1), tf.shape(data)[(- 1)]]) (d, d_) = (data[:, :(- 1)], data[:, 1:]) (d, d_) = (tf.reshape(d, [(- 1), tf.shape(d)[(- 1)]]), tf.reshape(d_, [(- 1), tf.shape(d_)[(- 1)]])) return (d, d_) else: return tf.split(data, num_or_size_splits=2, axis=0)
-1,039,740,822,517,881,500
TODO: Annotation params: batch_size: int data: [B, x]
rls/utils/build_networks.py
split
kiminh/RLs
python
def split(self, batch_size, data): 'TODO: Annotation\n params:\n batch_size: int\n data: [B, x]\n ' if self.memory_net.use_rnn: data = tf.reshape(data, [batch_size, (- 1), tf.shape(data)[(- 1)]]) (d, d_) = (data[:, :(- 1)], data[:, 1:]) (d, d_) = (tf.reshape(d, [(- 1), tf.shape(d)[(- 1)]]), tf.reshape(d_, [(- 1), tf.shape(d_)[(- 1)]])) return (d, d_) else: return tf.split(data, num_or_size_splits=2, axis=0)
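The `split` record turns flattened `[B*T, x]` sequences into overlapping (s, s') pairs when an RNN is in use, and otherwise just halves the batch. The standalone sketch below (TensorFlow 2 eager mode assumed, toy shapes chosen for illustration) reproduces both branches outside the class:

```python
import tensorflow as tf

batch_size = 2
# RNN branch: data holds T = 3 timesteps per batch element, flattened to [B*T, x].
data = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), (2 * 3, 4))
seq = tf.reshape(data, [batch_size, -1, tf.shape(data)[-1]])   # [B, T, x]
d, d_ = seq[:, :-1], seq[:, 1:]                                # drop last / first timestep
d = tf.reshape(d, [-1, tf.shape(d)[-1]])                       # back to [B*(T-1), x]
d_ = tf.reshape(d_, [-1, tf.shape(d_)[-1]])
print(d.shape, d_.shape)            # (4, 4) (4, 4)

# Non-RNN branch: the stacked batch is simply cut in half along axis 0.
first, second = tf.split(data, num_or_size_splits=2, axis=0)
print(first.shape, second.shape)    # (3, 4) (3, 4)
```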
def __call__(self, s, visual_s, cell_state, *, need_split=False): '\n params:\n s: [B*T, x]\n visual_s: [B*T, y]\n cell_state: Tuple([B, z],)\n return:\n feat: [B, a]\n cell_state: Tuple([B, z],)\n ' batch_size = tf.shape(s)[0] if self.memory_net.use_rnn: s = tf.reshape(s, [(- 1), tf.shape(s)[(- 1)]]) if self.visual_net.use_visual: visual_s = tf.reshape(visual_s, [(- 1), *tf.shape(visual_s)[2:]]) feat = self.get_encoder_feature(s, visual_s) if self.memory_net.use_rnn: feat = tf.reshape(feat, (batch_size, (- 1), tf.shape(feat)[(- 1)])) (feat, cell_state) = self.memory_net(feat, *cell_state) feat = tf.reshape(feat, ((- 1), tf.shape(feat)[(- 1)])) if need_split: feat = self.split(batch_size, feat) return (feat, cell_state)
170,773,381,567,324,300
params: s: [B*T, x] visual_s: [B*T, y] cell_state: Tuple([B, z],) return: feat: [B, a] cell_state: Tuple([B, z],)
rls/utils/build_networks.py
__call__
kiminh/RLs
python
def __call__(self, s, visual_s, cell_state, *, need_split=False): '\n params:\n s: [B*T, x]\n visual_s: [B*T, y]\n cell_state: Tuple([B, z],)\n return:\n feat: [B, a]\n cell_state: Tuple([B, z],)\n ' batch_size = tf.shape(s)[0] if self.memory_net.use_rnn: s = tf.reshape(s, [(- 1), tf.shape(s)[(- 1)]]) if self.visual_net.use_visual: visual_s = tf.reshape(visual_s, [(- 1), *tf.shape(visual_s)[2:]]) feat = self.get_encoder_feature(s, visual_s) if self.memory_net.use_rnn: feat = tf.reshape(feat, (batch_size, (- 1), tf.shape(feat)[(- 1)])) (feat, cell_state) = self.memory_net(feat, *cell_state) feat = tf.reshape(feat, ((- 1), tf.shape(feat)[(- 1)])) if need_split: feat = self.split(batch_size, feat) return (feat, cell_state)
def get_vis_feature(self, visual_s): '\n params:\n visual_s: [B, N, H, W, C]\n return:\n feat: [B, x]\n ' viss = [visual_s[:, i] for i in range(visual_s.shape[1])] return self.visual_net(*viss)
-8,670,938,418,787,934,000
params: visual_s: [B, N, H, W, C] return: feat: [B, x]
rls/utils/build_networks.py
get_vis_feature
kiminh/RLs
python
def get_vis_feature(self, visual_s): '\n params:\n visual_s: [B, N, H, W, C]\n return:\n feat: [B, x]\n ' viss = [visual_s[:, i] for i in range(visual_s.shape[1])] return self.visual_net(*viss)
def get_vec_feature(self, s): '\n params:\n s: [B, x]\n return:\n feat: [B, y]\n ' return self.vector_net(s)
1,844,289,576,694,748,700
params: s: [B, x] return: feat: [B, y]
rls/utils/build_networks.py
get_vec_feature
kiminh/RLs
python
def get_vec_feature(self, s): '\n params:\n s: [B, x]\n return:\n feat: [B, y]\n ' return self.vector_net(s)
def get_encoder_feature(self, s, visual_s): '\n params:\n s: [B, x]\n visual_s: [B, y]\n return:\n feat: [B, z]\n ' if (self.vector_net.use_vector and self.visual_net.use_visual): feat = self.get_vec_feature(s) vis_feat = self.get_vis_feature(visual_s) feat = tf.concat([feat, vis_feat], axis=(- 1)) elif self.visual_net.use_visual: vis_feat = self.get_vis_feature(visual_s) feat = vis_feat else: feat = self.get_vec_feature(s) encoder_feature = self.encoder_net(feat) return encoder_feature
4,595,706,411,565,255,000
params: s: [B, x] visual_s: [B, y] return: feat: [B, z]
rls/utils/build_networks.py
get_encoder_feature
kiminh/RLs
python
def get_encoder_feature(self, s, visual_s): '\n params:\n s: [B, x]\n visual_s: [B, y]\n return:\n feat: [B, z]\n ' if (self.vector_net.use_vector and self.visual_net.use_visual): feat = self.get_vec_feature(s) vis_feat = self.get_vis_feature(visual_s) feat = tf.concat([feat, vis_feat], axis=(- 1)) elif self.visual_net.use_visual: vis_feat = self.get_vis_feature(visual_s) feat = vis_feat else: feat = self.get_vec_feature(s) encoder_feature = self.encoder_net(feat) return encoder_feature
@property def _policy_models(self): '重载' models = super()._policy_models models.update({((self.name + '/') + 'policy_net'): self.policy_net}) return models
-3,166,260,519,035,728,000
Override.
rls/utils/build_networks.py
_policy_models
kiminh/RLs
python
@property def _policy_models(self): models = super()._policy_models models.update({((self.name + '/') + 'policy_net'): self.policy_net}) return models
def update_bid(self, bid_basket_item_id, amount): '\n Update amount of bid. Delete bid if amount is 0.\n ' try: amount = Decimal(amount) except Exception as e: amount = Decimal('0') bid_basket_item = self.bids.get(pk=bid_basket_item_id) if (not bid_basket_item.is_locked()): if (amount == 0): bid_basket_item.delete() else: bid_basket_item.amount = amount bid_basket_item.save() self.save() return bid_basket_item
-4,093,918,117,560,578,000
Update amount of bid. Delete bid if amount is 0.
auction/models/bases.py
update_bid
JohnRomanski/django-auction
python
def update_bid(self, bid_basket_item_id, amount): '\n \n ' try: amount = Decimal(amount) except Exception as e: amount = Decimal('0') bid_basket_item = self.bids.get(pk=bid_basket_item_id) if (not bid_basket_item.is_locked()): if (amount == 0): bid_basket_item.delete() else: bid_basket_item.amount = amount bid_basket_item.save() self.save() return bid_basket_item
def delete_bid(self, bid_basket_item_id): '\n Delete a single item from bid basket.\n ' bid_basket_item = self.bids.get(pk=bid_basket_item_id) if (not bid_basket_item.is_locked()): bid_basket_item.delete() return bid_basket_item
8,172,840,115,005,275,000
Delete a single item from bid basket.
auction/models/bases.py
delete_bid
JohnRomanski/django-auction
python
def delete_bid(self, bid_basket_item_id): '\n \n ' bid_basket_item = self.bids.get(pk=bid_basket_item_id) if (not bid_basket_item.is_locked()): bid_basket_item.delete() return bid_basket_item
def empty(self): '\n Remove all bids from bid basket.\n ' if self.pk: bids = self.bids.all() for bid in bids: if (not bid.is_locked()): bid.delete()
-8,989,054,926,361,665,000
Remove all bids from bid basket.
auction/models/bases.py
empty
JohnRomanski/django-auction
python
def empty(self): '\n \n ' if self.pk: bids = self.bids.all() for bid in bids: if (not bid.is_locked()): bid.delete()
@property def bids(self): '\n Used as accessor for abstract related (BaseBidItem.bid_items).\n\n If you override BaseBidItem and use a label other than "auction"\n you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.\n Example: foo_biditem_related\n (where your label is "foo" and your model is "BidItem")\n ' bids = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME', 'auction_biditem_related') return getattr(self, bids)
-5,169,305,097,375,760,000
Used as accessor for abstract related (BaseBidItem.bid_items). If you override BaseBidItem and use a label other than "auction" you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME. Example: foo_biditem_related (where your label is "foo" and your model is "BidItem")
auction/models/bases.py
bids
JohnRomanski/django-auction
python
@property def bids(self): '\n Used as accessor for abstract related (BaseBidItem.bid_items).\n\n If you override BaseBidItem and use a label other than "auction"\n you will also need to set AUCTION_BIDBASKET_BIDS_RELATED_NAME.\n Example: foo_biditem_related\n (where your label is "foo" and your model is "BidItem")\n ' bids = getattr(settings, 'AUCTION_BIDBASKET_BIDS_RELATED_NAME', 'auction_biditem_related') return getattr(self, bids)
@property def total_bids(self): '\n Returns total bids in basket.\n ' return len(self.bids.all())
-476,392,268,769,202,370
Returns total bids in basket.
auction/models/bases.py
total_bids
JohnRomanski/django-auction
python
@property def total_bids(self): '\n \n ' return len(self.bids.all())
@property def is_locked(self): '\n This property is meant to be overwritten with your own logic. Bid baskets\n check this method to find out if a bid can be manipulated.\n ' import auction.utils.generic now = auction.utils.generic.get_current_time() return (self.content_object.end_date <= now)
4,951,673,990,888,619,000
This property is meant to be overwritten with your own logic. Bid baskets check this method to find out if a bid can be manipulated.
auction/models/bases.py
is_locked
JohnRomanski/django-auction
python
@property def is_locked(self): '\n This property is meant to be overwritten with your own logic. Bid baskets\n check this method to find out if a bid can be manipulated.\n ' import auction.utils.generic now = auction.utils.generic.get_current_time() return (self.content_object.end_date <= now)
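Taken together, the bid-basket records above form a small API (`update_bid`, `delete_bid`, `empty`, `bids`, `total_bids`, `is_locked`). The sketch below is a hedged illustration; how the basket instance and the bid item id are obtained is outside these records, so both arrive as parameters:

```python
from decimal import Decimal


def adjust_bids(bid_basket, bid_item_id):
    # Raise an existing bid; the update is skipped when the bid item is locked.
    bid_basket.update_bid(bid_item_id, Decimal('150.00'))
    print('Bids in basket:', bid_basket.total_bids)

    # Passing 0 to update_bid() deletes the bid; delete_bid() and empty() do so explicitly.
    bid_basket.delete_bid(bid_item_id)
    bid_basket.empty()
```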
def __init__(self, session, object_factory, request_validator): 'Initialize a new Clients\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the DNA Center service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n ' check_type(session, RestSession) super(Clients, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator
9,048,934,399,632,874,000
Initialize a new Clients object with the provided RestSession. Args: session(RestSession): The RESTful session object to be used for API calls to the DNA Center service. Raises: TypeError: If the parameter types are incorrect.
dnacentersdk/api/v1_3_1/clients.py
__init__
cisco-en-programmability/dnacentersdk
python
def __init__(self, session, object_factory, request_validator): 'Initialize a new Clients\n object with the provided RestSession.\n\n Args:\n session(RestSession): The RESTful session object to be used for\n API calls to the DNA Center service.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n\n ' check_type(session, RestSession) super(Clients, self).__init__() self._session = session self._object_factory = object_factory self._request_validator = request_validator
def get_client_enrichment_details(self, headers=None, **request_parameters): "Enriches a given network End User context (a network user-id or\n end user's device Mac Address) with details about the\n user, the devices that the user is connected to and the\n assurance issues that the user is impacted by.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n list: JSON response. A list of MyDict objects.\n Access the object's properties by using the dot notation\n or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('entity_type' in headers): check_type(headers.get('entity_type'), basestring, may_be_none=False) if ('entity_value' in headers): check_type(headers.get('entity_value'), basestring, may_be_none=False) if ('issueCategory' in headers): check_type(headers.get('issueCategory'), basestring) if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {} _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-enrichment-details' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
-7,564,726,252,209,738,000
Enriches a given network End User context (a network user-id or end user's device Mac Address) with details about the user, the devices that the user is connected to and the assurance issues that the user is impacted by. Args: headers(dict): Dictionary of HTTP Headers to send with the Request . **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: list: JSON response. A list of MyDict objects. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
dnacentersdk/api/v1_3_1/clients.py
get_client_enrichment_details
cisco-en-programmability/dnacentersdk
python
def get_client_enrichment_details(self, headers=None, **request_parameters): "Enriches a given network End User context (a network user-id or\n end user's device Mac Address) with details about the\n user, the devices that the user is connected to and the\n assurance issues that the user is impacted by.\n\n Args:\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n list: JSON response. A list of MyDict objects.\n Access the object's properties by using the dot notation\n or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) if (headers is not None): if ('entity_type' in headers): check_type(headers.get('entity_type'), basestring, may_be_none=False) if ('entity_value' in headers): check_type(headers.get('entity_value'), basestring, may_be_none=False) if ('issueCategory' in headers): check_type(headers.get('issueCategory'), basestring) if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {} _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-enrichment-details' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_b199685d4d089a67_v1_3_1', json_data)
def get_overall_client_health(self, timestamp=None, headers=None, **request_parameters): "Returns Overall Client Health information by Client type (Wired\n and Wireless) for any given point of time.\n\n Args:\n timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n MyDict: JSON response. Access the object's properties by using\n the dot notation or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) check_type(timestamp, (basestring, int)) if (headers is not None): if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {'timestamp': timestamp} if (_params['timestamp'] is None): _params['timestamp'] = '' _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-health' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
-784,818,564,822,489,000
Returns Overall Client Health information by Client type (Wired and Wireless) for any given point of time. Args: timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required. headers(dict): Dictionary of HTTP Headers to send with the Request . **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
dnacentersdk/api/v1_3_1/clients.py
get_overall_client_health
cisco-en-programmability/dnacentersdk
python
def get_overall_client_health(self, timestamp=None, headers=None, **request_parameters): "Returns Overall Client Health information by Client type (Wired\n and Wireless) for any given point of time.\n\n Args:\n timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n MyDict: JSON response. Access the object's properties by using\n the dot notation or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) check_type(timestamp, (basestring, int)) if (headers is not None): if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {'timestamp': timestamp} if (_params['timestamp'] is None): _params['timestamp'] = _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-health' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_149aa93b4ddb80dd_v1_3_1', json_data)
def get_client_detail(self, mac_address, timestamp=None, headers=None, **request_parameters): "Returns detailed Client information retrieved by Mac Address for\n any given point of time. .\n\n Args:\n timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.\n mac_address(basestring): MAC Address of the client.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n MyDict: JSON response. Access the object's properties by using\n the dot notation or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) check_type(timestamp, (basestring, int)) check_type(mac_address, basestring, may_be_none=False) if (headers is not None): if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {'timestamp': timestamp, 'macAddress': mac_address} if (_params['timestamp'] is None): _params['timestamp'] = '' _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-detail' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
5,349,272,156,075,314,000
Returns detailed Client information retrieved by Mac Address for any given point of time. . Args: timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required. mac_address(basestring): MAC Address of the client. headers(dict): Dictionary of HTTP Headers to send with the Request . **request_parameters: Additional request parameters (provides support for parameters that may be added in the future). Returns: MyDict: JSON response. Access the object's properties by using the dot notation or the bracket notation. Raises: TypeError: If the parameter types are incorrect. MalformedRequest: If the request body created is invalid. ApiError: If the DNA Center cloud returns an error.
dnacentersdk/api/v1_3_1/clients.py
get_client_detail
cisco-en-programmability/dnacentersdk
python
def get_client_detail(self, mac_address, timestamp=None, headers=None, **request_parameters): "Returns detailed Client information retrieved by Mac Address for\n any given point of time. .\n\n Args:\n timestamp(basestring, int): Epoch time(in milliseconds) when the Client health data is required.\n mac_address(basestring): MAC Address of the client.\n headers(dict): Dictionary of HTTP Headers to send with the Request\n .\n **request_parameters: Additional request parameters (provides\n support for parameters that may be added in the future).\n\n Returns:\n MyDict: JSON response. Access the object's properties by using\n the dot notation or the bracket notation.\n\n Raises:\n TypeError: If the parameter types are incorrect.\n MalformedRequest: If the request body created is invalid.\n ApiError: If the DNA Center cloud returns an error.\n " check_type(headers, dict) check_type(timestamp, (basestring, int)) check_type(mac_address, basestring, may_be_none=False) if (headers is not None): if ('X-Auth-Token' in headers): check_type(headers.get('X-Auth-Token'), basestring, may_be_none=False) _params = {'timestamp': timestamp, 'macAddress': mac_address} if (_params['timestamp'] is None): _params['timestamp'] = _params.update(request_parameters) _params = dict_from_items_with_values(_params) path_params = {} with_custom_headers = False _headers = (self._session.headers or {}) if headers: _headers.update(dict_of_str(headers)) with_custom_headers = True e_url = '/dna/intent/api/v1/client-detail' endpoint_full_url = apply_path_params(e_url, path_params) if with_custom_headers: json_data = self._session.get(endpoint_full_url, params=_params, headers=_headers) else: json_data = self._session.get(endpoint_full_url, params=_params) return self._object_factory('bpm_e2adba7943bab3e9_v1_3_1', json_data)
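These `Clients` wrappers are normally reached through the SDK's top-level client rather than instantiated directly. A hedged sketch follows, with host, credentials, timestamp and MAC address as placeholders; the `DNACenterAPI` entry point is used as commonly shown in the SDK's documentation:

```python
from dnacentersdk import DNACenterAPI

api = DNACenterAPI(base_url='https://dnac.example.com',
                   username='admin', password='secret', verify=False)

health = api.clients.get_overall_client_health(timestamp='1566476295000')
detail = api.clients.get_client_detail(mac_address='00:11:22:33:44:55')
enriched = api.clients.get_client_enrichment_details(
    headers={'entity_type': 'mac_address', 'entity_value': '00:11:22:33:44:55'})
```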
@property def download_url(self): '\n the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.\n ' file_url = ('/files/%s' % self.id) rsp = self.session.get(file_url, allow_redirects=False) return rsp.headers['location']
-3,402,634,989,467,524,000
the URL at which the file can be downloaded. This is only valid for a short time, so should not be cached.
yandeley/models/files.py
download_url
shuichiro-makigaki/yandeley-python-sdk
python
@property def download_url(self): '\n \n ' file_url = ('/files/%s' % self.id) rsp = self.session.get(file_url, allow_redirects=False) return rsp.headers['location']
def document(self, view=None): '\n :param view: document view to return.\n :return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or\n :class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is\n attached to.\n ' if ('document_id' in self.json): return self.session.documents.get_lazy(self.json['document_id'], view=view) elif ('catalog_id' in self.json): return self.session.catalog.get_lazy(self.json['catalog_id'], view=view) else: return None
-7,163,505,978,791,217,000
:param view: document view to return. :return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or :class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is attached to.
yandeley/models/files.py
document
shuichiro-makigaki/yandeley-python-sdk
python
def document(self, view=None): '\n :param view: document view to return.\n :return: a :class:`UserDocument <yandeley.models.documents.UserDocument>` or\n :class:`CatalogDocument <yandeley.models.catalog.CatalogDocument>`, depending on which the document is\n attached to.\n ' if ('document_id' in self.json): return self.session.documents.get_lazy(self.json['document_id'], view=view) elif ('catalog_id' in self.json): return self.session.catalog.get_lazy(self.json['catalog_id'], view=view) else: return None
def download(self, directory): '\n Downloads the file.\n\n :param directory: the directory to download the file to. This must exist.\n :return: the path to the downloaded file.\n ' rsp = self.session.get(('/files/%s' % self.id), stream=True) filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1) path = os.path.join(directory, filename) with open(path, 'wb') as f: for block in rsp.iter_content(1024): if (not block): break f.write(block) return path
-7,057,783,901,090,330,000
Downloads the file. :param directory: the directory to download the file to. This must exist. :return: the path to the downloaded file.
yandeley/models/files.py
download
shuichiro-makigaki/yandeley-python-sdk
python
def download(self, directory): '\n Downloads the file.\n\n :param directory: the directory to download the file to. This must exist.\n :return: the path to the downloaded file.\n ' rsp = self.session.get(('/files/%s' % self.id), stream=True) filename = self.filename_regex.search(rsp.headers['content-disposition']).group(1) path = os.path.join(directory, filename) with open(path, 'wb') as f: for block in rsp.iter_content(1024): if (not block): break f.write(block) return path
def delete(self): '\n Deletes the file.\n ' self.session.delete(('/files/%s' % self.id))
-7,918,245,386,690,900,000
Deletes the file.
yandeley/models/files.py
delete
shuichiro-makigaki/yandeley-python-sdk
python
def delete(self): '\n \n ' self.session.delete(('/files/%s' % self.id))
def add_sticky_note(self, text, x_position, y_position, page_number): '\n Adds a sticky note to this file.\n\n :param text: the text of the sticky_note.\n :param x_position: the x position on the file of the sticky_note.\n :param y_position: the y position on the file of the stick_note.\n :param page_number: the page_number on the file of the sticky_note.\n :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.\n ' position = {'x': x_position, 'y': y_position} bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number} annotation = {'document_id': self.document().id, 'text': text, 'filehash': self.filehash, 'positions': [bounding_box]} rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={'Accept': Annotation.content_type, 'Content-Type': Annotation.content_type}) return Annotation(self.session, rsp.json())
-6,243,088,608,459,309,000
Adds a sticky note to this file. :param text: the text of the sticky_note. :param x_position: the x position on the file of the sticky_note. :param y_position: the y position on the file of the stick_note. :param page_number: the page_number on the file of the sticky_note. :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
yandeley/models/files.py
add_sticky_note
shuichiro-makigaki/yandeley-python-sdk
python
def add_sticky_note(self, text, x_position, y_position, page_number): '\n Adds a sticky note to this file.\n\n :param text: the text of the sticky_note.\n :param x_position: the x position on the file of the sticky_note.\n :param y_position: the y position on the file of the stick_note.\n :param page_number: the page_number on the file of the sticky_note.\n :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.\n ' position = {'x': x_position, 'y': y_position} bounding_box = {'top_left': position, 'bottom_right': position, 'page': page_number} annotation = {'document_id': self.document().id, 'text': text, 'filehash': self.filehash, 'positions': [bounding_box]} rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={'Accept': Annotation.content_type, 'Content-Type': Annotation.content_type}) return Annotation(self.session, rsp.json())
def add_highlight(self, bounding_boxes, color): '\n Adds a highlight to this file.\n\n :param bounding_boxes: the area the highlight covers on the file.\n :param color: the color of the highlight.\n :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.\n ' annotation = {'document_id': self.document().id, 'filehash': self.filehash, 'positions': [box.json for box in bounding_boxes], 'color': color.json} rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={'Accept': Annotation.content_type, 'Content-Type': Annotation.content_type}) return Annotation(self.session, rsp.json())
-813,811,400,724,956,800
Adds a highlight to this file. :param bounding_boxes: the area the highlight covers on the file. :param color: the color of the highlight. :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.
yandeley/models/files.py
add_highlight
shuichiro-makigaki/yandeley-python-sdk
python
def add_highlight(self, bounding_boxes, color): '\n Adds a highlight to this file.\n\n :param bounding_boxes: the area the highlight covers on the file.\n :param color: the color of the highlight.\n :return: a :class:`Annotation <yandeley.models.annotations.Annotation>`.\n ' annotation = {'document_id': self.document().id, 'filehash': self.filehash, 'positions': [box.json for box in bounding_boxes], 'color': color.json} rsp = self.session.post('/annotations/', data=json.dumps(annotation), headers={'Accept': Annotation.content_type, 'Content-Type': Annotation.content_type}) return Annotation(self.session, rsp.json())
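A hedged sketch of the `File` helpers documented above; how the `File` instance is obtained from the session is outside these records, so it is passed in as a parameter:

```python
def archive_and_annotate(mendeley_file, directory):
    # download() streams the bytes to disk and returns the local path; download_url is
    # only valid for a short time, so it is printed rather than cached.
    path = mendeley_file.download(directory)
    print('saved to', path, '| temporary url:', mendeley_file.download_url)

    # Attach a sticky note on page 1 at (100, 200); an Annotation object is returned.
    return mendeley_file.add_sticky_note('Check figure 2', 100, 200, 1)
```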
def _middle(values): 'Lower bound of median, without using numpy (heavy reqs)' n = len(values) is_odd = (n % 2) middle_idx = (int(((n + is_odd) / 2)) - 1) return sorted(values)[middle_idx]
958,713,930,854,413,800
Lower bound of median, without using numpy (heavy reqs)
nuts_finder/nuts_finder.py
_middle
nestauk/nuts_finder
python
def _middle(values): n = len(values) is_odd = (n % 2) middle_idx = (int(((n + is_odd) / 2)) - 1) return sorted(values)[middle_idx]
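A quick check of `_middle` on both parities; the import path mirrors the `path` field of the record, and since this is a private helper the import is for illustration only:

```python
from nuts_finder.nuts_finder import _middle  # private helper, imported only to demonstrate

print(_middle([3, 1, 2]))       # 2: the true median of an odd-length list
print(_middle([10, 1, 3, 60]))  # 3: the lower of the two middle values for even length
```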
def _setattr(obj, value, value_name, regex, selector): 'Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise\n select a `value` from the available range of allowed values, selected by a custom `selector`\n function.\n\n Args:\n obj: An object on which to run setattr\n value: A value which if not None will be set as an attribute of object\n value_name (str): The name of the new attribute\n regex (str): regex string by which to find allowed values on the NUTS website.\n selector (function): Function which takes an iterable and selects a value.\n ' allowed_values = _get_available(regex) if (value is None): value = selector(allowed_values) if (value not in allowed_values): raise ValueError(f"'{value_name}' must be one of {allowed_values}") setattr(obj, value_name, value)
6,017,101,556,157,815,000
Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise select a `value` from the available range of allowed values, selected by a custom `selector` function. Args: obj: An object on which to run setattr value: A value which if not None will be set as an attribute of object value_name (str): The name of the new attribute regex (str): regex string by which to find allowed values on the NUTS website. selector (function): Function which takes an iterable and selects a value.
nuts_finder/nuts_finder.py
_setattr
nestauk/nuts_finder
python
def _setattr(obj, value, value_name, regex, selector): 'Either apply setattr on `obj` with value `value`, if `value` is not None, otherwise\n select a `value` from the available range of allowed values, selected by a custom `selector`\n function.\n\n Args:\n obj: An object on which to run setattr\n value: A value which if not None will be set as an attribute of object\n value_name (str): The name of the new attribute\n regex (str): regex string by which to find allowed values on the NUTS website.\n selector (function): Function which takes an iterable and selects a value.\n ' allowed_values = _get_available(regex) if (value is None): value = selector(allowed_values) if (value not in allowed_values): raise ValueError(f"'{value_name}' must be one of {allowed_values}") setattr(obj, value_name, value)
@lru_cache() def _get_available(regex): 'Use the provided regex to find allowed values on the NUTS website.' r = requests.get(TOP_URL, verify=True) values = set((int(yr) for yr in re.findall(regex, r.text))) return values
-3,870,046,505,436,721,000
Use the provided regex to find allowed values on the NUTS website.
nuts_finder/nuts_finder.py
_get_available
nestauk/nuts_finder
python
@lru_cache() def _get_available(regex): r = requests.get(TOP_URL, verify=True) values = set((int(yr) for yr in re.findall(regex, r.text))) return values
def __init__(self, year=None, scale=None): '\n Args:\n year (int): If provided, NUTS regions for this year will be used (if available)\n scale (int): If provided, NUTS regions at this resolution will be used (if available)\n ' self.years = list(_get_available(YEAR_REGEX)) self.year_selector = max _setattr(self, year, 'year', YEAR_REGEX, self.year_selector) _setattr(self, scale, 'scale', SCALE_REGEX, _middle) self.shapes = self._get_shapes()
3,857,591,972,723,282,000
Args: year (int): If provided, NUTS regions for this year will be used (if available) scale (int): If provided, NUTS regions at this resolution will be used (if available)
nuts_finder/nuts_finder.py
__init__
nestauk/nuts_finder
python
def __init__(self, year=None, scale=None): '\n Args:\n year (int): If provided, NUTS regions for this year will be used (if available)\n scale (int): If provided, NUTS regions at this resolution will be used (if available)\n ' self.years = list(_get_available(YEAR_REGEX)) self.year_selector = max _setattr(self, year, 'year', YEAR_REGEX, self.year_selector) _setattr(self, scale, 'scale', SCALE_REGEX, _middle) self.shapes = self._get_shapes()
def _get_shapes(self): 'Load the shape files for the given year and scale' scale = str(self.scale).zfill(2) filename = NESTED_FILE.format(year=self.year, scale=scale) url = ZIP_URL.format(year=self.year, scale=scale) r = requests.get(url, verify=True) r.raise_for_status() try: with ZipFile(BytesIO(r.content)) as zipfile: with zipfile.open(filename) as f: shapes = geojson.load(f) except KeyError: logging.warning(f'No match for this year ({self.year}) and scale ({self.scale})') self.years.remove(self.year) self.year = self.year_selector(self.years) logging.warning(f'Retrying with year ({self.year})') return self._get_shapes() return shapes
1,341,815,081,636,938,800
Load the shape files for the given year and scale
nuts_finder/nuts_finder.py
_get_shapes
nestauk/nuts_finder
python
def _get_shapes(self): scale = str(self.scale).zfill(2) filename = NESTED_FILE.format(year=self.year, scale=scale) url = ZIP_URL.format(year=self.year, scale=scale) r = requests.get(url, verify=True) r.raise_for_status() try: with ZipFile(BytesIO(r.content)) as zipfile: with zipfile.open(filename) as f: shapes = geojson.load(f) except KeyError: logging.warning(f'No match for this year ({self.year}) and scale ({self.scale})') self.years.remove(self.year) self.year = self.year_selector(self.years) logging.warning(f'Retrying with year ({self.year})') return self._get_shapes() return shapes
def find(self, lat, lon): 'Find every NUTS region for this lat, lon' p = geometry.Point(lon, lat) nuts = [] for region in self.shapes['features']: s = geometry.shape(region['geometry']) if s.contains(p): nuts.append(region['properties']) return sorted(nuts, key=(lambda row: row['LEVL_CODE']))
4,858,413,133,958,075,000
Find every NUTS region for this lat, lon
nuts_finder/nuts_finder.py
find
nestauk/nuts_finder
python
def find(self, lat, lon): p = geometry.Point(lon, lat) nuts = [] for region in self.shapes['features']: s = geometry.shape(region['geometry']) if s.contains(p): nuts.append(region['properties']) return sorted(nuts, key=(lambda row: row['LEVL_CODE']))
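Editorial aside, not part of the corpus: NutsFinder.find above reduces to a shapely point-in-polygon test over every feature in the downloaded GeoJSON. A self-contained sketch of that containment check against an assumed toy polygon (the real NUTS shapes would require the download performed in _get_shapes):

from shapely import geometry

# A hand-made 10x10 square standing in for one NUTS feature's geometry.
square = geometry.shape({
    'type': 'Polygon',
    'coordinates': [[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]],
})

# Note the (lon, lat) ordering: shapely points are (x, y).
print(square.contains(geometry.Point(2.5, 7.5)))    # True
print(square.contains(geometry.Point(20.0, 20.0)))  # False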
def _pad_tensors_to_same_length(x, y): 'Pad x and y so that the results have the same length (second dimension).' with tf.name_scope('pad_to_same_length'): x_length = tf.shape(x)[1] y_length = tf.shape(y)[1] max_length = tf.maximum(x_length, y_length) x = tf.pad(x, [[0, 0], [0, (max_length - x_length)], [0, 0]]) y = tf.pad(y, [[0, 0], [0, (max_length - y_length)]]) return (x, y)
6,004,746,840,324,201,000
Pad x and y so that the results have the same length (second dimension).
official/nlp/transformer/utils/metrics.py
_pad_tensors_to_same_length
1110sillabo/models
python
def _pad_tensors_to_same_length(x, y): with tf.name_scope('pad_to_same_length'): x_length = tf.shape(x)[1] y_length = tf.shape(y)[1] max_length = tf.maximum(x_length, y_length) x = tf.pad(x, [[0, 0], [0, (max_length - x_length)], [0, 0]]) y = tf.pad(y, [[0, 0], [0, (max_length - y_length)]]) return (x, y)
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): 'Calculate cross entropy loss while ignoring padding.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch_size, length_labels]\n smoothing: Label smoothing constant, used to determine the on and off values\n vocab_size: int size of the vocabulary\n Returns:\n Returns the cross entropy loss and weight tensors: float32 tensors with\n shape [batch_size, max(length_logits, length_labels)]\n ' with tf.name_scope('loss', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) with tf.name_scope('smoothing_cross_entropy', values=[logits, labels]): confidence = (1.0 - smoothing) low_confidence = ((1.0 - confidence) / tf.to_float((vocab_size - 1))) soft_targets = tf.one_hot(tf.cast(labels, tf.int32), depth=vocab_size, on_value=confidence, off_value=low_confidence) xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=soft_targets) normalizing_constant = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20))))) xentropy -= normalizing_constant weights = tf.to_float(tf.not_equal(labels, 0)) return ((xentropy * weights), weights)
3,564,442,464,147,787,300
Calculate cross entropy loss while ignoring padding. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch_size, length_labels] smoothing: Label smoothing constant, used to determine the on and off values vocab_size: int size of the vocabulary Returns: Returns the cross entropy loss and weight tensors: float32 tensors with shape [batch_size, max(length_logits, length_labels)]
official/nlp/transformer/utils/metrics.py
padded_cross_entropy_loss
1110sillabo/models
python
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size): 'Calculate cross entropy loss while ignoring padding.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch_size, length_labels]\n smoothing: Label smoothing constant, used to determine the on and off values\n vocab_size: int size of the vocabulary\n Returns:\n Returns the cross entropy loss and weight tensors: float32 tensors with\n shape [batch_size, max(length_logits, length_labels)]\n ' with tf.name_scope('loss', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) with tf.name_scope('smoothing_cross_entropy', values=[logits, labels]): confidence = (1.0 - smoothing) low_confidence = ((1.0 - confidence) / tf.to_float((vocab_size - 1))) soft_targets = tf.one_hot(tf.cast(labels, tf.int32), depth=vocab_size, on_value=confidence, off_value=low_confidence) xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=soft_targets) normalizing_constant = (- ((confidence * tf.log(confidence)) + ((tf.to_float((vocab_size - 1)) * low_confidence) * tf.log((low_confidence + 1e-20))))) xentropy -= normalizing_constant weights = tf.to_float(tf.not_equal(labels, 0)) return ((xentropy * weights), weights)
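Editorial aside, not part of the corpus: the loss above builds label-smoothed soft targets and subtracts a normalizing constant so that a perfect prediction scores roughly zero. A small numpy sketch with arbitrary vocab_size, smoothing and label values:

import numpy as np

vocab_size, smoothing, label = 4, 0.1, 2
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)

# Soft target: `confidence` on the true class, `low_confidence` everywhere else.
soft_targets = np.full(vocab_size, low_confidence)
soft_targets[label] = confidence
print(soft_targets)  # [0.0333... 0.0333... 0.9 0.0333...]

# Cross entropy against the soft targets is minimised when the predicted
# distribution equals them; that minimum is exactly the normalizing constant
# the loss subtracts, so a perfect prediction ends up near zero loss.
xent_at_optimum = -np.sum(soft_targets * np.log(soft_targets))
normalizing_constant = -(confidence * np.log(confidence)
                         + (vocab_size - 1) * low_confidence * np.log(low_confidence + 1e-20))
print(np.isclose(xent_at_optimum, normalizing_constant))  # True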
def _convert_to_eval_metric(metric_fn): "Wrap a metric fn that returns scores and weights as an eval metric fn.\n\n The input metric_fn returns values for the current batch. The wrapper\n aggregates the return values collected over all of the batches evaluated.\n\n Args:\n metric_fn: function that returns scores and weights for the current batch's\n logits and predicted labels.\n\n Returns:\n function that aggregates the scores and weights from metric_fn.\n " def problem_metric_fn(*args): "Returns an aggregation of the metric_fn's returned values." (scores, weights) = metric_fn(*args) return tf.metrics.mean(scores, weights) return problem_metric_fn
-2,359,971,840,431,848,000
Wrap a metric fn that returns scores and weights as an eval metric fn. The input metric_fn returns values for the current batch. The wrapper aggregates the return values collected over all of the batches evaluated. Args: metric_fn: function that returns scores and weights for the current batch's logits and predicted labels. Returns: function that aggregates the scores and weights from metric_fn.
official/nlp/transformer/utils/metrics.py
_convert_to_eval_metric
1110sillabo/models
python
def _convert_to_eval_metric(metric_fn): "Wrap a metric fn that returns scores and weights as an eval metric fn.\n\n The input metric_fn returns values for the current batch. The wrapper\n aggregates the return values collected over all of the batches evaluated.\n\n Args:\n metric_fn: function that returns scores and weights for the current batch's\n logits and predicted labels.\n\n Returns:\n function that aggregates the scores and weights from metric_fn.\n " def problem_metric_fn(*args): "Returns an aggregation of the metric_fn's returned values." (scores, weights) = metric_fn(*args) return tf.metrics.mean(scores, weights) return problem_metric_fn
def get_eval_metrics(logits, labels, params): 'Return dictionary of model evaluation metrics.' metrics = {'accuracy': _convert_to_eval_metric(padded_accuracy)(logits, labels), 'accuracy_top5': _convert_to_eval_metric(padded_accuracy_top5)(logits, labels), 'accuracy_per_sequence': _convert_to_eval_metric(padded_sequence_accuracy)(logits, labels), 'neg_log_perplexity': _convert_to_eval_metric(padded_neg_log_perplexity)(logits, labels, params['vocab_size'])} if (not params['use_tpu']): metrics.update({'approx_bleu_score': _convert_to_eval_metric(bleu_score)(logits, labels), 'rouge_2_fscore': _convert_to_eval_metric(rouge_2_fscore)(logits, labels), 'rouge_L_fscore': _convert_to_eval_metric(rouge_l_fscore)(logits, labels)}) metrics = {('metrics/%s' % k): v for (k, v) in six.iteritems(metrics)} return metrics
3,380,199,622,328,414,000
Return dictionary of model evaluation metrics.
official/nlp/transformer/utils/metrics.py
get_eval_metrics
1110sillabo/models
python
def get_eval_metrics(logits, labels, params): metrics = {'accuracy': _convert_to_eval_metric(padded_accuracy)(logits, labels), 'accuracy_top5': _convert_to_eval_metric(padded_accuracy_top5)(logits, labels), 'accuracy_per_sequence': _convert_to_eval_metric(padded_sequence_accuracy)(logits, labels), 'neg_log_perplexity': _convert_to_eval_metric(padded_neg_log_perplexity)(logits, labels, params['vocab_size'])} if (not params['use_tpu']): metrics.update({'approx_bleu_score': _convert_to_eval_metric(bleu_score)(logits, labels), 'rouge_2_fscore': _convert_to_eval_metric(rouge_2_fscore)(logits, labels), 'rouge_L_fscore': _convert_to_eval_metric(rouge_l_fscore)(logits, labels)}) metrics = {('metrics/%s' % k): v for (k, v) in six.iteritems(metrics)} return metrics
def padded_accuracy(logits, labels): 'Percentage of times that predictions matches labels on non-0s.' with tf.variable_scope('padded_accuracy', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) outputs = tf.to_int32(tf.argmax(logits, axis=(- 1))) padded_labels = tf.to_int32(labels) return (tf.to_float(tf.equal(outputs, padded_labels)), weights)
8,946,189,065,367,476,000
Percentage of times that predictions matches labels on non-0s.
official/nlp/transformer/utils/metrics.py
padded_accuracy
1110sillabo/models
python
def padded_accuracy(logits, labels): with tf.variable_scope('padded_accuracy', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) outputs = tf.to_int32(tf.argmax(logits, axis=(- 1))) padded_labels = tf.to_int32(labels) return (tf.to_float(tf.equal(outputs, padded_labels)), weights)
def padded_accuracy_topk(logits, labels, k): 'Percentage of times that top-k predictions matches labels on non-0s.' with tf.variable_scope('padded_accuracy_topk', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) effective_k = tf.minimum(k, tf.shape(logits)[(- 1)]) (_, outputs) = tf.nn.top_k(logits, k=effective_k) outputs = tf.to_int32(outputs) padded_labels = tf.to_int32(labels) padded_labels = tf.expand_dims(padded_labels, axis=(- 1)) padded_labels += tf.zeros_like(outputs) same = tf.to_float(tf.equal(outputs, padded_labels)) same_topk = tf.reduce_sum(same, axis=(- 1)) return (same_topk, weights)
4,883,132,312,032,429,000
Percentage of times that top-k predictions matches labels on non-0s.
official/nlp/transformer/utils/metrics.py
padded_accuracy_topk
1110sillabo/models
python
def padded_accuracy_topk(logits, labels, k): with tf.variable_scope('padded_accuracy_topk', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) effective_k = tf.minimum(k, tf.shape(logits)[(- 1)]) (_, outputs) = tf.nn.top_k(logits, k=effective_k) outputs = tf.to_int32(outputs) padded_labels = tf.to_int32(labels) padded_labels = tf.expand_dims(padded_labels, axis=(- 1)) padded_labels += tf.zeros_like(outputs) same = tf.to_float(tf.equal(outputs, padded_labels)) same_topk = tf.reduce_sum(same, axis=(- 1)) return (same_topk, weights)
def padded_sequence_accuracy(logits, labels): 'Percentage of times that predictions matches labels everywhere (non-0).' with tf.variable_scope('padded_sequence_accuracy', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) outputs = tf.to_int32(tf.argmax(logits, axis=(- 1))) padded_labels = tf.to_int32(labels) not_correct = (tf.to_float(tf.not_equal(outputs, padded_labels)) * weights) axis = list(range(1, len(outputs.get_shape()))) correct_seq = (1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))) return (correct_seq, tf.constant(1.0))
-1,587,949,503,459,970,300
Percentage of times that predictions matches labels everywhere (non-0).
official/nlp/transformer/utils/metrics.py
padded_sequence_accuracy
1110sillabo/models
python
def padded_sequence_accuracy(logits, labels): with tf.variable_scope('padded_sequence_accuracy', values=[logits, labels]): (logits, labels) = _pad_tensors_to_same_length(logits, labels) weights = tf.to_float(tf.not_equal(labels, 0)) outputs = tf.to_int32(tf.argmax(logits, axis=(- 1))) padded_labels = tf.to_int32(labels) not_correct = (tf.to_float(tf.not_equal(outputs, padded_labels)) * weights) axis = list(range(1, len(outputs.get_shape()))) correct_seq = (1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))) return (correct_seq, tf.constant(1.0))
def padded_neg_log_perplexity(logits, labels, vocab_size): 'Average log-perplexity excluding padding 0s. No smoothing.' (num, den) = padded_cross_entropy_loss(logits, labels, 0, vocab_size) return ((- num), den)
4,644,916,350,470,578,000
Average log-perplexity excluding padding 0s. No smoothing.
official/nlp/transformer/utils/metrics.py
padded_neg_log_perplexity
1110sillabo/models
python
def padded_neg_log_perplexity(logits, labels, vocab_size): (num, den) = padded_cross_entropy_loss(logits, labels, 0, vocab_size) return ((- num), den)
def bleu_score(logits, labels): 'Approximate BLEU score computation between labels and predictions.\n\n An approximate BLEU scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4\n and use brevity penalty. Also, this does not have beam search.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch-size, length_labels]\n\n Returns:\n bleu: int, approx bleu score\n ' predictions = tf.to_int32(tf.argmax(logits, axis=(- 1))) bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32) return (bleu, tf.constant(1.0))
292,391,744,416,105,400
Approximate BLEU score computation between labels and predictions. An approximate BLEU scoring method since we do not glue word pieces or decode the ids and tokenize the output. By default, we use ngram order of 4 and use brevity penalty. Also, this does not have beam search. Args: logits: Tensor of size [batch_size, length_logits, vocab_size] labels: Tensor of size [batch-size, length_labels] Returns: bleu: int, approx bleu score
official/nlp/transformer/utils/metrics.py
bleu_score
1110sillabo/models
python
def bleu_score(logits, labels): 'Approximate BLEU score computation between labels and predictions.\n\n An approximate BLEU scoring method since we do not glue word pieces or\n decode the ids and tokenize the output. By default, we use ngram order of 4\n and use brevity penalty. Also, this does not have beam search.\n\n Args:\n logits: Tensor of size [batch_size, length_logits, vocab_size]\n labels: Tensor of size [batch-size, length_labels]\n\n Returns:\n bleu: int, approx bleu score\n ' predictions = tf.to_int32(tf.argmax(logits, axis=(- 1))) bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32) return (bleu, tf.constant(1.0))
def _get_ngrams_with_counter(segment, max_order): 'Extracts all n-grams up to a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n ' ngram_counts = collections.Counter() for order in xrange(1, (max_order + 1)): for i in xrange(0, ((len(segment) - order) + 1)): ngram = tuple(segment[i:(i + order)]) ngram_counts[ngram] += 1 return ngram_counts
1,716,925,804,233,848,000
Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. max_order: maximum length in tokens of the n-grams returned by this methods. Returns: The Counter containing all n-grams upto max_order in segment with a count of how many times each n-gram occurred.
official/nlp/transformer/utils/metrics.py
_get_ngrams_with_counter
1110sillabo/models
python
def _get_ngrams_with_counter(segment, max_order): 'Extracts all n-grams up to a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n ' ngram_counts = collections.Counter() for order in xrange(1, (max_order + 1)): for i in xrange(0, ((len(segment) - order) + 1)): ngram = tuple(segment[i:(i + order)]) ngram_counts[ngram] += 1 return ngram_counts
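Editorial aside, not part of the corpus: the Counter-based n-gram extraction above, run on a made-up token sequence so the clipping step in compute_bleu is easier to follow:

import collections

def ngrams_with_counter(segment, max_order):
    counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(len(segment) - order + 1):
            counts[tuple(segment[i:i + order])] += 1
    return counts

counts = ngrams_with_counter(['the', 'cat', 'sat', 'the', 'cat'], 2)
print(counts[('the',)], counts[('the', 'cat')], counts[('sat', 'the')])  # 2 2 1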
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True): 'Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n\n Returns:\n BLEU score.\n ' reference_length = 0 translation_length = 0 bp = 1.0 geo_mean = 0 matches_by_order = ([0] * max_order) possible_matches_by_order = ([0] * max_order) precisions = [] for (references, translations) in zip(reference_corpus, translation_corpus): reference_length += len(references) translation_length += len(translations) ref_ngram_counts = _get_ngrams_with_counter(references, max_order) translation_ngram_counts = _get_ngrams_with_counter(translations, max_order) overlap = dict(((ngram, min(count, translation_ngram_counts[ngram])) for (ngram, count) in ref_ngram_counts.items())) for ngram in overlap: matches_by_order[(len(ngram) - 1)] += overlap[ngram] for ngram in translation_ngram_counts: possible_matches_by_order[(len(ngram) - 1)] += translation_ngram_counts[ngram] precisions = ([0] * max_order) smooth = 1.0 for i in xrange(0, max_order): if (possible_matches_by_order[i] > 0): precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) if (matches_by_order[i] > 0): precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) else: smooth *= 2 precisions[i] = (1.0 / (smooth * possible_matches_by_order[i])) else: precisions[i] = 0.0 if (max(precisions) > 0): p_log_sum = sum((math.log(p) for p in precisions if p)) geo_mean = math.exp((p_log_sum / max_order)) if use_bp: ratio = (translation_length / reference_length) bp = (math.exp((1 - (1.0 / ratio))) if (ratio < 1.0) else 1.0) bleu = (geo_mean * bp) return np.float32(bleu)
8,272,636,851,736,137,000
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. use_bp: boolean, whether to apply brevity penalty. Returns: BLEU score.
official/nlp/transformer/utils/metrics.py
compute_bleu
1110sillabo/models
python
def compute_bleu(reference_corpus, translation_corpus, max_order=4, use_bp=True): 'Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n use_bp: boolean, whether to apply brevity penalty.\n\n Returns:\n BLEU score.\n ' reference_length = 0 translation_length = 0 bp = 1.0 geo_mean = 0 matches_by_order = ([0] * max_order) possible_matches_by_order = ([0] * max_order) precisions = [] for (references, translations) in zip(reference_corpus, translation_corpus): reference_length += len(references) translation_length += len(translations) ref_ngram_counts = _get_ngrams_with_counter(references, max_order) translation_ngram_counts = _get_ngrams_with_counter(translations, max_order) overlap = dict(((ngram, min(count, translation_ngram_counts[ngram])) for (ngram, count) in ref_ngram_counts.items())) for ngram in overlap: matches_by_order[(len(ngram) - 1)] += overlap[ngram] for ngram in translation_ngram_counts: possible_matches_by_order[(len(ngram) - 1)] += translation_ngram_counts[ngram] precisions = ([0] * max_order) smooth = 1.0 for i in xrange(0, max_order): if (possible_matches_by_order[i] > 0): precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) if (matches_by_order[i] > 0): precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) else: smooth *= 2 precisions[i] = (1.0 / (smooth * possible_matches_by_order[i])) else: precisions[i] = 0.0 if (max(precisions) > 0): p_log_sum = sum((math.log(p) for p in precisions if p)) geo_mean = math.exp((p_log_sum / max_order)) if use_bp: ratio = (translation_length / reference_length) bp = (math.exp((1 - (1.0 / ratio))) if (ratio < 1.0) else 1.0) bleu = (geo_mean * bp) return np.float32(bleu)
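Editorial aside, not part of the corpus: the two ingredients of the BLEU computation above, clipped n-gram precision and the brevity penalty, worked on a made-up reference/translation pair of token ids:

import collections
import math

def ngram_counts(seq, n):
    return collections.Counter(tuple(seq[i:i + n]) for i in range(len(seq) - n + 1))

reference, translation = [1, 2, 3, 4, 5], [1, 2, 3, 7]

# Clipped unigram precision: each match is capped at its count in the reference.
ref_uni, hyp_uni = ngram_counts(reference, 1), ngram_counts(translation, 1)
clipped = sum(min(count, ref_uni[gram]) for gram, count in hyp_uni.items())
precision_1 = clipped / sum(hyp_uni.values())             # 3 / 4 = 0.75

# Brevity penalty: translations shorter than the reference are penalised.
ratio = len(translation) / len(reference)                  # 4 / 5
bp = math.exp(1 - 1.0 / ratio) if ratio < 1.0 else 1.0     # exp(-0.25), about 0.78
print(precision_1, bp)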
def rouge_2_fscore(logits, labels): 'ROUGE-2 F1 score computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n logits: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge2_fscore: approx rouge-2 f1 score.\n ' predictions = tf.to_int32(tf.argmax(logits, axis=(- 1))) rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32) return (rouge_2_f_score, tf.constant(1.0))
8,274,710,431,071,724,000
ROUGE-2 F1 score computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: logits: tensor, model predictions labels: tensor, gold output. Returns: rouge2_fscore: approx rouge-2 f1 score.
official/nlp/transformer/utils/metrics.py
rouge_2_fscore
1110sillabo/models
python
def rouge_2_fscore(logits, labels): 'ROUGE-2 F1 score computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n logits: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge2_fscore: approx rouge-2 f1 score.\n ' predictions = tf.to_int32(tf.argmax(logits, axis=(- 1))) rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32) return (rouge_2_f_score, tf.constant(1.0))
def _get_ngrams(n, text): 'Calculates n-grams.\n\n Args:\n n: which n-grams to calculate\n text: An array of tokens\n\n Returns:\n A set of n-grams\n ' ngram_set = set() text_length = len(text) max_index_ngram_start = (text_length - n) for i in range((max_index_ngram_start + 1)): ngram_set.add(tuple(text[i:(i + n)])) return ngram_set
2,263,363,960,569,367,600
Calculates n-grams. Args: n: which n-grams to calculate text: An array of tokens Returns: A set of n-grams
official/nlp/transformer/utils/metrics.py
_get_ngrams
1110sillabo/models
python
def _get_ngrams(n, text): 'Calculates n-grams.\n\n Args:\n n: which n-grams to calculate\n text: An array of tokens\n\n Returns:\n A set of n-grams\n ' ngram_set = set() text_length = len(text) max_index_ngram_start = (text_length - n) for i in range((max_index_ngram_start + 1)): ngram_set.add(tuple(text[i:(i + n)])) return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2): 'Computes ROUGE-N f1 score of two text collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Args:\n eval_sentences: Predicted sentences.\n ref_sentences: Sentences from the reference set\n n: Size of ngram. Defaults to 2.\n\n Returns:\n f1 score for ROUGE-N\n ' f1_scores = [] for (eval_sentence, ref_sentence) in zip(eval_sentences, ref_sentences): eval_ngrams = _get_ngrams(n, eval_sentence) ref_ngrams = _get_ngrams(n, ref_sentence) ref_count = len(ref_ngrams) eval_count = len(eval_ngrams) overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) overlapping_count = len(overlapping_ngrams) if (eval_count == 0): precision = 0.0 else: precision = (float(overlapping_count) / eval_count) if (ref_count == 0): recall = 0.0 else: recall = (float(overlapping_count) / ref_count) f1_scores.append((2.0 * ((precision * recall) / ((precision + recall) + 1e-08)))) return np.mean(f1_scores, dtype=np.float32)
-306,006,793,984,337,500
Computes ROUGE-N f1 score of two text collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Args: eval_sentences: Predicted sentences. ref_sentences: Sentences from the reference set n: Size of ngram. Defaults to 2. Returns: f1 score for ROUGE-N
official/nlp/transformer/utils/metrics.py
rouge_n
1110sillabo/models
python
def rouge_n(eval_sentences, ref_sentences, n=2): 'Computes ROUGE-N f1 score of two text collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Args:\n eval_sentences: Predicted sentences.\n ref_sentences: Sentences from the reference set\n n: Size of ngram. Defaults to 2.\n\n Returns:\n f1 score for ROUGE-N\n ' f1_scores = [] for (eval_sentence, ref_sentence) in zip(eval_sentences, ref_sentences): eval_ngrams = _get_ngrams(n, eval_sentence) ref_ngrams = _get_ngrams(n, ref_sentence) ref_count = len(ref_ngrams) eval_count = len(eval_ngrams) overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) overlapping_count = len(overlapping_ngrams) if (eval_count == 0): precision = 0.0 else: precision = (float(overlapping_count) / eval_count) if (ref_count == 0): recall = 0.0 else: recall = (float(overlapping_count) / ref_count) f1_scores.append((2.0 * ((precision * recall) / ((precision + recall) + 1e-08)))) return np.mean(f1_scores, dtype=np.float32)
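Editorial aside, not part of the corpus: a worked ROUGE-2 example following the set-based overlap used above (each distinct bigram counts once), on a made-up sentence pair:

eval_sentence = ['the', 'cat', 'sat', 'on', 'the', 'mat']
ref_sentence = ['the', 'cat', 'lay', 'on', 'the', 'mat']

bigrams = lambda s: {tuple(s[i:i + 2]) for i in range(len(s) - 1)}
overlap = bigrams(eval_sentence) & bigrams(ref_sentence)   # 3 shared bigrams

precision = len(overlap) / len(bigrams(eval_sentence))     # 3 / 5
recall = len(overlap) / len(bigrams(ref_sentence))         # 3 / 5
f1 = 2.0 * precision * recall / (precision + recall + 1e-08)
print(round(f1, 3))  # 0.6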
def rouge_l_fscore(predictions, labels): 'ROUGE scores computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n predictions: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge_l_fscore: approx rouge-l f1 score.\n ' outputs = tf.to_int32(tf.argmax(predictions, axis=(- 1))) rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), tf.float32) return (rouge_l_f_score, tf.constant(1.0))
3,165,181,742,827,321,000
ROUGE scores computation between labels and predictions. This is an approximate ROUGE scoring method since we do not glue word pieces or decode the ids and tokenize the output. Args: predictions: tensor, model predictions labels: tensor, gold output. Returns: rouge_l_fscore: approx rouge-l f1 score.
official/nlp/transformer/utils/metrics.py
rouge_l_fscore
1110sillabo/models
python
def rouge_l_fscore(predictions, labels): 'ROUGE scores computation between labels and predictions.\n\n This is an approximate ROUGE scoring method since we do not glue word pieces\n or decode the ids and tokenize the output.\n\n Args:\n predictions: tensor, model predictions\n labels: tensor, gold output.\n\n Returns:\n rouge_l_fscore: approx rouge-l f1 score.\n ' outputs = tf.to_int32(tf.argmax(predictions, axis=(- 1))) rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), tf.float32) return (rouge_l_f_score, tf.constant(1.0))
def rouge_l_sentence_level(eval_sentences, ref_sentences): 'Computes ROUGE-L (sentence level) of two collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Calculated according to:\n R_lcs = LCS(X,Y)/m\n P_lcs = LCS(X,Y)/n\n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\n where:\n X = reference summary\n Y = Candidate summary\n m = length of reference summary\n n = length of candidate summary\n\n Args:\n eval_sentences: The sentences that have been picked by the summarizer\n ref_sentences: The sentences from the reference set\n\n Returns:\n A float: F_lcs\n ' f1_scores = [] for (eval_sentence, ref_sentence) in zip(eval_sentences, ref_sentences): m = float(len(ref_sentence)) n = float(len(eval_sentence)) lcs = _len_lcs(eval_sentence, ref_sentence) f1_scores.append(_f_lcs(lcs, m, n)) return np.mean(f1_scores, dtype=np.float32)
-6,966,317,381,581,726,000
Computes ROUGE-L (sentence level) of two collections of sentences. Source: https://www.microsoft.com/en-us/research/publication/ rouge-a-package-for-automatic-evaluation-of-summaries/ Calculated according to: R_lcs = LCS(X,Y)/m P_lcs = LCS(X,Y)/n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) where: X = reference summary Y = Candidate summary m = length of reference summary n = length of candidate summary Args: eval_sentences: The sentences that have been picked by the summarizer ref_sentences: The sentences from the reference set Returns: A float: F_lcs
official/nlp/transformer/utils/metrics.py
rouge_l_sentence_level
1110sillabo/models
python
def rouge_l_sentence_level(eval_sentences, ref_sentences): 'Computes ROUGE-L (sentence level) of two collections of sentences.\n\n Source: https://www.microsoft.com/en-us/research/publication/\n rouge-a-package-for-automatic-evaluation-of-summaries/\n\n Calculated according to:\n R_lcs = LCS(X,Y)/m\n P_lcs = LCS(X,Y)/n\n F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)\n\n where:\n X = reference summary\n Y = Candidate summary\n m = length of reference summary\n n = length of candidate summary\n\n Args:\n eval_sentences: The sentences that have been picked by the summarizer\n ref_sentences: The sentences from the reference set\n\n Returns:\n A float: F_lcs\n ' f1_scores = [] for (eval_sentence, ref_sentence) in zip(eval_sentences, ref_sentences): m = float(len(ref_sentence)) n = float(len(eval_sentence)) lcs = _len_lcs(eval_sentence, ref_sentence) f1_scores.append(_f_lcs(lcs, m, n)) return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y): 'Returns the length of the Longest Common Subsequence between two seqs.\n\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: sequence of words\n y: sequence of words\n\n Returns\n integer: Length of LCS between x and y\n ' table = _lcs(x, y) (n, m) = (len(x), len(y)) return table[(n, m)]
8,835,899,292,213,967,000
Returns the length of the Longest Common Subsequence between two seqs. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns integer: Length of LCS between x and y
official/nlp/transformer/utils/metrics.py
_len_lcs
1110sillabo/models
python
def _len_lcs(x, y): 'Returns the length of the Longest Common Subsequence between two seqs.\n\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: sequence of words\n y: sequence of words\n\n Returns\n integer: Length of LCS between x and y\n ' table = _lcs(x, y) (n, m) = (len(x), len(y)) return table[(n, m)]
def _lcs(x, y): 'Computes the length of the LCS between two seqs.\n\n The implementation below uses a DP programming algorithm and runs\n in O(nm) time where n = len(x) and m = len(y).\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: collection of words\n y: collection of words\n\n Returns:\n Table of dictionary of coord and len lcs\n ' (n, m) = (len(x), len(y)) table = dict() for i in range((n + 1)): for j in range((m + 1)): if ((i == 0) or (j == 0)): table[(i, j)] = 0 elif (x[(i - 1)] == y[(j - 1)]): table[(i, j)] = (table[((i - 1), (j - 1))] + 1) else: table[(i, j)] = max(table[((i - 1), j)], table[(i, (j - 1))]) return table
4,657,099,889,117,814,000
Computes the length of the LCS between two seqs. The implementation below uses a DP programming algorithm and runs in O(nm) time where n = len(x) and m = len(y). Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: collection of words y: collection of words Returns: Table of dictionary of coord and len lcs
official/nlp/transformer/utils/metrics.py
_lcs
1110sillabo/models
python
def _lcs(x, y): 'Computes the length of the LCS between two seqs.\n\n The implementation below uses a DP programming algorithm and runs\n in O(nm) time where n = len(x) and m = len(y).\n Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence\n\n Args:\n x: collection of words\n y: collection of words\n\n Returns:\n Table of dictionary of coord and len lcs\n ' (n, m) = (len(x), len(y)) table = dict() for i in range((n + 1)): for j in range((m + 1)): if ((i == 0) or (j == 0)): table[(i, j)] = 0 elif (x[(i - 1)] == y[(j - 1)]): table[(i, j)] = (table[((i - 1), (j - 1))] + 1) else: table[(i, j)] = max(table[((i - 1), j)], table[(i, (j - 1))]) return table
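Editorial aside, not part of the corpus: the O(n*m) dynamic programme above for the LCS length, run on a classic pair of strings (strings iterate as character sequences, so they stand in for token lists here):

def lcs_length(x, y):
    n, m = len(x), len(y)
    table = {}
    for i in range(n + 1):
        for j in range(m + 1):
            if i == 0 or j == 0:
                table[i, j] = 0                          # an empty prefix has LCS 0
            elif x[i - 1] == y[j - 1]:
                table[i, j] = table[i - 1, j - 1] + 1    # extend the common subsequence
            else:
                table[i, j] = max(table[i - 1, j], table[i, j - 1])
    return table[n, m]

print(lcs_length('ABCBDAB', 'BDCABA'))  # 4, e.g. 'BCBA' or 'BDAB'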
def _f_lcs(llcs, m, n): 'Computes the LCS-based F-measure score.\n\n Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Args:\n llcs: Length of LCS\n m: number of words in reference summary\n n: number of words in candidate summary\n\n Returns:\n Float. LCS-based F-measure score\n ' r_lcs = (llcs / m) p_lcs = (llcs / n) beta = (p_lcs / (r_lcs + 1e-12)) num = (((1 + (beta ** 2)) * r_lcs) * p_lcs) denom = (r_lcs + ((beta ** 2) * p_lcs)) f_lcs = (num / (denom + 1e-12)) return f_lcs
1,444,177,794,281,056,500
Computes the LCS-based F-measure score. Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/ rouge-working-note-v1.3.1.pdf Args: llcs: Length of LCS m: number of words in reference summary n: number of words in candidate summary Returns: Float. LCS-based F-measure score
official/nlp/transformer/utils/metrics.py
_f_lcs
1110sillabo/models
python
def _f_lcs(llcs, m, n): 'Computes the LCS-based F-measure score.\n\n Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/\n rouge-working-note-v1.3.1.pdf\n\n Args:\n llcs: Length of LCS\n m: number of words in reference summary\n n: number of words in candidate summary\n\n Returns:\n Float. LCS-based F-measure score\n ' r_lcs = (llcs / m) p_lcs = (llcs / n) beta = (p_lcs / (r_lcs + 1e-12)) num = (((1 + (beta ** 2)) * r_lcs) * p_lcs) denom = (r_lcs + ((beta ** 2) * p_lcs)) f_lcs = (num / (denom + 1e-12)) return f_lcs
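Editorial aside, not part of the corpus: the LCS-based F-measure above evaluated on made-up numbers, an LCS of 4 tokens against a 5-token reference and a 6-token candidate:

llcs, m, n = 4, 5.0, 6.0

r_lcs = llcs / m                      # recall = 0.8
p_lcs = llcs / n                      # precision, about 0.667
beta = p_lcs / (r_lcs + 1e-12)
f_lcs = ((1 + beta ** 2) * r_lcs * p_lcs) / (r_lcs + (beta ** 2) * p_lcs + 1e-12)
print(round(f_lcs, 3))                # 0.716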
def problem_metric_fn(*args): "Returns an aggregation of the metric_fn's returned values." (scores, weights) = metric_fn(*args) return tf.metrics.mean(scores, weights)
1,779,688,788,499,172,900
Returns an aggregation of the metric_fn's returned values.
official/nlp/transformer/utils/metrics.py
problem_metric_fn
1110sillabo/models
python
def problem_metric_fn(*args): (scores, weights) = metric_fn(*args) return tf.metrics.mean(scores, weights)
def __init__(self, app=None): 'Initialize the Flask-CLI.' if (app is not None): self.init_app(app)
3,250,348,104,500,289,500
Initialize the Flask-CLI.
virtual/lib/python3.6/site-packages/flask_cli/ext.py
__init__
muneneee/blog
python
def __init__(self, app=None): if (app is not None): self.init_app(app)
def init_app(self, app): 'Initialize a Flask application.' if (not hasattr(app, 'extensions')): app.extensions = {} if ('flask-cli' in app.extensions): raise RuntimeError('Flask-CLI application already initialized') app.extensions['flask-cli'] = self self.setup_pre10(app)
-4,036,279,792,543,169,000
Initialize a Flask application.
virtual/lib/python3.6/site-packages/flask_cli/ext.py
init_app
muneneee/blog
python
def init_app(self, app): if (not hasattr(app, 'extensions')): app.extensions = {} if ('flask-cli' in app.extensions): raise RuntimeError('Flask-CLI application already initialized') app.extensions['flask-cli'] = self self.setup_pre10(app)
def setup_pre10(self, app): 'Setup Flask pre-1.0 application object.' if hasattr(app, 'cli'): return from flask_cli.app import make_shell_context, shell_context_processor app.cli = AppGroup(app.name) app.shell_context_processors = [] app.make_shell_context = types.MethodType(make_shell_context, app) app.shell_context_processor = types.MethodType(shell_context_processor, app)
8,057,332,060,420,983,000
Setup Flask pre-1.0 application object.
virtual/lib/python3.6/site-packages/flask_cli/ext.py
setup_pre10
muneneee/blog
python
def setup_pre10(self, app): if hasattr(app, 'cli'): return from flask_cli.app import make_shell_context, shell_context_processor app.cli = AppGroup(app.name) app.shell_context_processors = [] app.make_shell_context = types.MethodType(make_shell_context, app) app.shell_context_processor = types.MethodType(shell_context_processor, app)
def test_qnode_intergration(): 'Test a simple use of qnode with a JAX interface and non-JAX device' dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='jax') def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ('DeviceArray' in val.__repr__())
7,754,218,695,551,231,000
Test a simple use of qnode with a JAX interface and non-JAX device
tests/tape/interfaces/test_qnode_jax.py
test_qnode_intergration
PritishSehzpaul/pennylane
python
def test_qnode_intergration(): dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='jax') def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ('DeviceArray' in val.__repr__())
def test_to_jax(): 'Test the to_jax method' dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='autograd') def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) circuit.to_jax() weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ('DeviceArray' in val.__repr__())
5,437,203,877,633,381,000
Test the to_jax method
tests/tape/interfaces/test_qnode_jax.py
test_to_jax
PritishSehzpaul/pennylane
python
def test_to_jax(): dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='autograd') def circuit(weights): qml.RX(weights[0], wires=0) qml.RZ(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) circuit.to_jax() weights = jnp.array([0.1, 0.2]) val = circuit(weights) assert ('DeviceArray' in val.__repr__())
def test_simple_jacobian(): 'Test the use of jax.jaxrev' dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='jax', diff_method='parameter-shift') def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) weights = jnp.array([0.1, 0.2]) grads = jax.jacrev(circuit)(weights) assert ('DeviceArray' in grads.__repr__()) assert (grads.shape == (2,)) np.testing.assert_allclose(grads, np.array([(- 0.09784342), (- 0.19767685)]))
-2,640,744,455,169,629,000
Test the use of jax.jaxrev
tests/tape/interfaces/test_qnode_jax.py
test_simple_jacobian
PritishSehzpaul/pennylane
python
def test_simple_jacobian(): dev = qml.device('default.mixed', wires=2) @qml.qnode(dev, interface='jax', diff_method='parameter-shift') def circuit(weights): qml.RX(weights[0], wires=0) qml.RY(weights[1], wires=1) return qml.expval((qml.PauliZ(0) @ qml.PauliZ(1))) weights = jnp.array([0.1, 0.2]) grads = jax.jacrev(circuit)(weights) assert ('DeviceArray' in grads.__repr__()) assert (grads.shape == (2,)) np.testing.assert_allclose(grads, np.array([(- 0.09784342), (- 0.19767685)]))