Test that the data values & type returned by sigma_clip are the same as its
input when using masked=True (rather than being upcast to float64 & containing
NaNs as in issue #10605) and also that the input data get copied or referenced
as appropriate.

def test_sigma_clip_masked_data_values():
    """
    Test that the data values & type returned by sigma_clip are the same as
    its input when using masked=True (rather than being upcast to float64 &
    containing NaNs as in issue #10605) and also that the input data get
    copied or referenced as appropriate.
    """
    data = np.array([-2, 5, -5, -6, 20, 14, 1])

    result = sigma_clip(
        data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=True
    )
    assert result.dtype == data.dtype
    assert_equal(result.data, data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(
        data, sigma=1.5, maxiters=3, axis=None, masked=True, copy=False
    )
    assert result.dtype == data.dtype
    assert_equal(result.data, data)
    assert np.shares_memory(result.data, data)
    # (The fact that the arrays share memory probably also means they're the
    # same, but doesn't strictly prove it, eg. one could be reversed.)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=True)
    assert result.dtype == data.dtype
    assert_equal(result.data, data)
    assert not np.shares_memory(result.data, data)

    result = sigma_clip(data, sigma=1.5, maxiters=3, axis=0, masked=True, copy=False)
    assert result.dtype == data.dtype
    assert_equal(result.data, data)
    assert np.shares_memory(result.data, data)
Test sigma_clip with growth of masking to include the neighbours within a
specified radius of deviant values.

def test_sigma_clip_grow():
    """
    Test sigma_clip with growth of masking to include the neighbours within
    a specified radius of deviant values.
    """
    # We could use a random seed here, but enumerating the data guarantees that
    # we test sigma_clip itself and not random number generation.
    # fmt: off
    data = np.array(
        [
            -0.2 ,  0.48, -0.52, -0.56,  1.97,  1.39,  0.09,  0.28,  0.77,  1.25,
             1.01, -1.3 ,  0.27,  0.23,  1.35,  0.89, -2.  , -0.37,  1.67, -0.44,
            -0.54,  0.48,  3.25, -1.02, -0.58,  0.12,  0.3 ,  0.52,  0.  ,  1.34,
            -0.71, -0.83, -2.37, -1.86, -0.86,  0.56, -1.27,  0.12, -1.06,  0.33,
            -2.36, -0.2 , -1.54, -0.97, -1.31,  0.29,  0.38, -0.75,  0.33,  1.35,
             0.07,  0.25, -0.01,  1.  ,  1.33, -0.92, -1.55,  0.02,  0.76, -0.66,
             0.86, -0.01,  0.05,  0.67,  0.85, -0.96, -0.02, -2.3 , -0.65, -1.22,
            -1.33,  1.07,  0.72,  0.69,  1.  , -0.5 , -0.62, -0.92, -0.73,  0.22,
             0.05, -1.16,  0.82,  0.43,  1.01,  1.82, -1.  ,  0.85, -0.13,  0.91,
             0.19,  2.17, -0.11,  2.  ,  0.03,  0.8 ,  0.12, -0.75,  0.58,  0.15,
        ]
    )
    # fmt: on

    # Test growth to immediate neighbours in simple 1D case:
    filtered_data = sigma_clip(data, sigma=2, maxiters=3, grow=1)

    # Indices of the 26/100 points expected to be masked:
    # fmt: off
    expected = np.array(
        [
             3,  4,  5, 15, 16, 17, 21, 22, 23, 31, 32, 33, 39, 40, 41,
            66, 67, 68, 84, 85, 86, 90, 91, 92, 93, 94,
        ]
    )
    # fmt: on

    assert np.array_equal(np.where(filtered_data.mask)[0], expected)

    # Test block growth in 2 of 3 dimensions (as in a 2D model set):
    data = data.reshape(4, 5, 5)
    filtered_data = sigma_clip(data, sigma=2.1, maxiters=1, grow=1.5, axis=(1, 2))

    # fmt: off
    expected = np.array(
        [
            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3],
            [3, 3, 3, 4, 4, 4, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 4,
             2, 2, 2, 3, 3, 3, 4, 4, 4, 2, 2, 2, 3, 3, 3, 4, 4, 4],
            [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 0, 1, 0, 1,
             1, 2, 3, 1, 2, 3, 1, 2, 3, 0, 1, 2, 0, 1, 2, 0, 1, 2],
        ]
    )
    # fmt: on

    assert np.array_equal(np.where(filtered_data.mask), expected)

    # Test ~spherical growth (of a single very-deviant point) in 3D data:
    data[1, 2, 2] = 100.0
    filtered_data = sigma_clip(data, sigma=3.0, maxiters=1, grow=2.0)

    # fmt: off
    expected = np.array(
        [
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3],
            [1, 1, 1, 2, 2, 2, 3, 3, 3, 0, 1, 1, 1, 2, 2, 2,
             2, 2, 3, 3, 3, 4, 1, 1, 1, 2, 2, 2, 3, 3, 3, 2],
            [1, 2, 3, 1, 2, 3, 1, 2, 3, 2, 1, 2, 3, 0, 1, 2,
             3, 4, 1, 2, 3, 2, 1, 2, 3, 1, 2, 3, 1, 2, 3, 2],
        ]
    )
    # fmt: on

    assert np.array_equal(np.where(filtered_data.mask), expected)
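The grow behavior being tested amounts to dilating the rejection mask with a
circular (or spherical) footprint of radius ``grow``. Here is a minimal sketch
of that idea, assuming a Euclidean-distance footprint and scipy's
binary_dilation; the astropy internals may differ in detail:

import numpy as np
from scipy.ndimage import binary_dilation

def grow_mask(mask, grow):
    # Footprint is True wherever a cell lies within Euclidean distance
    # `grow` of the centre; dilating the mask with it also masks the
    # neighbours of each deviant value.
    size = 2 * int(grow) + 1
    grids = np.indices((size,) * mask.ndim) - int(grow)
    footprint = np.sqrt((grids ** 2).sum(axis=0)) <= grow
    return binary_dilation(mask, structure=footprint)

mask = np.zeros(9, dtype=bool)
mask[4] = True
print(np.where(grow_mask(mask, 1))[0])  # -> [3 4 5]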
Test against Ripley's K function implemented in R package `spatstat`.
[Two ASCII scatter plots of the parametrized input points appear here in the
original docstring: a diagonal of three points spanning roughly (1, 4) to
(3, 6), and a diagonal of three points spanning roughly (-3, 1) to (-1, 3).]

def test_ripley_K_implementation(points, x_min, x_max):
    """
    Test against Ripley's K function implemented in R package `spatstat`.

    [Two ASCII scatter plots of the parametrized input points appear here in
    the original docstring: a diagonal of three points spanning roughly
    (1, 4) to (3, 6), and a diagonal of three points spanning roughly
    (-3, 1) to (-1, 3).]
    """
    area = 100
    r = np.linspace(0, 2.5, 5)

    Kest = RipleysKEstimator(
        area=area, x_min=x_min, y_min=x_min, x_max=x_max, y_max=x_max
    )

    ANS_NONE = np.array([0, 0, 0, 66.667, 66.667])
    assert_allclose(ANS_NONE, Kest(data=points, radii=r, mode="none"), atol=1e-3)

    ANS_TRANS = np.array([0, 0, 0, 82.304, 82.304])
    assert_allclose(
        ANS_TRANS, Kest(data=points, radii=r, mode="translation"), atol=1e-3
    )
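For reference, the mode="none" values above match the textbook unweighted
estimator Khat(r) = A / (n * (n - 1)) * (number of ordered pairs i != j with
d_ij < r). A short check, assuming the three diagonal points (1, 4), (2, 5),
(3, 6) read off the plots:

import numpy as np

pts = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
area, n = 100, len(pts)
# Pairwise Euclidean distances between all points.
d = np.hypot(*(pts[:, None, :] - pts[None, :, :]).transpose(2, 0, 1))
for r in np.linspace(0, 2.5, 5):
    npairs = np.sum((d < r) & ~np.eye(n, dtype=bool))
    print(r, area * npairs / (n * (n - 1)))  # -> 0, 0, 0, 66.667, 66.667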
Mixin-safe version of Column.copy() (with copy_data=True).

Parameters
----------
col : Column or mixin column
    Input column
copy_indices : bool
    Copy the column ``indices`` attribute

Returns
-------
col : Copy of input column

def col_copy(col, copy_indices=True):
    """
    Mixin-safe version of Column.copy() (with copy_data=True).

    Parameters
    ----------
    col : Column or mixin column
        Input column
    copy_indices : bool
        Copy the column ``indices`` attribute

    Returns
    -------
    col : Copy of input column
    """
    if isinstance(col, BaseColumn):
        return col.copy()

    newcol = col.copy() if hasattr(col, "copy") else deepcopy(col)
    # If the column has info defined, we copy it and adjust any indices
    # to point to the copied column.  By guarding with the if statement,
    # we avoid side effects (of creating the default info instance).
    if "info" in col.__dict__:
        newcol.info = col.info
        if copy_indices and col.info.indices:
            newcol.info.indices = deepcopy(col.info.indices)
            for index in newcol.info.indices:
                index.replace_col(col, newcol)

    return newcol
For string-dtype return a version of ``arr`` that is wide enough for
``values``. If ``arr`` is not string-dtype or does not need expansion then
return ``arr``.

Parameters
----------
arr : np.ndarray
    Input array
values : scalar or array-like
    Values for width comparison for string arrays

Returns
-------
arr_expanded : np.ndarray

def _expand_string_array_for_values(arr, values):
    """
    For string-dtype return a version of ``arr`` that is wide enough for
    ``values``.  If ``arr`` is not string-dtype or does not need expansion
    then return ``arr``.

    Parameters
    ----------
    arr : np.ndarray
        Input array
    values : scalar or array-like
        Values for width comparison for string arrays

    Returns
    -------
    arr_expanded : np.ndarray
    """
    if arr.dtype.kind in ("U", "S") and values is not np.ma.masked:
        # Starting with numpy 2.0, np.char.str_len() propagates the mask for
        # masked data.  We want masked values to be preserved so unmask
        # `values` prior to counting string lengths.
        values = np.asarray(values)

        # Find the length of the longest string in the new values.
        values_str_len = np.char.str_len(values).max()

        # Determine character repeat count of arr.dtype.  Returns a positive
        # int or None (something like 'U0' is not possible in numpy).  If new
        # values are longer than current then make a new (wider) version of arr.
        arr_str_len = dtype_bytes_or_chars(arr.dtype)
        if arr_str_len and values_str_len > arr_str_len:
            arr_dtype = arr.dtype.byteorder + arr.dtype.kind + str(values_str_len)
            arr = arr.astype(arr_dtype)

    return arr
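A quick illustration of the widening behavior (hypothetical usage; this is an
internal astropy.table helper, shown here with plain numpy arrays):

import numpy as np

arr = np.array(["ab", "cd"])                         # dtype '<U2'
wide = _expand_string_array_for_values(arr, ["longer"])
print(wide.dtype)                                    # widened to fit "longer" (U6)
same = _expand_string_array_for_values(arr, ["xy"])
print(same is arr)                                   # True: no expansion needed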
Convert N-d sequence-like data to ndarray or MaskedArray.

This is the core function for converting Python lists or list of lists to a
numpy array. This handles embedded np.ma.masked constants in ``data`` along
with the special case of an homogeneous list of MaskedArray elements.

Considerations:

- np.ma.array is about 50 times slower than np.array for list input. This
  function avoids using np.ma.array on list input.
- np.array emits a UserWarning for embedded np.ma.masked, but only for int or
  float inputs. For those it converts to np.nan and forces float dtype. For
  other types np.array is inconsistent, for instance converting np.ma.masked
  to "0.0" for str types.
- Searching in pure Python for np.ma.masked in ``data`` is comparable in speed
  to calling ``np.array(data)``.
- This function may end up making two additional copies of input ``data``.

Parameters
----------
data : N-d sequence
    Input data, typically list or list of lists
dtype : None or dtype-like
    Output datatype (None lets np.array choose)

Returns
-------
np_data : np.ndarray or np.ma.MaskedArray

def _convert_sequence_data_to_array(data, dtype=None):
    """Convert N-d sequence-like data to ndarray or MaskedArray.

    This is the core function for converting Python lists or list of lists to
    a numpy array. This handles embedded np.ma.masked constants in ``data``
    along with the special case of an homogeneous list of MaskedArray elements.

    Considerations:

    - np.ma.array is about 50 times slower than np.array for list input. This
      function avoids using np.ma.array on list input.
    - np.array emits a UserWarning for embedded np.ma.masked, but only for int
      or float inputs. For those it converts to np.nan and forces float dtype.
      For other types np.array is inconsistent, for instance converting
      np.ma.masked to "0.0" for str types.
    - Searching in pure Python for np.ma.masked in ``data`` is comparable in
      speed to calling ``np.array(data)``.
    - This function may end up making two additional copies of input ``data``.

    Parameters
    ----------
    data : N-d sequence
        Input data, typically list or list of lists
    dtype : None or dtype-like
        Output datatype (None lets np.array choose)

    Returns
    -------
    np_data : np.ndarray or np.ma.MaskedArray
    """
    np_ma_masked = np.ma.masked  # Avoid repeated lookups of this object

    # Special case of an homogeneous list of MaskedArray elements (see #8977).
    # np.ma.masked is an instance of MaskedArray, so exclude those values.
    if (
        hasattr(data, "__len__")
        and len(data) > 0
        and all(
            isinstance(val, np.ma.MaskedArray) and val is not np_ma_masked
            for val in data
        )
    ):
        np_data = np.ma.array(data, dtype=dtype)
        return np_data

    # First convert data to a plain ndarray.  If there are instances of
    # np.ma.masked in the data this will issue a warning for int and float.
    with warnings.catch_warnings(record=True) as warns:
        # Ensure this warning from numpy is always enabled and that it is not
        # converted to an error (which can happen during pytest).
        warnings.filterwarnings(
            "always", category=UserWarning, message=".*converting a masked element.*"
        )
        # FutureWarning in numpy 1.21. See https://github.com/astropy/astropy/issues/11291
        # and https://github.com/numpy/numpy/issues/18425.
        warnings.filterwarnings(
            "always",
            category=FutureWarning,
            message=".*Promotion of numbers and bools to strings.*",
        )
        try:
            np_data = np.array(data, dtype=dtype)
        except np.ma.MaskError:
            # Catches case of dtype=int with masked values, instead let it
            # convert to float
            np_data = np.array(data)
        except Exception:
            # Conversion failed for some reason, e.g. [2, 1*u.m] gives TypeError
            # in Quantity.  First try to interpret the data as Quantity.  If
            # that still fails then fall through to object.
            try:
                np_data = Quantity(data, dtype)
            except Exception:
                dtype = object
                np_data = np.array(data, dtype=dtype)

    if np_data.ndim == 0 or (np_data.ndim > 0 and len(np_data) == 0):
        # Implies input was a scalar or an empty list (e.g. initializing an
        # empty table with pre-declared names and dtypes but no data).  Here
        # we need to fall through to initializing with the original data=[].
        return data

    # If there were no warnings and the data are int or float, then we are
    # done.  Other dtypes like string or complex can have masked values and
    # the np.array() conversion gives the wrong answer (e.g. converting
    # np.ma.masked to the string "0.0").
    if len(warns) == 0 and np_data.dtype.kind in ("i", "f"):
        return np_data

    # Now we need to determine if there is an np.ma.masked anywhere in input
    # data.  Make a statement like below to look for np.ma.masked in a nested
    # sequence.  Because np.array(data) succeeded we know that `data` has a
    # regular N-d structure.  Find ma_masked:
    #   any(any(any(d2 is ma_masked for d2 in d1) for d1 in d0) for d0 in data)
    # Using this eval avoids creating a copy of `data` in the more-usual case
    # of no masked elements.
    any_statement = "d0 is ma_masked"
    for ii in reversed(range(np_data.ndim)):
        if ii == 0:
            any_statement = f"any({any_statement} for d0 in data)"
        elif ii == np_data.ndim - 1:
            any_statement = f"any(d{ii} is ma_masked for d{ii} in d{ii-1})"
        else:
            any_statement = f"any({any_statement} for d{ii} in d{ii-1})"
    context = {"ma_masked": np.ma.masked, "data": data}
    has_masked = eval(any_statement, context)

    # If there are any masks then explicitly change each one to a fill value
    # and set a mask boolean array.  If not has_masked then we're done.
    if has_masked:
        mask = np.zeros(np_data.shape, dtype=bool)
        data_filled = np.array(data, dtype=object)

        # Make type-appropriate fill value based on initial conversion.
        if np_data.dtype.kind == "U":
            fill = ""
        elif np_data.dtype.kind == "S":
            fill = b""
        else:
            # Zero works for every numeric type.
            fill = 0

        ranges = [range(dim) for dim in np_data.shape]
        for idxs in itertools.product(*ranges):
            val = data_filled[idxs]
            if val is np_ma_masked:
                data_filled[idxs] = fill
                mask[idxs] = True
            elif isinstance(val, bool) and dtype is None:
                # If we see a bool and dtype not specified then assume bool
                # for the entire array.  Not perfect but in most practical
                # cases OK.  Unfortunately numpy types [False, 0] as int, not
                # bool (and [False, np.ma.masked] => array([0.0, np.nan])).
                dtype = bool

        # If no dtype is provided then need to convert back to list so
        # np.array does type autodetection.
        if dtype is None:
            data_filled = data_filled.tolist()

        # Use np.array first to convert `data` to ndarray (fast) and then
        # make masked array from an ndarray with mask (fast) instead of from
        # `data`.
        np_data = np.ma.array(np.array(data_filled, dtype=dtype), mask=mask)

    return np_data
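A short usage sketch (this is an internal astropy.table helper) showing the
masked-element handling the docstring describes:

import numpy as np

out = _convert_sequence_data_to_array([1, 2, np.ma.masked, 4])
print(out)        # masked_array(data=[1, 2, --, 4], ...)
print(out.dtype)  # int (int64 on most platforms), not float64 with NaN

out = _convert_sequence_data_to_array(["a", np.ma.masked])
print(out)        # ['a', --] with a proper mask, not a bogus string fill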
Make Column comparison methods which encode the ``other`` object to utf-8 in
the case of a bytestring dtype for Py3+.

Parameters
----------
oper : str
    Operator name

def _make_compare(oper):
    """
    Make Column comparison methods which encode the ``other`` object to utf-8
    in the case of a bytestring dtype for Py3+.

    Parameters
    ----------
    oper : str
        Operator name
    """

    def _compare(self, other):
        op = oper  # copy enclosed ref to allow swap below

        # If other is a Quantity, we should let it do the work, since
        # it can deal with our possible unit (which, for MaskedColumn,
        # would get dropped below, as '.data' is accessed in super()).
        if isinstance(other, Quantity):
            return NotImplemented

        # If we are unicode and other is a column with bytes, defer to it for
        # doing the unicode sandwich.  This avoids problems like those
        # discussed in #6838 and #6899.
        if (
            self.dtype.kind == "U"
            and isinstance(other, Column)
            and other.dtype.kind == "S"
        ):
            return NotImplemented

        # If we are bytes, encode other as needed.
        if self.dtype.char == "S":
            other = self._encode_str(other)

        # Now just let the regular ndarray.__eq__, etc., take over.
        result = getattr(super(Column, self), op)(other)

        # But we should not return Column instances for this case.
        return result.data if isinstance(result, Column) else result

    return _compare
Get groups for ``table`` on specified ``keys``.

Parameters
----------
table : `Table`
    Table to group
keys : str, list of str, `Table`, or Numpy array
    Grouping key specifier

Returns
-------
grouped_table : Table object with groups attr set accordingly

def _table_group_by(table, keys):
    """
    Get groups for ``table`` on specified ``keys``.

    Parameters
    ----------
    table : `Table`
        Table to group
    keys : str, list of str, `Table`, or Numpy array
        Grouping key specifier

    Returns
    -------
    grouped_table : Table object with groups attr set accordingly
    """
    from .serialize import represent_mixins_as_columns
    from .table import Table

    # Pre-convert string to tuple of strings, or Table to the underlying
    # structured array
    if isinstance(keys, str):
        keys = (keys,)

    if isinstance(keys, (list, tuple)):
        for name in keys:
            if name not in table.colnames:
                raise ValueError(f"Table does not have key column {name!r}")
            if table.masked and np.any(table[name].mask):
                raise ValueError(
                    f"Missing values in key column {name!r} are not allowed"
                )

        # Make a column slice of the table without copying
        table_keys = table.__class__([table[key] for key in keys], copy=False)

        # If available get a pre-existing index for these columns
        table_index = get_index_by_names(table, keys)
        grouped_by_table_cols = True

    elif isinstance(keys, (np.ndarray, Table)):
        table_keys = keys
        if len(table_keys) != len(table):
            raise ValueError(
                f"Input keys array length {len(table_keys)} does not match "
                f"table length {len(table)}"
            )
        table_index = None
        grouped_by_table_cols = False

    else:
        raise TypeError(
            f"Keys input must be string, list, tuple, Table or numpy array, "
            f"but got {type(keys)}"
        )

    # TODO: don't use represent_mixins_as_columns here, but instead ensure
    # that keys_sort.argsort(kind="stable") works for all columns (including
    # mixins).

    # If there is not already an available index and table_keys is a Table
    # then ensure that all cols (including mixins) are in a form that can be
    # sorted with the code below.
    if not table_index and isinstance(table_keys, Table):
        table_keys_sort = represent_mixins_as_columns(table_keys)
    else:
        table_keys_sort = table_keys

    # Get the argsort index `idx_sort`, accounting for particulars
    try:
        # take advantage of index internal sort if possible
        if table_index is not None:
            idx_sort = table_index.sorted_data()
        else:
            idx_sort = table_keys_sort.argsort(kind="stable")
        stable_sort = True
    except TypeError:
        # TODO: is this still needed?
        # Some versions (likely 1.6 and earlier) of numpy don't support
        # 'mergesort' for all data types.  MacOSX (Darwin) doesn't have a
        # stable sort by default, nor does Windows, while Linux does (or
        # appears to).
        idx_sort = table_keys_sort.argsort()
        stable_sort = platform.system() not in ("Darwin", "Windows")

    # Finally do the actual sort of table_keys values
    table_keys = table_keys[idx_sort]

    # Get all keys
    diffs = np.concatenate(([True], table_keys[1:] != table_keys[:-1], [True]))
    indices = np.flatnonzero(diffs)

    # If the sort is not stable (preserves original table order) then sort
    # idx_sort in place within each group.
    if not stable_sort:
        for i0, i1 in pairwise(indices):
            idx_sort[i0:i1].sort()

    # Make a new table and set the _groups to the appropriate TableGroups
    # object.  Take the subset of the original keys at the indices values
    # (group boundaries).
    out = table.__class__(table[idx_sort])

    if len(table) == 0:
        out_keys = table_keys
        indices = np.array([], dtype=int)
    else:
        out_keys = table_keys[indices[:-1]]
    if isinstance(out_keys, Table):
        out_keys.meta["grouped_by_table_cols"] = grouped_by_table_cols
    out._groups = TableGroups(out, indices=indices, keys=out_keys)

    return out
Get groups for ``column`` on specified ``keys``.

Parameters
----------
column : Column object
    Column to group
keys : Table or Numpy array of same length as col
    Grouping key specifier

Returns
-------
grouped_column : Column object with groups attr set accordingly

def column_group_by(column, keys):
    """
    Get groups for ``column`` on specified ``keys``.

    Parameters
    ----------
    column : Column object
        Column to group
    keys : Table or Numpy array of same length as col
        Grouping key specifier

    Returns
    -------
    grouped_column : Column object with groups attr set accordingly
    """
    from .serialize import represent_mixins_as_columns
    from .table import Table

    # TODO: don't use represent_mixins_as_columns here, but instead ensure
    # that keys_sort.argsort(kind="stable") works for all columns (including
    # mixins).
    if isinstance(keys, Table):
        keys_sort = represent_mixins_as_columns(keys)
    else:
        keys_sort = keys

    if len(keys_sort) != len(column):
        raise ValueError(
            f"Input keys array length {len(keys)} does not match "
            f"column length {len(column)}"
        )

    try:
        idx_sort = keys_sort.argsort(kind="stable")
    except AttributeError:
        raise TypeError(
            f"keys input ({keys.__class__.__name__}) must have an `argsort` method"
        )

    keys = keys[idx_sort]

    # Get all keys
    diffs = np.concatenate(([True], keys[1:] != keys[:-1], [True]))
    indices = np.flatnonzero(diffs)

    # Make a new column and set the _groups to the appropriate ColumnGroups
    # object.  Take the subset of the original keys at the indices values
    # (group boundaries).
    out = column.__class__(column[idx_sort])
    out._groups = ColumnGroups(out, indices=indices, keys=keys[indices[:-1]])

    return out
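The group-boundary trick used in both grouping functions, in isolation: after
a stable sort, compare neighbouring sorted keys and bracket the result with
True at both ends; the indices of the True values are the group boundaries.

import numpy as np

keys = np.array(["b", "a", "b", "a", "c"])
idx_sort = keys.argsort(kind="stable")
skeys = keys[idx_sort]
diffs = np.concatenate(([True], skeys[1:] != skeys[:-1], [True]))
print(np.flatnonzero(diffs))  # -> [0 2 4 5]: groups are [0:2], [2:4], [4:5]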
Return an index corresponding to a subset of the columns of ``table``, or
None if no such index exists. The subset may be given either as
``table_copy`` (a table whose columns are a subset of those in ``table``) or
as ``names`` (a list or tuple of column names).

Parameters
----------
table : `Table`
    Input table
table_copy : `Table`, optional
    Subset of the columns in the ``table`` argument
names : list, tuple, optional
    Subset of column names in the ``table`` argument

Returns
-------
Index of columns or None

def get_index(table, table_copy=None, names=None):
    """
    Return an index corresponding to a subset of the columns of ``table``, or
    None if no such index exists.  The subset may be given either as
    ``table_copy`` (a table whose columns are a subset of those in ``table``)
    or as ``names`` (a list or tuple of column names).

    Parameters
    ----------
    table : `Table`
        Input table
    table_copy : `Table`, optional
        Subset of the columns in the ``table`` argument
    names : list, tuple, optional
        Subset of column names in the ``table`` argument

    Returns
    -------
    Index of columns or None
    """
    if names is not None and table_copy is not None:
        raise ValueError(
            'one and only one argument from "table_copy" or "names" is required'
        )

    if names is None and table_copy is None:
        raise ValueError(
            'one and only one argument from "table_copy" or "names" is required'
        )

    if names is not None:
        names = set(names)
    else:
        names = set(table_copy.colnames)

    if not names <= set(table.colnames):
        raise ValueError(f"{names} is not a subset of table columns")

    for name in names:
        for index in table[name].info.indices:
            if {col.info.name for col in index.columns} == names:
                return index

    return None
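A hypothetical usage sketch (get_index is internal; Table.add_index is the
public API that creates the indices it searches):

from astropy.table import Table

t = Table({"a": [3, 1, 2], "b": [1.0, 2.0, 3.0]})
t.add_index("a")
print(get_index(t, names=["a"]))  # the index object covering column 'a'
print(get_index(t, names=["b"]))  # None: no index exists on 'b'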
Returns an index in ``table`` corresponding to the ``names`` columns or None
if no such index exists.

Parameters
----------
table : `Table`
    Input table
names : tuple, list
    Column names

def get_index_by_names(table, names):
    """
    Returns an index in ``table`` corresponding to the ``names`` columns or
    None if no such index exists.

    Parameters
    ----------
    table : `Table`
        Input table
    names : tuple, list
        Column names
    """
    names = list(names)
    for index in table.indices:
        index_names = [col.info.name for col in index.columns]
        if index_names == names:
            return index
    return None
Write summary information about the table to the ``out`` filehandle. By
default this prints to standard output via sys.stdout.

The ``option`` argument specifies what type of information to include. This
can be a string, a function, or a list of strings or functions. Built-in
options are:

- ``attributes``: basic column meta data like ``dtype`` or ``format``
- ``stats``: basic statistics: minimum, mean, and maximum

If a function is specified then that function will be called with the column
as its single argument. The function must return an OrderedDict containing
the information attributes.

If a list is provided then the information attributes will be appended for
each of the options, in order.

Examples
--------
>>> from astropy.table.table_helpers import simple_table
>>> t = simple_table(size=2, kinds='if')
>>> t['a'].unit = 'm'
>>> t.info()
<Table length=2>
name  dtype  unit
---- ------- ----
   a   int64    m
   b float64

>>> t.info('stats')
<Table length=2>
name mean std min max
---- ---- --- --- ---
   a  1.5 0.5   1   2
   b  1.5 0.5   1   2

Parameters
----------
option : str, callable, list of (str or callable)
    Info option, defaults to 'attributes'.
out : file-like, None
    Output destination, default is sys.stdout.  If None then a Table with
    information attributes is returned

Returns
-------
info : `~astropy.table.Table` if out==None else None

def table_info(tbl, option="attributes", out=""):
    """
    Write summary information about the table to the ``out`` filehandle.
    By default this prints to standard output via sys.stdout.

    The ``option`` argument specifies what type of information to include.
    This can be a string, a function, or a list of strings or functions.
    Built-in options are:

    - ``attributes``: basic column meta data like ``dtype`` or ``format``
    - ``stats``: basic statistics: minimum, mean, and maximum

    If a function is specified then that function will be called with the
    column as its single argument.  The function must return an OrderedDict
    containing the information attributes.

    If a list is provided then the information attributes will be appended
    for each of the options, in order.

    Examples
    --------
    >>> from astropy.table.table_helpers import simple_table
    >>> t = simple_table(size=2, kinds='if')
    >>> t['a'].unit = 'm'
    >>> t.info()
    <Table length=2>
    name  dtype  unit
    ---- ------- ----
       a   int64    m
       b float64

    >>> t.info('stats')
    <Table length=2>
    name mean std min max
    ---- ---- --- --- ---
       a  1.5 0.5   1   2
       b  1.5 0.5   1   2

    Parameters
    ----------
    option : str, callable, list of (str or callable)
        Info option, defaults to 'attributes'.
    out : file-like, None
        Output destination, default is sys.stdout.  If None then a Table
        with information attributes is returned

    Returns
    -------
    info : `~astropy.table.Table` if out==None else None
    """
    from .table import Table

    if out == "":
        out = sys.stdout

    descr_vals = [tbl.__class__.__name__]
    if tbl.masked:
        descr_vals.append("masked=True")
    descr_vals.append(f"length={len(tbl)}")

    outlines = ["<" + " ".join(descr_vals) + ">"]

    cols = list(tbl.columns.values())
    if tbl.colnames:
        infos = []
        for col in cols:
            infos.append(col.info(option, out=None))
        info = Table(infos, names=list(infos[0]))
    else:
        info = Table()

    if out is None:
        return info

    # Since info is going to a filehandle for viewing then remove
    # uninteresting columns.
    if "class" in info.colnames:
        # Remove 'class' info column if all table columns are the same class
        # and they are the default column class for that table.
        uniq_types = {type(col) for col in cols}
        if len(uniq_types) == 1 and isinstance(cols[0], tbl.ColumnClass):
            del info["class"]

    if "n_bad" in info.colnames and np.all(info["n_bad"] == 0):
        del info["n_bad"]

    # Standard attributes has 'length' but this is typically redundant
    if "length" in info.colnames and np.all(info["length"] == len(tbl)):
        del info["length"]

    for name in info.colnames:
        if info[name].dtype.kind in "SU" and np.all(info[name] == ""):
            del info[name]

    if tbl.colnames:
        outlines.extend(info.pformat(max_width=-1, max_lines=-1, show_unit=False))
    else:
        outlines.append("<No columns>")

    out.writelines(outline + os.linesep for outline in outlines)
Context manager to temporarily override individual column
info.serialize_method dict values. The serialize_method attribute is an
optional dict which might look like ``{'fits': 'jd1_jd2', 'ecsv':
'formatted_value', ..}``.

``serialize_method`` is a str or dict. If str then the value is the
``serialize_method`` that will be used for all formats. If dict then the key
values can be either:

- Column name. This has higher precedence than the second option of matching
  class.
- Class (matches any column which is an instance of the class)

This context manager is expected to be used only within ``Table.write``. It
could have been a private method on Table but prefer not to add clutter to
that class.

Parameters
----------
tbl : Table object
    Input table
serialize_method : dict, str
    Dict with key values of column names or types, or str

Returns
-------
None (context manager)

@contextmanager
def serialize_method_as(tbl, serialize_method):
    """Context manager to temporarily override individual column
    info.serialize_method dict values.  The serialize_method attribute is an
    optional dict which might look like ``{'fits': 'jd1_jd2', 'ecsv':
    'formatted_value', ..}``.

    ``serialize_method`` is a str or dict.  If str then the value is the
    ``serialize_method`` that will be used for all formats.  If dict then the
    key values can be either:

    - Column name.  This has higher precedence than the second option of
      matching class.
    - Class (matches any column which is an instance of the class)

    This context manager is expected to be used only within ``Table.write``.
    It could have been a private method on Table but prefer not to add clutter
    to that class.

    Parameters
    ----------
    tbl : Table object
        Input table
    serialize_method : dict, str
        Dict with key values of column names or types, or str

    Returns
    -------
    None (context manager)
    """

    def get_override_sm(col):
        """
        Determine if the ``serialize_method`` str or dict specifies an
        override of column presets for ``col``.  Returns the matching
        serialize_method value or ``None``.
        """
        # If a string then all columns match
        if isinstance(serialize_method, str):
            return serialize_method

        # If column name then return that serialize_method
        if col.info.name in serialize_method:
            return serialize_method[col.info.name]

        # Otherwise look for subclass matches
        for key in serialize_method:
            if isinstance(key, type) and isinstance(col, key):
                return serialize_method[key]

        return None

    # Setup for the context block.  Set individual column.info.serialize_method
    # values as appropriate and keep a backup copy.  If ``serialize_method``
    # is None or empty then don't do anything.

    # Original serialize_method dict, keyed by column name.  This only
    # gets used and set if there is an override.
    original_sms = {}

    if serialize_method:
        # Go through every column and if it has a serialize_method info
        # attribute then potentially update it for the duration of the write.
        for col in tbl.itercols():
            if hasattr(col.info, "serialize_method"):
                override_sm = get_override_sm(col)
                if override_sm:
                    # Make a reference copy of the column serialize_method
                    # dict which maps format (e.g. 'fits') to the
                    # appropriate method (e.g. 'data_mask').
                    original_sms[col.info.name] = col.info.serialize_method

                    # Set serialize method for *every* available format.  This
                    # is brute force, but at this point the format ('fits',
                    # 'ecsv', etc) is not actually known (this gets determined
                    # by the write function in registry.py).  Note this
                    # creates a new temporary dict object so that the restored
                    # version is the same original object.
                    col.info.serialize_method = {
                        fmt: override_sm for fmt in col.info.serialize_method
                    }

    # Finally yield for the context block
    try:
        yield
    finally:
        # Teardown (restore) for the context block.  Be sure to do this even
        # if an exception occurred.
        if serialize_method:
            for name, original_sm in original_sms.items():
                tbl[name].info.serialize_method = original_sm
Construct dict from !!omap in yaml safe load.

See ``get_header_from_yaml()`` for usage.

Source: https://gist.github.com/weaver/317164
License: Unspecified

This is the same as SafeConstructor.construct_yaml_omap(), except the data
type is changed to OrderedDict() and setitem is used instead of append in
the loop

def _construct_odict(load, node):
    """
    Construct dict from !!omap in yaml safe load.

    See ``get_header_from_yaml()`` for usage.

    Source: https://gist.github.com/weaver/317164
    License: Unspecified

    This is the same as SafeConstructor.construct_yaml_omap(), except the
    data type is changed to OrderedDict() and setitem is used instead of
    append in the loop
    """
    omap = {}
    yield omap
    if not isinstance(node, yaml.SequenceNode):
        raise yaml.constructor.ConstructorError(
            "while constructing an ordered map",
            node.start_mark,
            f"expected a sequence, but found {node.id}",
            node.start_mark,
        )
    for subnode in node.value:
        if not isinstance(subnode, yaml.MappingNode):
            raise yaml.constructor.ConstructorError(
                "while constructing an ordered map",
                node.start_mark,
                f"expected a mapping of length 1, but found {subnode.id}",
                subnode.start_mark,
            )
        if len(subnode.value) != 1:
            raise yaml.constructor.ConstructorError(
                "while constructing an ordered map",
                node.start_mark,
                f"expected a single mapping item, but found "
                f"{len(subnode.value)} items",
                subnode.start_mark,
            )
        key_node, value_node = subnode.value[0]
        key = load.construct_object(key_node)
        value = load.construct_object(value_node)
        omap[key] = value
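A small, self-contained demonstration of wiring this constructor into a
loader, mirroring what get_header_from_yaml() does. DemoLoader is a
hypothetical stand-in for the TableLoader used in the source:

import yaml

class DemoLoader(yaml.SafeLoader):
    pass

DemoLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict)
print(yaml.load("!!omap [a: 1, b: 2]", Loader=DemoLoader))  # -> {'a': 1, 'b': 2}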
This is the same code as BaseRepresenter.represent_sequence(), but the value
passed to dump.represent_data() in the loop is a dictionary instead of a
tuple.

Source: https://gist.github.com/weaver/317164
License: Unspecified

def _repr_pairs(dump, tag, sequence, flow_style=None):
    """
    This is the same code as BaseRepresenter.represent_sequence(), but the
    value passed to dump.represent_data() in the loop is a dictionary instead
    of a tuple.

    Source: https://gist.github.com/weaver/317164
    License: Unspecified
    """
    value = []
    node = yaml.SequenceNode(tag, value, flow_style=flow_style)
    if dump.alias_key is not None:
        dump.represented_objects[dump.alias_key] = node
    best_style = True
    for key, val in sequence:
        item = dump.represent_data({key: val})
        if not (isinstance(item, yaml.ScalarNode) and not item.style):
            best_style = False
        value.append(item)
    if flow_style is None:
        if dump.default_flow_style is not None:
            node.flow_style = dump.default_flow_style
        else:
            node.flow_style = best_style
    return node
Represent OrderedDict in yaml dump.

Source: https://gist.github.com/weaver/317164
License: Unspecified

>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False)  # doctest: +SKIP
'!!omap\n- foo: bar\n- mumble: quux\n- baz: gorp\n'
>>> yaml.dump(data, default_flow_style=True)  # doctest: +SKIP
'!!omap [foo: bar, mumble: quux, baz: gorp]\n'

def _repr_odict(dumper, data):
    """
    Represent OrderedDict in yaml dump.

    Source: https://gist.github.com/weaver/317164
    License: Unspecified

    >>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
    >>> yaml.dump(data, default_flow_style=False)  # doctest: +SKIP
    '!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
    >>> yaml.dump(data, default_flow_style=True)  # doctest: +SKIP
    '!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
    """
    return _repr_pairs(dumper, "tag:yaml.org,2002:omap", data.items())
Represent ColumnDict in yaml dump.

This is the same as an ordinary mapping except that the keys are written in a
fixed order that makes sense for astropy table columns.

def _repr_column_dict(dumper, data):
    """
    Represent ColumnDict in yaml dump.

    This is the same as an ordinary mapping except that the keys are written
    in a fixed order that makes sense for astropy table columns.
    """
    return dumper.represent_mapping("tag:yaml.org,2002:map", data)
Check if object-type ``col`` is really a variable length list. That is true
if the object consists purely of lists of nested lists, where the shape of
every item can be represented as (m, n, ..., *), where the (m, n, ...) are
constant and only the lists in the last axis have variable shape. If so, the
returned value of shape will be a tuple in the form (m, n, ..., None).

If ``col`` is a variable length array then the returned ``dtype`` corresponds
to the type found by numpy for all the individual values. Otherwise it will
be ``np.dtype(object)``.

Parameters
----------
col : column-like
    Input table column, assumed to be object-type

Returns
-------
shape : tuple
    Inferred variable length shape or None
dtype : np.dtype
    Numpy dtype that applies to col

def _get_variable_length_array_shape(col):
    """Check if object-type ``col`` is really a variable length list.

    That is true if the object consists purely of lists of nested lists,
    where the shape of every item can be represented as (m, n, ..., *),
    where the (m, n, ...) are constant and only the lists in the last axis
    have variable shape.  If so, the returned value of shape will be a tuple
    in the form (m, n, ..., None).

    If ``col`` is a variable length array then the returned ``dtype``
    corresponds to the type found by numpy for all the individual values.
    Otherwise it will be ``np.dtype(object)``.

    Parameters
    ----------
    col : column-like
        Input table column, assumed to be object-type

    Returns
    -------
    shape : tuple
        Inferred variable length shape or None
    dtype : np.dtype
        Numpy dtype that applies to col
    """

    class ConvertError(ValueError):
        """Local conversion error used below."""

    # Numpy types supported as variable-length arrays
    np_classes = (np.floating, np.integer, np.bool_, np.str_)

    try:
        if len(col) == 0 or not all(isinstance(val, np.ndarray) for val in col):
            raise ConvertError
        dtype = col[0].dtype
        shape = col[0].shape[:-1]
        for val in col:
            if not issubclass(val.dtype.type, np_classes) or val.shape[:-1] != shape:
                raise ConvertError
            dtype = np.promote_types(dtype, val.dtype)
        shape = shape + (None,)
    except ConvertError:
        # `col` is not a variable length array, return shape and dtype to
        # the original.  Note that this function is only called if
        # col.shape[1:] was () and col.info.dtype is object.
        dtype = col.info.dtype
        shape = ()

    return shape, dtype
Return string version of ``dtype`` for writing to ECSV ``datatype``.

def _get_datatype_from_dtype(dtype):
    """Return string version of ``dtype`` for writing to ECSV ``datatype``."""
    datatype = dtype.name
    if datatype.startswith(("bytes", "str")):
        datatype = "string"
    if datatype.endswith("_"):
        datatype = datatype[:-1]  # string_ and bool_ lose the final _ for ECSV
    return datatype
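A quick check of the mapping with plain numpy dtypes (both byte and unicode
string kinds collapse to the plain ECSV "string" datatype):

import numpy as np

for dt in (np.dtype("U4"), np.dtype("S5"), np.dtype(bool), np.dtype("int64")):
    print(dt.name, "->", _get_datatype_from_dtype(dt))
# str128 -> string, bytes40 -> string, bool -> bool, int64 -> int64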
Extract information from a column (apart from the values) that is required to
fully serialize the column.

Parameters
----------
col : column-like
    Input Table column

Returns
-------
attrs : dict
    Dict of ECSV attributes for ``col``

def _get_col_attributes(col):
    """
    Extract information from a column (apart from the values) that is
    required to fully serialize the column.

    Parameters
    ----------
    col : column-like
        Input Table column

    Returns
    -------
    attrs : dict
        Dict of ECSV attributes for ``col``
    """
    dtype = col.info.dtype  # Type of column values that get written
    subtype = None  # Type of data for object columns serialized with JSON
    shape = col.shape[1:]  # Shape of multidim / variable length columns

    if dtype.name == "object":
        if shape == ():
            # 1-d object type column might be a variable length array
            dtype = np.dtype(str)
            shape, subtype = _get_variable_length_array_shape(col)
        else:
            # N-d object column is subtype object but serialized as JSON string
            dtype = np.dtype(str)
            subtype = np.dtype(object)
    elif shape:
        # N-d column which is not object is serialized as JSON string
        dtype = np.dtype(str)
        subtype = col.info.dtype

    datatype = _get_datatype_from_dtype(dtype)

    # Set the output attributes
    attrs = ColumnDict()
    attrs["name"] = col.info.name
    attrs["datatype"] = datatype
    for attr, nontrivial, xform in (
        ("unit", lambda x: x is not None, str),
        ("format", lambda x: x is not None, None),
        ("description", lambda x: x is not None, None),
        ("meta", lambda x: x, OrderedDict),
    ):
        col_attr = getattr(col.info, attr)
        if nontrivial(col_attr):
            attrs[attr] = xform(col_attr) if xform else col_attr

    if subtype:
        attrs["subtype"] = _get_datatype_from_dtype(subtype)
        # Numpy 'object' maps to 'subtype' of 'json' in ECSV
        if attrs["subtype"] == "object":
            attrs["subtype"] = "json"
    if shape:
        attrs["subtype"] += json.dumps(list(shape), separators=(",", ":"))

    return attrs
Return lines with a YAML representation of header content from the ``table``.

Parameters
----------
table : `~astropy.table.Table` object
    Table for which header content is output

Returns
-------
lines : list
    List of text lines with YAML header content

def get_yaml_from_table(table):
    """
    Return lines with a YAML representation of header content from the
    ``table``.

    Parameters
    ----------
    table : `~astropy.table.Table` object
        Table for which header content is output

    Returns
    -------
    lines : list
        List of text lines with YAML header content
    """
    header = {"cols": list(table.columns.values())}
    if table.meta:
        header["meta"] = OrderedDict(table.meta)

    return get_yaml_from_header(header)
Return lines with a YAML representation of header content from a Table.

The ``header`` dict must contain these keys:

- 'cols' : list of table column objects (required)
- 'meta' : table 'meta' attribute (optional)

Other keys included in ``header`` will be serialized in the output YAML
representation.

Parameters
----------
header : dict
    Table header content

Returns
-------
lines : list
    List of text lines with YAML header content

def get_yaml_from_header(header):
    """
    Return lines with a YAML representation of header content from a Table.

    The ``header`` dict must contain these keys:

    - 'cols' : list of table column objects (required)
    - 'meta' : table 'meta' attribute (optional)

    Other keys included in ``header`` will be serialized in the output YAML
    representation.

    Parameters
    ----------
    header : dict
        Table header content

    Returns
    -------
    lines : list
        List of text lines with YAML header content
    """
    from astropy.io.misc.yaml import AstropyDumper

    class TableDumper(AstropyDumper):
        """
        Custom Dumper that represents OrderedDict as an !!omap object.
        """

        def represent_mapping(self, tag, mapping, flow_style=None):
            """
            This is a combination of the Python 2 and 3 versions of this
            method in the PyYAML library to allow the required key ordering
            via the ColumnOrderList object.  The Python 3 version insists on
            turning the items() mapping into a list object and sorting, which
            results in alphabetical order for the column keys.
            """
            value = []
            node = yaml.MappingNode(tag, value, flow_style=flow_style)
            if self.alias_key is not None:
                self.represented_objects[self.alias_key] = node
            best_style = True
            if hasattr(mapping, "items"):
                mapping = mapping.items()
                if hasattr(mapping, "sort"):
                    mapping.sort()
                else:
                    mapping = list(mapping)
                    try:
                        mapping = sorted(mapping)
                    except TypeError:
                        pass

            for item_key, item_value in mapping:
                node_key = self.represent_data(item_key)
                node_value = self.represent_data(item_value)
                if not (isinstance(node_key, yaml.ScalarNode) and not node_key.style):
                    best_style = False
                if not (
                    isinstance(node_value, yaml.ScalarNode) and not node_value.style
                ):
                    best_style = False
                value.append((node_key, node_value))
            if flow_style is None:
                if self.default_flow_style is not None:
                    node.flow_style = self.default_flow_style
                else:
                    node.flow_style = best_style
            return node

    TableDumper.add_representer(OrderedDict, _repr_odict)
    TableDumper.add_representer(ColumnDict, _repr_column_dict)

    header = copy.copy(header)  # Don't overwrite original
    header["datatype"] = [_get_col_attributes(col) for col in header["cols"]]
    del header["cols"]

    lines = yaml.dump(
        header, default_flow_style=None, Dumper=TableDumper, width=130
    ).splitlines()
    return lines
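An illustrative round trip through the simpler get_yaml_from_table() wrapper.
The printed lines below are an assumption about the general shape of the
output; exact formatting can vary with the PyYAML version and platform
integer width:

from astropy.table import Table

t = Table({"a": [1, 2]})
for line in get_yaml_from_table(t):
    print(line)
# datatype:
# - {name: a, datatype: int64}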
Get a header dict from input ``lines`` which should be valid YAML. This input
will typically be created by get_yaml_from_header. The output is a dictionary
which describes all the table and column meta.

The get_cols() method in the io/ascii/ecsv.py file should be used as a guide
to using the information when constructing a table using this header dict
information.

Parameters
----------
lines : list
    List of text lines with YAML header content

Returns
-------
header : dict
    Dictionary describing table and column meta

def get_header_from_yaml(lines):
    """
    Get a header dict from input ``lines`` which should be valid YAML.  This
    input will typically be created by get_yaml_from_header.  The output is a
    dictionary which describes all the table and column meta.

    The get_cols() method in the io/ascii/ecsv.py file should be used as a
    guide to using the information when constructing a table using this
    header dict information.

    Parameters
    ----------
    lines : list
        List of text lines with YAML header content

    Returns
    -------
    header : dict
        Dictionary describing table and column meta
    """
    from astropy.io.misc.yaml import AstropyLoader

    class TableLoader(AstropyLoader):
        """
        Custom Loader that constructs OrderedDict from an !!omap object.

        This does nothing but provide a namespace for adding the custom odict
        constructor.
        """

    TableLoader.add_constructor("tag:yaml.org,2002:omap", _construct_odict)

    # Now actually load the YAML data structure into `meta`
    header_yaml = textwrap.dedent("\n".join(lines))
    try:
        header = yaml.load(header_yaml, Loader=TableLoader)
    except Exception as err:
        raise YamlParseError() from err

    return header
Find the column names mapping when merging the list of structured ndarrays
``arrays``. It is assumed that col names in ``common_names`` are to be merged
into a single column while the rest will be uniquely represented in the
output. The args ``uniq_col_name`` and ``table_names`` specify how to rename
columns in case of conflicts.

Returns a dict mapping each output column name to the input(s). This takes
the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all
of the input names will be present, while for the other non-key columns the
value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc.

def get_col_name_map(
    arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None
):
    """
    Find the column names mapping when merging the list of structured
    ndarrays ``arrays``.  It is assumed that col names in ``common_names``
    are to be merged into a single column while the rest will be uniquely
    represented in the output.  The args ``uniq_col_name`` and
    ``table_names`` specify how to rename columns in case of conflicts.

    Returns a dict mapping each output column name to the input(s).  This
    takes the form {outname : (col_name_0, col_name_1, ...), ... }.  For key
    columns all of the input names will be present, while for the other
    non-key columns the value will be (col_name_0, None, ..) or
    (None, col_name_1, ..) etc.
    """
    col_name_map = collections.defaultdict(lambda: [None] * len(arrays))
    col_name_list = []

    if table_names is None:
        table_names = [str(ii + 1) for ii in range(len(arrays))]

    for idx, array in enumerate(arrays):
        table_name = table_names[idx]
        for name in array.dtype.names:
            out_name = name

            if name in common_names:
                # If name is in the list of common_names then insert into
                # the column name list, but just once.
                if name not in col_name_list:
                    col_name_list.append(name)
            else:
                # If name is not one of the common column outputs, and it
                # collides with the names in one of the other arrays, then
                # rename
                others = list(arrays)
                others.pop(idx)
                if any(name in other.dtype.names for other in others):
                    out_name = uniq_col_name.format(
                        table_name=table_name, col_name=name
                    )
                col_name_list.append(out_name)

            col_name_map[out_name][idx] = name

    # Check for duplicate output column names
    col_name_count = Counter(col_name_list)
    repeated_names = [name for name, count in col_name_count.items() if count > 1]
    if repeated_names:
        raise TableMergeError(
            f"Merging column names resulted in duplicates: {repeated_names}. "
            "Change uniq_col_name or table_names args to fix this."
        )

    # Convert col_name_map to a regular dict with tuple (immutable) values
    col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list)

    return col_name_map
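A small sketch of the renaming behavior with plain structured arrays (this
helper operates on the arrays' dtype names, so no real data is needed):

import numpy as np

t1 = np.empty(2, dtype=[("key", int), ("x", float)])
t2 = np.empty(2, dtype=[("key", int), ("x", float)])
print(get_col_name_map([t1, t2], common_names=["key"]))
# -> mapping {'key': ('key', 'key'), 'x_1': ('x', None), 'x_2': (None, 'x')}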
Find the dtypes descrs resulting from merging the list of arrays' dtypes,
using the column name mapping ``col_name_map``.

Return a list of descrs for the output.

def get_descrs(arrays, col_name_map):
    """
    Find the dtypes descrs resulting from merging the list of arrays' dtypes,
    using the column name mapping ``col_name_map``.

    Return a list of descrs for the output.
    """
    out_descrs = []

    for out_name, in_names in col_name_map.items():
        # List of input arrays that contribute to this output column
        in_cols = [
            arr[name] for arr, name in zip(arrays, in_names) if name is not None
        ]

        # List of names of the columns that contribute to this output column.
        names = [name for name in in_names if name is not None]

        # Output dtype is the superset of all dtypes in in_arrays
        try:
            dtype = common_dtype(in_cols)
        except TableMergeError as tme:
            # Beautify the error message when we are trying to merge columns
            # with incompatible types by including the name of the columns
            # that originated the error.
            raise TableMergeError(
                f"The '{names[0]}' columns have incompatible types: "
                f"{tme._incompat_types}"
            ) from tme

        # Make sure all input shapes are the same
        uniq_shapes = {col.shape[1:] for col in in_cols}
        if len(uniq_shapes) != 1:
            raise TableMergeError("Key columns have different shape")
        shape = uniq_shapes.pop()

        if out_name is not None:
            out_name = str(out_name)
        out_descrs.append((out_name, dtype, shape))

    return out_descrs
Use numpy to find the common dtype for a list of structured ndarray columns.

Only allow columns within the following fundamental numpy data types:
np.bool_, np.object_, np.number, np.character, np.void

def common_dtype(cols):
    """
    Use numpy to find the common dtype for a list of structured ndarray
    columns.

    Only allow columns within the following fundamental numpy data types:
    np.bool_, np.object_, np.number, np.character, np.void
    """
    np_types = (np.bool_, np.object_, np.number, np.character, np.void)
    uniq_types = {
        tuple(issubclass(col.dtype.type, np_type) for np_type in np_types)
        for col in cols
    }
    if len(uniq_types) > 1:
        # Embed into the exception the actual list of incompatible types.
        incompat_types = [col.dtype.name for col in cols]
        tme = TableMergeError(f"Columns have incompatible types {incompat_types}")
        tme._incompat_types = incompat_types
        raise tme

    arrs = [np.empty(1, dtype=col.dtype) for col in cols]

    # For string-type arrays need to explicitly fill in non-zero
    # values or the final arr_common = .. step is unpredictable.
    for arr in arrs:
        if arr.dtype.kind in ("S", "U"):
            arr[0] = "0" * arr.itemsize

    arr_common = np.array([arr[0] for arr in arrs])
    return arr_common.dtype.str
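Example behavior: string columns promote to the widest string type, while
mixing fundamentally different kinds raises the module's TableMergeError.

import numpy as np

c1 = np.array(["a", "bb"])      # <U2
c2 = np.array(["ccc"])          # <U3
print(common_dtype([c1, c2]))   # -> '<U3'

c3 = np.array([1, 2])
try:
    common_dtype([c1, c3])
except TableMergeError as err:
    print(err)                  # Columns have incompatible types [...]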
Check that tables is a Table or sequence of Tables. Returns the corresponding
list of Tables.

def _get_list_of_tables(tables):
    """
    Check that tables is a Table or sequence of Tables.  Returns the
    corresponding list of Tables.
    """
    # Make sure we have a list of things
    if not isinstance(tables, Sequence):
        tables = [tables]

    # Make sure there is something to stack
    if len(tables) == 0:
        raise ValueError("no values provided to stack.")

    # Convert inputs (Table, Row, or anything column-like) to Tables.
    # Special case that Quantity converts to a QTable.
    # Do this in a separate list to not modify the original input list
    tables = list(tables)
    for ii, val in enumerate(tables):
        if isinstance(val, Table):
            pass
        elif isinstance(val, Row):
            tables[ii] = Table(val)
        elif isinstance(val, Quantity):
            tables[ii] = QTable([val])
        else:
            try:
                tables[ii] = Table([val])
            except (ValueError, TypeError) as err:
                raise TypeError(f"Cannot convert {val} to table column.") from err

    return tables
From a list of input objects ``objs`` get merged output object class.

This is just taken as the deepest subclass. This doesn't handle complicated
inheritance schemes, but as a special case, classes which share ``info`` are
taken to be compatible.

def _get_out_class(objs):
    """
    From a list of input objects ``objs`` get merged output object class.

    This is just taken as the deepest subclass.  This doesn't handle
    complicated inheritance schemes, but as a special case, classes which
    share ``info`` are taken to be compatible.
    """
    out_class = objs[0].__class__
    for obj in objs[1:]:
        if issubclass(obj.__class__, out_class):
            out_class = obj.__class__

    if any(
        not (
            issubclass(out_class, obj.__class__)
            or out_class.info is obj.__class__.info
        )
        for obj in objs
    ):
        raise ValueError(
            f"unmergeable object classes {[type(obj).__name__ for obj in objs]}"
        )

    return out_class
Helper function to join on SkyCoord columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are both ``SkyCoord`` objects, matched by computing the distance between points and accepting values below ``distance``. The distance cross-matching is done using either `~astropy.coordinates.search_around_sky` or `~astropy.coordinates.search_around_3d`, depending on the value of ``distance_func``. The default is ``'search_around_sky'``. One can also provide a function object for ``distance_func``, in which case it must be a function that follows the same input and output API as `~astropy.coordinates.search_around_sky`. In this case the function will be called with ``(skycoord1, skycoord2, distance)`` as arguments. Parameters ---------- distance : `~astropy.units.Quantity` ['angle', 'length'] Maximum distance between points to be considered a join match. Must have angular or distance units. distance_func : str or function Specifies the function for performing the cross-match based on ``distance``. If supplied as a string this specifies the name of a function in `astropy.coordinates`. If supplied as a function then that function is called directly. Returns ------- join_func : function Function that accepts two ``SkyCoord`` columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- This example shows an inner join of two ``SkyCoord`` columns, taking any sources within 0.2 deg to be a match. Note the new ``sc_id`` column which is added and provides a unique source identifier for the matches. >>> from astropy.coordinates import SkyCoord >>> import astropy.units as u >>> from astropy.table import Table, join_skycoord >>> from astropy import table >>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') >>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') >>> join_func = join_skycoord(0.2 * u.deg) >>> join_func(sc1, sc2) # Associate each coordinate with unique source ID (array([3, 1, 1, 2]), array([4, 1, 2])) >>> t1 = Table([sc1], names=['sc']) >>> t2 = Table([sc2], names=['sc']) >>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) >>> print(t12) # Note new `sc_id` column with the IDs from join_func() sc_id sc_1 sc_2 deg,deg deg,deg ----- ------- -------- 1 1.0,0.0 1.05,0.0 1 1.1,0.0 1.05,0.0 2 2.0,0.0 2.1,0.0
def join_skycoord(distance, distance_func="search_around_sky"): """Helper function to join on SkyCoord columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are both ``SkyCoord`` objects, matched by computing the distance between points and accepting values below ``distance``. The distance cross-matching is done using either `~astropy.coordinates.search_around_sky` or `~astropy.coordinates.search_around_3d`, depending on the value of ``distance_func``. The default is ``'search_around_sky'``. One can also provide a function object for ``distance_func``, in which case it must be a function that follows the same input and output API as `~astropy.coordinates.search_around_sky`. In this case the function will be called with ``(skycoord1, skycoord2, distance)`` as arguments. Parameters ---------- distance : `~astropy.units.Quantity` ['angle', 'length'] Maximum distance between points to be considered a join match. Must have angular or distance units. distance_func : str or function Specifies the function for performing the cross-match based on ``distance``. If supplied as a string this specifies the name of a function in `astropy.coordinates`. If supplied as a function then that function is called directly. Returns ------- join_func : function Function that accepts two ``SkyCoord`` columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- This example shows an inner join of two ``SkyCoord`` columns, taking any sources within 0.2 deg to be a match. Note the new ``sc_id`` column which is added and provides a unique source identifier for the matches. >>> from astropy.coordinates import SkyCoord >>> import astropy.units as u >>> from astropy.table import Table, join_skycoord >>> from astropy import table >>> sc1 = SkyCoord([0, 1, 1.1, 2], [0, 0, 0, 0], unit='deg') >>> sc2 = SkyCoord([0.5, 1.05, 2.1], [0, 0, 0], unit='deg') >>> join_func = join_skycoord(0.2 * u.deg) >>> join_func(sc1, sc2) # Associate each coordinate with unique source ID (array([3, 1, 1, 2]), array([4, 1, 2])) >>> t1 = Table([sc1], names=['sc']) >>> t2 = Table([sc2], names=['sc']) >>> t12 = table.join(t1, t2, join_funcs={'sc': join_skycoord(0.2 * u.deg)}) >>> print(t12) # Note new `sc_id` column with the IDs from join_func() sc_id sc_1 sc_2 deg,deg deg,deg ----- ------- -------- 1 1.0,0.0 1.05,0.0 1 1.1,0.0 1.05,0.0 2 2.0,0.0 2.1,0.0 """ if isinstance(distance_func, str): import astropy.coordinates as coords try: distance_func = getattr(coords, distance_func) except AttributeError as err: raise ValueError( "distance_func must be a function in astropy.coordinates" ) from err else: from inspect import isfunction if not isfunction(distance_func): raise ValueError("distance_func must be a str or function") def join_func(sc1, sc2): # Call the appropriate SkyCoord method to find pairs within distance idxs1, idxs2, d2d, d3d = distance_func(sc1, sc2, distance) # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. 
ids1 = np.zeros(len(sc1), dtype=int) ids2 = np.zeros(len(sc2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idx2 in zip(idxs1, idxs2): # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1. if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of the join_func() closure return ids1, ids2 return join_func
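A hedged sketch of the 3-D case mentioned above, with made-up catalog values: passing ``distance_func='search_around_3d'`` and a length-unit ``distance`` matches on 3-D separation rather than on-sky angle.

from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import join_skycoord

# The coordinates need distances so that 3-d separations are defined.
sc1 = SkyCoord([0, 1] * u.deg, [0, 0] * u.deg, distance=[1, 1] * u.pc)
sc2 = SkyCoord([0.05, 2] * u.deg, [0, 0] * u.deg, distance=[1, 1] * u.pc)

join_func = join_skycoord(0.01 * u.pc, distance_func='search_around_3d')
ids1, ids2 = join_func(sc1, sc2)
# Only the first points lie within 0.01 pc of each other, so this gives
# ids1 == [1, 2] and ids2 == [1, 3].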
Helper function to join table columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are matched by computing the distance between points and accepting values below ``distance``. This numerical "fuzzy" match can apply to 1-D or 2-D columns, where in the latter case the distance is a vector distance. The distance cross-matching is done using `scipy.spatial.KDTree`. If necessary you can tweak the default behavior by providing ``dict`` values for the ``kdtree_args`` or ``query_args``. Parameters ---------- distance : float or `~astropy.units.Quantity` ['length'] Maximum distance between points to be considered a join match kdtree_args : dict, None Optional extra args for `~scipy.spatial.KDTree` query_args : dict, None Optional extra args for `~scipy.spatial.KDTree.query_ball_tree` Returns ------- join_func : function Function that accepts two columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- >>> from astropy.table import Table, join_distance >>> from astropy import table >>> c1 = [0, 1, 1.1, 2] >>> c2 = [0.5, 1.05, 2.1] >>> t1 = Table([c1], names=['col']) >>> t2 = Table([c2], names=['col']) >>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)}) >>> print(t12) col_id col_1 col_2 ------ ----- ----- 1 1.0 1.05 1 1.1 1.05 2 2.0 2.1 3 0.0 -- 4 -- 0.5
def join_distance(distance, kdtree_args=None, query_args=None): """Helper function to join table columns using distance matching. This function is intended for use in ``table.join()`` to allow performing a table join where the key columns are matched by computing the distance between points and accepting values below ``distance``. This numerical "fuzzy" match can apply to 1-D or 2-D columns, where in the latter case the distance is a vector distance. The distance cross-matching is done using `scipy.spatial.KDTree`. If necessary you can tweak the default behavior by providing ``dict`` values for the ``kdtree_args`` or ``query_args``. Parameters ---------- distance : float or `~astropy.units.Quantity` ['length'] Maximum distance between points to be considered a join match kdtree_args : dict, None Optional extra args for `~scipy.spatial.KDTree` query_args : dict, None Optional extra args for `~scipy.spatial.KDTree.query_ball_tree` Returns ------- join_func : function Function that accepts two columns (col1, col2) and returns the tuple (ids1, ids2) of pair-matched unique identifiers. Examples -------- >>> from astropy.table import Table, join_distance >>> from astropy import table >>> c1 = [0, 1, 1.1, 2] >>> c2 = [0.5, 1.05, 2.1] >>> t1 = Table([c1], names=['col']) >>> t2 = Table([c2], names=['col']) >>> t12 = table.join(t1, t2, join_type='outer', join_funcs={'col': join_distance(0.2)}) >>> print(t12) col_id col_1 col_2 ------ ----- ----- 1 1.0 1.05 1 1.1 1.05 2 2.0 2.1 3 0.0 -- 4 -- 0.5 """ try: from scipy.spatial import KDTree except ImportError as exc: raise ImportError("scipy is required to use join_distance()") from exc if kdtree_args is None: kdtree_args = {} if query_args is None: query_args = {} def join_func(col1, col2): if col1.ndim > 2 or col2.ndim > 2: raise ValueError("columns for join_distance must be 1- or 2-dimensional") if isinstance(distance, Quantity): # Convert to np.array with common unit col1 = col1.to_value(distance.unit) col2 = col2.to_value(distance.unit) dist = distance.value else: # Convert to np.array to allow later in-place shape changing col1 = np.asarray(col1) col2 = np.asarray(col2) dist = distance # Ensure columns are pure np.array and are 2-D for use with KDTree if col1.ndim == 1: col1.shape = col1.shape + (1,) if col2.ndim == 1: col2.shape = col2.shape + (1,) # Cross-match col1 and col2 within dist using KDTree kd1 = KDTree(col1, **kdtree_args) kd2 = KDTree(col2, **kdtree_args) nears = kd1.query_ball_tree(kd2, r=dist, **query_args) # Output of above is nears which is a list of lists, where the outer # list corresponds to each item in col1, and where the inner lists are # indexes into col2 of elements within the distance tolerance. This # identifies col1 / col2 near pairs. # Now convert that into unique identifiers for each near-pair. This is # taken to be transitive, so that if points 1 and 2 are "near" and points # 1 and 3 are "near", then 1, 2, and 3 are all given the same identifier. # This identifier will then be used in the table join matching. # Identifiers for each column, initialized to all zero. ids1 = np.zeros(len(col1), dtype=int) ids2 = np.zeros(len(col2), dtype=int) # Start the identifier count at 1 id_ = 1 for idx1, idxs2 in enumerate(nears): for idx2 in idxs2: # If this col1 point is previously identified then set corresponding # col2 point to same identifier. Likewise for col2 and col1.
if ids1[idx1] > 0: ids2[idx2] = ids1[idx1] elif ids2[idx2] > 0: ids1[idx1] = ids2[idx2] else: # Not yet seen so set identifier for col1 and col2 ids1[idx1] = id_ ids2[idx2] = id_ id_ += 1 # Fill in unique identifiers for points with no near neighbor for ids in (ids1, ids2): for idx in np.flatnonzero(ids == 0): ids[idx] = id_ id_ += 1 # End of the join_func() closure return ids1, ids2 return join_func
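A hedged sketch of the 2-D "vector distance" case mentioned in the docstring (requires scipy; the values are invented for illustration): each key value is a point in the plane and the match uses the Euclidean distance computed by KDTree.

import numpy as np
from astropy import table
from astropy.table import Table, join_distance

t1 = Table()
t1['xy'] = np.array([[0.0, 0.0], [1.0, 1.0]])
t2 = Table()
t2['xy'] = np.array([[0.05, 0.0], [5.0, 5.0]])

# Only the first rows are within 0.1 of each other, so the inner join
# has a single row with xy_id == 1.
t12 = table.join(t1, t2, join_funcs={'xy': join_distance(0.1)})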
Perform a join of the left table with the right table on specified keys. Parameters ---------- left : `~astropy.table.Table`-like object Left side table in the join. If not a Table, will call ``Table(left)`` right : `~astropy.table.Table`-like object Right side table in the join. If not a Table, will call ``Table(right)`` keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' keys_left : str or list of str or list of column-like, optional Left column(s) used to match rows instead of ``keys`` arg. This can be a single left table column name or list of column names, or a list of column-like values with the same lengths as the left table. keys_right : str or list of str or list of column-like, optional Same as ``keys_left``, but for the right side of the join. keep_order : bool, optional By default, rows are sorted by the join keys. If True, preserve the order of rows from the left table for "inner" or "left" joins, or from the right table for "right" joins. For other join types this argument is ignored except that a warning is issued if ``keep_order=True``. uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation.
def join( left, right, keys=None, join_type="inner", *, keys_left=None, keys_right=None, keep_order=False, uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], metadata_conflicts="warn", join_funcs=None, ): """ Perform a join of the left table with the right table on specified keys. Parameters ---------- left : `~astropy.table.Table`-like object Left side table in the join. If not a Table, will call ``Table(left)`` right : `~astropy.table.Table`-like object Right side table in the join. If not a Table, will call ``Table(right)`` keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' keys_left : str or list of str or list of column-like, optional Left column(s) used to match rows instead of ``keys`` arg. This can be a single left table column name or list of column names, or a list of column-like values with the same lengths as the left table. keys_right : str or list of str or list of column-like, optional Same as ``keys_left``, but for the right side of the join. keep_order : bool, optional By default, rows are sorted by the join keys. If True, preserve the order of rows from the left table for "inner" or "left" joins, or from the right table for "right" joins. For other join types this argument is ignored except that a warning is issued if ``keep_order=True``. uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Try converting inputs to Table as needed if not isinstance(left, Table): left = Table(left) if not isinstance(right, Table): right = Table(right) # Define a magic key that won't conflict with any user column name. This is to # support the keep_order argument. In this case a temporary column is added to the # left or right table to keep track of the original row order. After joining, the # order is restored and the temporary column is removed. sort_table_index_key = "__astropy_table_keep_order_sort_index__" sort_table = None if keep_order: if join_type not in ["left", "right", "inner"]: # Keep order is not meaningful for an outer join and cartesian join is # already ordered by left (primary) then right (secondary). warnings.warn( "keep_order=True is only supported for left, right, and inner joins", UserWarning, stacklevel=2, ) else: sort_table = right if join_type == "right" else left sort_table[sort_table_index_key] = np.arange(len(sort_table)) # In case keep_order=True we need try/finally to ensure that the temporary column # is removed even if an exception is raised.
try: out = _join( left, right, keys, join_type, uniq_col_name, table_names, metadata_conflicts, join_funcs, keys_left=keys_left, keys_right=keys_right, ) if sort_table is not None: # Sort joined table to the original order and remove the temporary column. out.sort(sort_table_index_key) del out[sort_table_index_key] finally: if sort_table is not None: # If sort_table is not None that implies keep_order=True. del sort_table[sort_table_index_key] # Merge the column and table meta data. Table subclasses might override # these methods for custom merge behavior. _merge_table_meta(out, [left, right], metadata_conflicts=metadata_conflicts) return out
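A short illustration of the ``keep_order`` behavior described above: by default the result is sorted by the join keys, while ``keep_order=True`` restores the left table's row order for an inner join.

from astropy.table import Table, join

t1 = Table({'key': [2, 1, 3], 'a': [10, 20, 30]})
t2 = Table({'key': [1, 2, 3], 'b': [100, 200, 300]})

print(join(t1, t2, keys='key')['key'].tolist())                   # [1, 2, 3]
print(join(t1, t2, keys='key', keep_order=True)['key'].tolist())  # [2, 1, 3]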
Take a set difference of table rows. The row set difference will contain all rows in ``table1`` that are not present in ``table2``. If the keys parameter is not defined, all columns in ``table1`` will be included in the output table. Parameters ---------- table1 : `~astropy.table.Table` ``table1`` is on the left side of the set difference. table2 : `~astropy.table.Table` ``table2`` is on the right side of the set difference. keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns in ``table1``. Returns ------- diff_table : `~astropy.table.Table` New table containing the set difference between tables. If the set difference is empty, an empty table will be returned. Examples -------- To get a set difference between two tables:: >>> from astropy.table import setdiff, Table >>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b')) >>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b')) >>> print(t1) a b --- --- 1 c 4 d 9 f >>> print(t2) a b --- --- 1 c 5 b 9 f >>> print(setdiff(t1, t2)) a b --- --- 4 d >>> print(setdiff(t2, t1)) a b --- --- 5 b
def setdiff(table1, table2, keys=None): """ Take a set difference of table rows. The row set difference will contain all rows in ``table1`` that are not present in ``table2``. If the keys parameter is not defined, all columns in ``table1`` will be included in the output table. Parameters ---------- table1 : `~astropy.table.Table` ``table1`` is on the left side of the set difference. table2 : `~astropy.table.Table` ``table2`` is on the right side of the set difference. keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns in ``table1``. Returns ------- diff_table : `~astropy.table.Table` New table containing the set difference between tables. If the set difference is empty, an empty table will be returned. Examples -------- To get a set difference between two tables:: >>> from astropy.table import setdiff, Table >>> t1 = Table({'a': [1, 4, 9], 'b': ['c', 'd', 'f']}, names=('a', 'b')) >>> t2 = Table({'a': [1, 5, 9], 'b': ['c', 'b', 'f']}, names=('a', 'b')) >>> print(t1) a b --- --- 1 c 4 d 9 f >>> print(t2) a b --- --- 1 c 5 b 9 f >>> print(setdiff(t1, t2)) a b --- --- 4 d >>> print(setdiff(t2, t1)) a b --- --- 5 b """ if keys is None: keys = table1.colnames # Check that all keys are in table1 and table2 for tbl, tbl_str in ((table1, "table1"), (table2, "table2")): diff_keys = np.setdiff1d(keys, tbl.colnames) if len(diff_keys) != 0: raise ValueError( f"The {diff_keys} columns are missing from {tbl_str}, cannot take " "a set difference." ) # Make a light internal copy of both tables t1 = table1.copy(copy_data=False) t1.meta = {} t1.keep_columns(keys) t1["__index1__"] = np.arange(len(table1)) # Keep track of row indices # Make a light internal copy to avoid touching table2 t2 = table2.copy(copy_data=False) t2.meta = {} t2.keep_columns(keys) # Dummy column to recover rows after join t2["__index2__"] = np.zeros(len(t2), dtype=np.uint8) # dummy column t12 = _join(t1, t2, join_type="left", keys=keys, metadata_conflicts="silent") # If t12 index2 is masked then that means some rows were in table1 but not table2. if hasattr(t12["__index2__"], "mask"): # Define bool mask of table1 rows not in table2 diff = t12["__index2__"].mask # Get the row indices of table1 for those rows idx = t12["__index1__"][diff] # Select corresponding table1 rows straight from table1 to ensure # correct table and column types. t12_diff = table1[idx] else: t12_diff = table1[[]] return t12_diff
Stack columns within tables depth-wise. A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack depth-wise with the current table Table columns should have same shape and name for depth-wise stacking join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables depth-wise do:: >>> from astropy.table import dstack, Table >>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b')) >>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b')) >>> print(t1) a b --- --- 1.0 3.0 2.0 4.0 >>> print(t2) a b --- --- 5.0 7.0 6.0 8.0 >>> print(dstack([t1, t2])) a b ---------- ---------- 1.0 .. 5.0 3.0 .. 7.0 2.0 .. 6.0 4.0 .. 8.0
def dstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack columns within tables depth-wise. A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack depth-wise with the current table Table columns should have same shape and name for depth-wise stacking join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables depth-wise do:: >>> from astropy.table import dstack, Table >>> t1 = Table({'a': [1., 2.], 'b': [3., 4.]}, names=('a', 'b')) >>> t2 = Table({'a': [5., 6.], 'b': [7., 8.]}, names=('a', 'b')) >>> print(t1) a b --- --- 1.0 3.0 2.0 4.0 >>> print(t2) a b --- --- 5.0 7.0 6.0 8.0 >>> print(dstack([t1, t2])) a b ---------- ---------- 1.0 .. 5.0 3.0 .. 7.0 2.0 .. 6.0 4.0 .. 8.0 """ _check_join_type(join_type, "dstack") tables = _get_list_of_tables(tables) if len(tables) == 1: return tables[0] # no point in stacking a single table n_rows = {len(table) for table in tables} if len(n_rows) != 1: raise ValueError("Table lengths must all match for dstack") n_row = n_rows.pop() out = vstack(tables, join_type, metadata_conflicts) for name, col in out.columns.items(): col = out[name] # Reshape so that each original column is now in a row. # If entries are not 0-dim then those additional shape dims # are just carried along. # [x x x y y y] => [[x x x], # [y y y]] new_shape = (len(tables), n_row) + col.shape[1:] try: col.shape = (len(tables), n_row) + col.shape[1:] except AttributeError: col = col.reshape(new_shape) # Transpose the table and row axes to get to # [[x, y], # [x, y] # [x, y]] axes = np.arange(len(col.shape)) axes[:2] = [1, 0] # This temporarily makes `out` be corrupted (columns of different # length) but it all works out in the end. out.columns.__setitem__(name, col.transpose(axes), validated=True) return out
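As a concrete check of the reshape/transpose logic above: after ``dstack`` each output column has shape ``(n_rows, n_tables)`` (plus any trailing dims), so row ``i`` collects the ``i``-th values from all input tables.

from astropy.table import Table, dstack

t1 = Table({'a': [1.0, 2.0]})
t2 = Table({'a': [5.0, 6.0]})

out = dstack([t1, t2])
print(out['a'].shape)        # (2, 2)
print(out['a'][0].tolist())  # [1.0, 5.0]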
Stack tables vertically (along rows). A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along rows (vertically) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import vstack, Table >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) a b --- --- 5 7 6 8 >>> print(vstack([t1, t2])) a b --- --- 1 3 2 4 5 7 6 8
def vstack(tables, join_type="outer", metadata_conflicts="warn"): """ Stack tables vertically (along rows). A ``join_type`` of 'exact' means that the tables must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Table(s) to stack along rows (vertically) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. Examples -------- To stack two tables along rows do:: >>> from astropy.table import vstack, Table >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'a': [5, 6], 'b': [7, 8]}, names=('a', 'b')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) a b --- --- 5 7 6 8 >>> print(vstack([t1, t2])) a b --- --- 1 3 2 4 5 7 6 8 """ _check_join_type(join_type, "vstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table out = _vstack(tables, join_type, metadata_conflicts) # Merge table metadata _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out
Stack tables along columns (horizontally). A ``join_type`` of 'exact' means that the tables must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Tables to stack along columns (horizontally) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None List of table names used when generating unique output column names. The default is ['1', '2', ..]. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. See Also -------- Table.add_columns, Table.replace_column, Table.update Examples -------- To stack two tables horizontally (along columns) do:: >>> from astropy.table import Table, hstack >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) c d --- --- 5 7 6 8 >>> print(hstack([t1, t2])) a b c d --- --- --- --- 1 3 5 7 2 4 6 8
def hstack( tables, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, metadata_conflicts="warn", ): """ Stack tables along columns (horizontally). A ``join_type`` of 'exact' means that the tables must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with table values being masked where no common values are available. Parameters ---------- tables : `~astropy.table.Table` or `~astropy.table.Row` or list thereof Tables to stack along columns (horizontally) with the current table join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None List of table names used when generating unique output column names. The default is ['1', '2', ..]. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. See Also -------- Table.add_columns, Table.replace_column, Table.update Examples -------- To stack two tables horizontally (along columns) do:: >>> from astropy.table import Table, hstack >>> t1 = Table({'a': [1, 2], 'b': [3, 4]}, names=('a', 'b')) >>> t2 = Table({'c': [5, 6], 'd': [7, 8]}, names=('c', 'd')) >>> print(t1) a b --- --- 1 3 2 4 >>> print(t2) c d --- --- 5 7 6 8 >>> print(hstack([t1, t2])) a b c d --- --- --- --- 1 3 5 7 2 4 6 8 """ _check_join_type(join_type, "hstack") tables = _get_list_of_tables(tables) # validates input if len(tables) == 1: return tables[0] # no point in stacking a single table out = _hstack(tables, join_type, uniq_col_name, table_names) _merge_table_meta(out, tables, metadata_conflicts=metadata_conflicts) return out
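A small sketch of the 'outer' vs 'inner' row handling with input tables of different lengths: the default pads the shorter table with masked values, while 'inner' truncates to the common rows.

from astropy.table import Table, hstack

t1 = Table({'a': [1, 2, 3]})
t2 = Table({'b': [10, 20]})

out = hstack([t1, t2])  # join_type='outer' by default
print(out['b'].mask.tolist())                    # [False, False, True]
print(len(hstack([t1, t2], join_type='inner')))  # 2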
Returns the unique rows of a table. Parameters ---------- input_table : table-like keys : str or list of str Name(s) of column(s) used to create unique rows. Default is to use all columns. keep : {'first', 'last', 'none'} Whether to keep the first or last row for each set of duplicates. If 'none', all rows that are duplicate are removed, leaving only rows that are already unique in the input. Default is 'first'. silent : bool If `True`, masked value column(s) are silently removed from ``keys``. If `False`, an exception is raised when ``keys`` contains masked value column(s). Default is `False`. Returns ------- unique_table : `~astropy.table.Table` object New table containing only the unique rows of ``input_table``. Examples -------- >>> from astropy.table import unique, Table >>> import numpy as np >>> table = Table(data=[[1,2,3,2,3,3], ... [2,3,4,5,4,6], ... [3,4,5,6,7,8]], ... names=['col1', 'col2', 'col3'], ... dtype=[np.int32, np.int32, np.int32]) >>> table <Table length=6> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 2 5 6 3 4 7 3 6 8 >>> unique(table, keys='col1') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 >>> unique(table, keys=['col1'], keep='last') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 5 6 3 6 8 >>> unique(table, keys=['col1', 'col2']) <Table length=5> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 4 5 3 6 8 >>> unique(table, keys=['col1', 'col2'], keep='none') <Table length=4> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 6 8 >>> unique(table, keys=['col1'], keep='none') <Table length=1> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3
def unique(input_table, keys=None, silent=False, keep="first"): """ Returns the unique rows of a table. Parameters ---------- input_table : table-like keys : str or list of str Name(s) of column(s) used to create unique rows. Default is to use all columns. keep : {'first', 'last', 'none'} Whether to keep the first or last row for each set of duplicates. If 'none', all rows that are duplicate are removed, leaving only rows that are already unique in the input. Default is 'first'. silent : bool If `True`, masked value column(s) are silently removed from ``keys``. If `False`, an exception is raised when ``keys`` contains masked value column(s). Default is `False`. Returns ------- unique_table : `~astropy.table.Table` object New table containing only the unique rows of ``input_table``. Examples -------- >>> from astropy.table import unique, Table >>> import numpy as np >>> table = Table(data=[[1,2,3,2,3,3], ... [2,3,4,5,4,6], ... [3,4,5,6,7,8]], ... names=['col1', 'col2', 'col3'], ... dtype=[np.int32, np.int32, np.int32]) >>> table <Table length=6> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 2 5 6 3 4 7 3 6 8 >>> unique(table, keys='col1') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 3 4 5 >>> unique(table, keys=['col1'], keep='last') <Table length=3> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 5 6 3 6 8 >>> unique(table, keys=['col1', 'col2']) <Table length=5> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 4 5 3 6 8 >>> unique(table, keys=['col1', 'col2'], keep='none') <Table length=4> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 2 3 4 2 5 6 3 6 8 >>> unique(table, keys=['col1'], keep='none') <Table length=1> col1 col2 col3 int32 int32 int32 ----- ----- ----- 1 2 3 """ if keep not in ("first", "last", "none"): raise ValueError("'keep' should be one of 'first', 'last', 'none'") if isinstance(keys, str): keys = [keys] if keys is None: keys = input_table.colnames else: if len(set(keys)) != len(keys): raise ValueError("duplicate key names") # Check for columns with masked values for key in keys[:]: col = input_table[key] if hasattr(col, "mask") and np.any(col.mask): if not silent: raise ValueError( "cannot use columns with masked values as keys; " f"remove column '{key}' from keys and rerun " "unique()" ) del keys[keys.index(key)] if len(keys) == 0: raise ValueError( "no column remained in ``keys``; " "unique() cannot work with masked value " "key columns" ) grouped_table = input_table.group_by(keys) indices = grouped_table.groups.indices if keep == "first": indices = indices[:-1] elif keep == "last": indices = indices[1:] - 1 else: indices = indices[:-1][np.diff(indices) == 1] return grouped_table[indices]
Find the column names mapping when merging the list of tables ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc.
def get_col_name_map( arrays, common_names, uniq_col_name="{col_name}_{table_name}", table_names=None ): """ Find the column names mapping when merging the list of tables ``arrays``. It is assumed that col names in ``common_names`` are to be merged into a single column while the rest will be uniquely represented in the output. The args ``uniq_col_name`` and ``table_names`` specify how to rename columns in case of conflicts. Returns a dict mapping each output column name to the input(s). This takes the form {outname : (col_name_0, col_name_1, ...), ... }. For key columns all of the input names will be present, while for the other non-key columns the value will be (col_name_0, None, ..) or (None, col_name_1, ..) etc. """ col_name_map = collections.defaultdict(lambda: [None] * len(arrays)) col_name_list = [] if table_names is None: table_names = [str(ii + 1) for ii in range(len(arrays))] for idx, array in enumerate(arrays): table_name = table_names[idx] for name in array.colnames: out_name = name if name in common_names: # If name is in the list of common_names then insert into # the column name list, but just once. if name not in col_name_list: col_name_list.append(name) else: # If name is not one of the common column outputs, and it collides # with the names in one of the other arrays, then rename others = list(arrays) others.pop(idx) if any(name in other.colnames for other in others): out_name = uniq_col_name.format( table_name=table_name, col_name=name ) col_name_list.append(out_name) col_name_map[out_name][idx] = name # Check for duplicate output column names col_name_count = Counter(col_name_list) repeated_names = [name for name, count in col_name_count.items() if count > 1] if repeated_names: raise TableMergeError( f"Merging column names resulted in duplicates: {repeated_names}. " "Change uniq_col_name or table_names args to fix this." ) # Convert col_name_map to a regular OrderedDict keyed in output column order col_name_map = OrderedDict((name, col_name_map[name]) for name in col_name_list) return col_name_map
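To make the mapping concrete, a quick sketch with two tables that share a key column 'key' and collide on a non-key column 'x' (this helper is module-internal, so it is called directly here):

from astropy.table import Table

t1 = Table({'key': [1], 'x': [1]})
t2 = Table({'key': [2], 'x': [2]})

print(dict(get_col_name_map([t1, t2], common_names=['key'])))
# {'key': ['key', 'key'], 'x_1': ['x', None], 'x_2': [None, 'x']}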
Find the dtype descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output.
def get_descrs(arrays, col_name_map): """ Find the dtype descrs resulting from merging the list of arrays' dtypes, using the column name mapping ``col_name_map``. Return a list of descrs for the output. """ out_descrs = [] for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column in_cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] # List of names of the columns that contribute to this output column. names = [name for name in in_names if name is not None] # Output dtype is the superset of all dtypes in in_cols try: dtype = common_dtype(in_cols) except TableMergeError as tme: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( f"The '{names[0]}' columns have incompatible types: " f"{tme._incompat_types}" ) from tme # Make sure all input shapes are the same uniq_shapes = {col.shape[1:] for col in in_cols} if len(uniq_shapes) != 1: raise TableMergeError(f"Key columns {names!r} have different shape") shape = uniq_shapes.pop() if out_name is not None: out_name = str(out_name) out_descrs.append((out_name, dtype, shape)) return out_descrs
Use numpy to find the common dtype for a list of columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void
def common_dtype(cols): """ Use numpy to find the common dtype for a list of columns. Only allow columns within the following fundamental numpy data types: np.bool_, np.object_, np.number, np.character, np.void """ try: return metadata.common_dtype(cols) except metadata.MergeConflictError as err: tme = TableMergeError(f"Columns have incompatible types {err._incompat_types}") tme._incompat_types = err._incompat_types raise tme from err
Apply join_funcs.
def _apply_join_funcs(left, right, keys, join_funcs): """Apply join_funcs.""" # Make light copies of left and right, then add new index columns. left = left.copy(copy_data=False) right = right.copy(copy_data=False) for key, join_func in join_funcs.items(): ids1, ids2 = join_func(left[key], right[key]) # Define a unique id_key name, and keep adding underscores until we have # a name not yet present. id_key = key + "_id" while id_key in left.columns or id_key in right.columns: id_key = id_key[:-2] + "_id" keys = tuple(id_key if orig_key == key else orig_key for orig_key in keys) left.add_column(ids1, index=0, name=id_key) # [id_key] = ids1 right.add_column(ids2, index=0, name=id_key) # [id_key] = ids2 return left, right, keys
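A sketch of the effect on the inputs (requires scipy via ``join_distance``; values are illustrative): the key column gains a companion integer id column and ``keys`` is rewritten to point at it.

from astropy.table import Table, join_distance

left = Table({'col': [0.0, 1.0]})
right = Table({'col': [1.05]})

left2, right2, keys = _apply_join_funcs(left, right, ('col',), {'col': join_distance(0.2)})
print(keys)            # ('col_id',)
print(left2.colnames)  # ['col_id', 'col']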
Perform a join of the left and right Tables on specified keys. Parameters ---------- left : Table Left side table in the join right : Table Right side table in the join keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation.
def _join( left, right, keys=None, join_type="inner", uniq_col_name="{col_name}_{table_name}", table_names=["1", "2"], metadata_conflicts="warn", join_funcs=None, keys_left=None, keys_right=None, ): """ Perform a join of the left and right Tables on specified keys. Parameters ---------- left : Table Left side table in the join right : Table Right side table in the join keys : str or list of str Name(s) of column(s) used to match rows of left and right tables. Default is to use all columns which are common to both tables. join_type : str Join type ('inner' | 'outer' | 'left' | 'right' | 'cartesian'), default is 'inner' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None Two-element list of table names used when generating unique output column names. The default is ['1', '2']. metadata_conflicts : str How to proceed with metadata conflicts. This should be one of: * ``'silent'``: silently pick the last conflicting meta-data value * ``'warn'``: pick the last conflicting meta-data value, but emit a warning (default) * ``'error'``: raise an exception. join_funcs : dict, None Dict of functions to use for matching the corresponding key column(s). See `~astropy.table.join_skycoord` for an example and details. Returns ------- joined_table : `~astropy.table.Table` object New table containing the result of the join operation. """ # Special column name for cartesian join, should never collide with real column cartesian_index_name = "__table_cartesian_join_temp_index__" if join_type not in ("inner", "outer", "left", "right", "cartesian"): raise ValueError( "The 'join_type' argument should be in 'inner', " "'outer', 'left', 'right', or 'cartesian' " f"(got '{join_type}' instead)" ) if join_type == "cartesian": if keys: raise ValueError("cannot supply keys for a cartesian join") if join_funcs: raise ValueError("cannot supply join_funcs for a cartesian join") # Make light copies of left and right, then add temporary index columns # with all the same value so later an outer join turns into a cartesian join. left = left.copy(copy_data=False) right = right.copy(copy_data=False) left[cartesian_index_name] = np.uint8(0) right[cartesian_index_name] = np.uint8(0) keys = (cartesian_index_name,) # Handle the case of join key columns that are different between left and # right via keys_left/keys_right args. This is done by saving the original # input tables and making new left and right tables that contain only the # key cols but with common column names ['0', '1', etc].
# This sets `keys` to those fake key names in the left and right tables. if keys_left is not None or keys_right is not None: left_orig = left right_orig = right left, right, keys = _join_keys_left_right( left, right, keys, keys_left, keys_right, join_funcs ) if keys is None: keys = tuple(name for name in left.colnames if name in right.colnames) if len(keys) == 0: raise TableMergeError("No keys in common between left and right tables") elif isinstance(keys, str): # If we have a single key, put it in a tuple keys = (keys,) # Check the key columns for arr, arr_label in ((left, "Left"), (right, "Right")): for name in keys: if name not in arr.colnames: raise TableMergeError( f"{arr_label} table does not have key column {name!r}" ) if hasattr(arr[name], "mask") and np.any(arr[name].mask): raise TableMergeError( f"{arr_label} key column {name!r} has missing values" ) if join_funcs is not None: if not all(key in keys for key in join_funcs): raise ValueError( f"join_funcs keys {join_funcs.keys()} must be a " f"subset of join keys {keys}" ) left, right, keys = _apply_join_funcs(left, right, keys, join_funcs) len_left, len_right = len(left), len(right) if len_left == 0 or len_right == 0: raise ValueError("input tables for join must both have at least one row") try: idxs, idx_sort = _get_join_sort_idxs(keys, left, right) except NotImplementedError: raise TypeError("one or more key columns are not sortable") # Now that we have idxs and idx_sort, revert to the original table args to # carry on with making the output joined table. `keys` is set to an empty # list so that all original left and right columns are included in the # output table. if keys_left is not None or keys_right is not None: keys = [] left = left_orig right = right_orig # Joined array dtype as a list of descr (name, type_str, shape) tuples col_name_map = get_col_name_map([left, right], keys, uniq_col_name, table_names) out_descrs = get_descrs([left, right], col_name_map) # Main inner loop in Cython to compute the cartesian product # indices for the given join type int_join_type = {"inner": 0, "outer": 1, "left": 2, "right": 3, "cartesian": 1}[ join_type ] masked, n_out, left_out, left_mask, right_out, right_mask = _np_utils.join_inner( idxs, idx_sort, len_left, int_join_type ) out = _get_out_class([left, right])() for out_name, dtype, shape in out_descrs: if out_name == cartesian_index_name: continue left_name, right_name = col_name_map[out_name] if left_name and right_name: # this is a key which comes from left and right cols = [left[left_name], right[right_name]] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( f"join unavailable for mixin column type(s): {col_cls.__name__}" ) out[out_name] = col_cls.info.new_like( cols, n_out, metadata_conflicts, out_name ) out[out_name][:] = np.where( right_mask, left[left_name].take(left_out), right[right_name].take(right_out), ) continue elif left_name: # out_name came from the left table name, array, array_out, array_mask = left_name, left, left_out, left_mask elif right_name: name, array, array_out, array_mask = ( right_name, right, right_out, right_mask, ) else: raise TableMergeError('Unexpected column names (maybe one is ""?)') # Select the correct elements from the original table col = array[name][array_out] # If the output column is masked then set the output column masking # accordingly. Check for columns that don't support a mask attribute.
if masked and np.any(array_mask): # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) # array_mask is 1-d corresponding to length of output column. We need to # make it have the correct shape for broadcasting, i.e. (length, 1, 1, ..). # Mixin columns might not have ndim attribute so use len(col.shape). array_mask.shape = (col.shape[0],) + (1,) * (len(col.shape) - 1) # Now broadcast to the correct final shape array_mask = np.broadcast_to(array_mask, col.shape) try: col[array_mask] = col.info.mask_val except Exception as err: # Not clear how different classes will fail here raise NotImplementedError( f"join requires masking column '{out_name}' but column" f" type {col.__class__.__name__} does not support masking" ) from err # Set the output table column to the new joined column out[out_name] = col return out
Do processing to handle keys_left / keys_right args for join. This takes the keys_left/right inputs and turns them into a list of left/right columns corresponding to those inputs (which can be column names or column data values). It also generates the list of fake key column names (strings of "0", "1", etc.) that correspond to the input keys.
def _join_keys_left_right(left, right, keys, keys_left, keys_right, join_funcs): """Do processing to handle keys_left / keys_right args for join. This takes the keys_left/right inputs and turns them into a list of left/right columns corresponding to those inputs (which can be column names or column data values). It also generates the list of fake key column names (strings of "0", "1", etc.) that correspond to the input keys. """ def _keys_to_cols(keys, table, label): # Process input `keys`, which is a str or list of str column names in # `table` or a list of column-like objects. The `label` is just for # error reporting. if isinstance(keys, str): keys = [keys] cols = [] for key in keys: if isinstance(key, str): try: cols.append(table[key]) except KeyError: raise ValueError(f"{label} table does not have key column {key!r}") else: if len(key) != len(table): raise ValueError( f"{label} table has different length from key {key}" ) cols.append(key) return cols if join_funcs is not None: raise ValueError("cannot supply join_funcs arg and keys_left / keys_right") if keys_left is None or keys_right is None: raise ValueError("keys_left and keys_right must both be provided") if keys is not None: raise ValueError( "keys arg must be None if keys_left and keys_right are supplied" ) cols_left = _keys_to_cols(keys_left, left, "left") cols_right = _keys_to_cols(keys_right, right, "right") if len(cols_left) != len(cols_right): raise ValueError("keys_left and keys_right args must have same length") # Make two new temp tables for the join with only the join columns and # key columns in common. keys = [f"{ii}" for ii in range(len(cols_left))] left = left.__class__(cols_left, names=keys, copy=False) right = right.__class__(cols_right, names=keys, copy=False) return left, right, keys
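The public route to this helper is the ``keys_left`` / ``keys_right`` arguments of ``join()``; a short sketch joining on differently-named columns:

from astropy.table import Table, join

t1 = Table({'id_a': [1, 2], 'x': [10, 20]})
t2 = Table({'id_b': [2, 3], 'y': [200, 300]})

t12 = join(t1, t2, keys_left='id_a', keys_right='id_b')
print(t12.colnames)  # ['id_a', 'x', 'id_b', 'y']
print(len(t12))      # 1 (only the value 2 matches)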
Check join_type arg in hstack and vstack. This specifically checks for the common mistake of calling vstack(t1, t2) instead of vstack([t1, t2]). The subsequent check of ``join_type in ('inner', ..)`` does not raise in this case.
def _check_join_type(join_type, func_name): """Check join_type arg in hstack and vstack. This specifically checks for the common mistake of calling vstack(t1, t2) instead of vstack([t1, t2]). The subsequent check of ``join_type in ('inner', ..)`` does not raise in this case. """ if not isinstance(join_type, str): msg = "`join_type` arg must be a string" if isinstance(join_type, Table): msg += ( ". Did you accidentally " f"call {func_name}(t1, t2, ..) instead of " f"{func_name}([t1, t2], ..)?" ) raise TypeError(msg) if join_type not in ("inner", "exact", "outer"): raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")
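The mistake this guards against, sketched:

from astropy.table import Table, vstack

t1 = Table({'a': [1]})
t2 = Table({'a': [2]})

vstack([t1, t2])  # correct: a single list argument
# vstack(t1, t2) would raise TypeError: `join_type` arg must be a string.
# Did you accidentally call vstack(t1, t2, ..) instead of vstack([t1, t2], ..)?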
Stack Tables vertically (by rows). A ``join_type`` of 'exact' means that the arrays must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by rows (vertically) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables.
def _vstack(arrays, join_type="outer", metadata_conflicts="warn"): """ Stack Tables vertically (by rows). A ``join_type`` of 'exact' means that the arrays must all have exactly the same column names (though the order can vary). If ``join_type`` is 'inner' then the intersection of common columns will be the output. A value of 'outer' (default) means the output will have the union of all columns, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by rows (vertically) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. """ # Trivial case of one input array if len(arrays) == 1: return arrays[0] # Start by assuming an outer match where all names go to output names = set(itertools.chain(*[arr.colnames for arr in arrays])) col_name_map = get_col_name_map(arrays, names) # If join_type is 'exact' then the output must have exactly the same # number of columns as each input array if join_type == "exact": for names in col_name_map.values(): if any(x is None for x in names): raise TableMergeError( "Inconsistent columns in input arrays " "(use 'inner' or 'outer' join_type to " "allow non-matching columns)" ) join_type = "outer" # For an inner join, keep only columns where all input arrays have that column if join_type == "inner": col_name_map = OrderedDict( (name, in_names) for name, in_names in col_name_map.items() if all(x is not None for x in in_names) ) if len(col_name_map) == 0: raise TableMergeError("Input arrays have no columns in common") lens = [len(arr) for arr in arrays] n_rows = sum(lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): # List of input arrays that contribute to this output column cols = [arr[name] for arr, name in zip(arrays, in_names) if name is not None] col_cls = _get_out_class(cols) if not hasattr(col_cls.info, "new_like"): raise NotImplementedError( f"vstack unavailable for mixin column type(s): {col_cls.__name__}" ) try: col = col_cls.info.new_like(cols, n_rows, metadata_conflicts, out_name) except metadata.MergeConflictError as err: # Beautify the error message when we are trying to merge columns with incompatible # types by including the name of the columns that originated the error. raise TableMergeError( f"The '{out_name}' columns have incompatible types: " f"{err._incompat_types}" ) from err idx0 = 0 for name, array in zip(in_names, arrays): idx1 = idx0 + len(array) if name in array.colnames: col[idx0:idx1] = array[name] else: # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[idx0:idx1] = col.info.mask_val except Exception as err: raise NotImplementedError( f"vstack requires masking column '{out_name}' but column" f" type {col.__class__.__name__} does not support masking" ) from err idx0 = idx1 out[out_name] = col return out
Stack tables horizontally (by columns). A ``join_type`` of 'exact' means that the arrays must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by columns (horizontally) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None List of table names used when generating unique output column names. The default is ['1', '2', ..]. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables.
def _hstack( arrays, join_type="outer", uniq_col_name="{col_name}_{table_name}", table_names=None, ): """ Stack tables horizontally (by columns). A ``join_type`` of 'exact' means that the arrays must all have exactly the same number of rows. If ``join_type`` is 'inner' then the intersection of rows will be the output. A value of 'outer' (default) means the output will have the union of all rows, with array values being masked where no common values are available. Parameters ---------- arrays : list of Tables Tables to stack by columns (horizontally) join_type : str Join type ('inner' | 'exact' | 'outer'), default is 'outer' uniq_col_name : str or None String used to generate a unique output column name in case of a conflict. The default is '{col_name}_{table_name}'. table_names : list of str or None List of table names used when generating unique output column names. The default is ['1', '2', ..]. Returns ------- stacked_table : `~astropy.table.Table` object New table containing the stacked data from the input tables. """ if table_names is None: table_names = [f"{ii + 1}" for ii in range(len(arrays))] if len(arrays) != len(table_names): raise ValueError("Number of arrays must match number of table_names") # Trivial case of one input array if len(arrays) == 1: return arrays[0] col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names) # If join_type is 'exact' then all input arrays must have the same length arr_lens = [len(arr) for arr in arrays] if join_type == "exact": if len(set(arr_lens)) > 1: raise TableMergeError( "Inconsistent number of rows in input arrays " "(use 'inner' or 'outer' join_type to allow " "non-matching rows)" ) join_type = "outer" # For an inner join, keep only the common rows if join_type == "inner": min_arr_len = min(arr_lens) if len(set(arr_lens)) > 1: arrays = [arr[:min_arr_len] for arr in arrays] arr_lens = [min_arr_len for arr in arrays] # If there are any output rows where one or more input arrays are missing # then the output must be masked. If any input arrays are masked then # output is masked. n_rows = max(arr_lens) out = _get_out_class(arrays)() for out_name, in_names in col_name_map.items(): for name, array, arr_len in zip(in_names, arrays, arr_lens): if name is None: continue if n_rows > arr_len: indices = np.arange(n_rows) indices[arr_len:] = 0 col = array[name][indices] # If col is a Column but not MaskedColumn then upgrade at this point # because masking is required. if isinstance(col, Column) and not isinstance(col, MaskedColumn): col = out.MaskedColumn(col, copy=False) if isinstance(col, Quantity) and not isinstance(col, Masked): col = Masked(col, copy=False) try: col[arr_len:] = col.info.mask_val except Exception as err: raise NotImplementedError( f"hstack requires masking column '{out_name}' but column" f" type {col.__class__.__name__} does not support masking" ) from err else: col = array[name][:n_rows] out[out_name] = col return out
Wrap format function to trap masked values. String format functions and most user functions will not be able to deal with masked values, so we wrap them to ensure that masked values are passed to str() instead.
def _use_str_for_masked_values(format_func): """Wrap format function to trap masked values. String format functions and most user functions will not be able to deal with masked values, so we wrap them to ensure that masked values are passed to str() instead. """ return lambda format_, val: ( str(val) if val is np.ma.masked else format_func(format_, val) )
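A tiny demonstration of the wrapper: the wrapped function falls back to str() only for the masked singleton.

import numpy as np

fmt = _use_str_for_masked_values(lambda format_, val: format(val, format_))
print(fmt('.2f', 1.2345))        # 1.23
print(fmt('.2f', np.ma.masked))  # --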
Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. For structured values, a new-style format string using the field names is also tried.
def _possible_string_format_functions(format_): """Iterate through possible string-derived format functions. A string can either be a format specifier for the format built-in, a new-style format string, or an old-style format string. For structured values, a new-style format string using the field names is also tried. """ yield lambda format_, val: format(val, format_) yield lambda format_, val: format_.format(val) yield lambda format_, val: format_ % val yield lambda format_, val: format_.format(**{k: val[k] for k in val.dtype.names})
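The three string styles the generator probes, shown directly on a float value (the final candidate only applies to structured values with named fields):

val = 3.14159
print(format(val, '.2f'))    # format-spec style       -> 3.14
print('{:.2f}'.format(val))  # new-style format string -> 3.14
print('%.2f' % val)          # old-style format string -> 3.14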
Return a wrapped ``auto_format_func`` function which is used in formatting table columns. This is primarily an internal function but gets used directly in other parts of astropy, e.g. `astropy.io.ascii`. Parameters ---------- col : Column or mixin column, optional The column being formatted, whose ``info._format_funcs`` dict caches the resolved format functions. Default is None. possible_string_format_functions : func, optional Function that yields possible string formatting functions (defaults to internal function to do this). Returns ------- Wrapped ``auto_format_func`` function
def get_auto_format_func( col=None, possible_string_format_functions=_possible_string_format_functions ): """ Return a wrapped ``auto_format_func`` function which is used in formatting table columns. This is primarily an internal function but gets used directly in other parts of astropy, e.g. `astropy.io.ascii`. Parameters ---------- col : Column or mixin column, optional The column being formatted, whose ``info._format_funcs`` dict caches the resolved format functions. Default is None. possible_string_format_functions : func, optional Function that yields possible string formatting functions (defaults to internal function to do this). Returns ------- Wrapped ``auto_format_func`` function """ def _auto_format_func(format_, val): """Format ``val`` according to ``format_`` for a plain format specifier, old- or new-style format strings, or using a user supplied function. More importantly, determine and cache (in _format_funcs) a function that will do this subsequently. In this way this complicated logic is only done for the first value. Returns the formatted value. """ if format_ is None: return default_format_func(format_, val) if format_ in col.info._format_funcs: return col.info._format_funcs[format_](format_, val) if callable(format_): format_func = lambda format_, val: format_(val) try: out = format_func(format_, val) if not isinstance(out, str): raise ValueError( f"Format function for value {val} returned {type(out)} " "instead of string type" ) except Exception as err: # For a masked element, the format function call likely failed # to handle it. Just return the string representation for now, # and retry when a non-masked value comes along. if val is np.ma.masked: return str(val) raise ValueError(f"Format function for value {val} failed.") from err # If the user-supplied function handles formatting masked elements, use # it directly. Otherwise, wrap it in a function that traps them. try: format_func(format_, np.ma.masked) except Exception: format_func = _use_str_for_masked_values(format_func) else: # For a masked element, we cannot set string-based format functions yet, # as all tests below will fail. Just return the string representation # of masked for now, and retry when a non-masked value comes along. if val is np.ma.masked: return str(val) for format_func in possible_string_format_functions(format_): try: # Does this string format method work? out = format_func(format_, val) # Require that the format statement actually did something. if out == format_: raise ValueError("the format passed in did nothing.") except Exception: continue else: break else: # None of the possible string functions passed muster. raise ValueError( f"unable to parse format string {format_} for its column." ) # String-based format functions will fail on masked elements; # wrap them in a function that traps them. format_func = _use_str_for_masked_values(format_func) col.info._format_funcs[format_] = format_func return out return _auto_format_func
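A hedged usage sketch: on the first call the wrapped function tries the candidate string formatters, settles on the old-style one for "%5.2f", and caches the resolved formatter on ``col.info._format_funcs``. The cache is an internal attribute; it is initialized defensively here, on the assumption that it may not exist on a freshly created column:

    from astropy.table import Column

    col = Column([1.2345, 6.789], name="x")
    col.info._format_funcs = {}  # internal per-column cache (assumed not pre-initialized)
    fmt_func = get_auto_format_func(col)
    assert fmt_func("%5.2f", col[0]) == " 1.23"
    assert "%5.2f" in col.info._format_funcs  # later calls reuse the cached formatter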
Get the set of names to show in pprint from the table pprint_include_names and pprint_exclude_names attributes. These may be fnmatch unix-style globs.
def _get_pprint_include_names(table): """Get the set of names to show in pprint from the table pprint_include_names and pprint_exclude_names attributes. These may be fnmatch unix-style globs. """ def get_matches(name_globs, default): match_names = set() if name_globs: # For None or () use the default for name in table.colnames: for name_glob in name_globs: if fnmatch.fnmatch(name, name_glob): match_names.add(name) break else: match_names.update(default) return match_names include_names = get_matches(table.pprint_include_names(), table.colnames) exclude_names = get_matches(table.pprint_exclude_names(), []) return include_names - exclude_names
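A short sketch of the glob matching, calling the private helper directly; ``pprint_include_names`` and ``pprint_exclude_names`` accept fnmatch patterns when assigned on a Table, and exclusion is applied after inclusion:

    from astropy.table import Table

    t = Table({"ra": [1.0], "dec": [2.0], "flux_g": [3.0], "flux_r": [4.0]})
    t.pprint_include_names = ["ra", "dec", "flux_*"]
    t.pprint_exclude_names = ["flux_r"]
    assert _get_pprint_include_names(t) == {"ra", "dec", "flux_g"}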
Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object to determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance, a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized.
def _represent_mixin_as_column(col, name, new_cols, mixin_cols, exclude_classes=()): """Carry out processing needed to serialize ``col`` in an output table consisting purely of plain ``Column`` or ``MaskedColumn`` columns. This relies on the object to determine if any transformation is required and may depend on the ``serialize_method`` and ``serialize_context`` context variables. For instance, a ``MaskedColumn`` may be stored directly to FITS, but can also be serialized as separate data and mask columns. This function builds up a list of plain columns in the ``new_cols`` arg (which is passed as a persistent list). This includes both plain columns from the original table and plain columns that represent data from serialized columns (e.g. ``jd1`` and ``jd2`` arrays from a ``Time`` column). For serialized columns the ``mixin_cols`` dict is updated with required attributes and information to subsequently reconstruct the table. Table mixin columns are always serialized and get represented by one or more data columns. In earlier versions of the code *only* mixin columns were serialized, hence the use within this code of "mixin" to imply serialization. Starting with version 3.1, the non-mixin ``MaskedColumn`` can also be serialized. """ obj_attrs = col.info._represent_as_dict() # If serialization is not required (see function docstring above) # or explicitly specified as excluded, then treat as a normal column. if not obj_attrs or col.__class__ in exclude_classes: new_cols.append(col) return # Subtlety here is handling mixin info attributes. The basic list of such # attributes is: 'name', 'unit', 'dtype', 'format', 'description', 'meta'. # - name: handled directly [DON'T store] # - unit: DON'T store if this is a parent attribute # - dtype: captured in plain Column if relevant [DON'T store] # - format: possibly irrelevant but settable post-object creation [DO store] # - description: DO store # - meta: DO store info = {} for attr, nontrivial in ( ("unit", lambda x: x is not None and x != ""), ("format", lambda x: x is not None), ("description", lambda x: x is not None), ("meta", lambda x: x), ): col_attr = getattr(col.info, attr) if nontrivial(col_attr): info[attr] = col_attr # Find column attributes that have the same length as the column itself. # These will be stored in the table as new columns (aka "data attributes"). # Examples include SkyCoord.ra (what is typically considered the data and is # always an array) and SkyCoord.obstime (which can be a scalar or an # array). data_attrs = [ key for key, value in obj_attrs.items() if getattr(value, "shape", ())[:1] == col.shape[:1] ] for data_attr in data_attrs: data = obj_attrs[data_attr] # New column name combines the old name and attribute # (e.g. skycoord.ra, skycoord.dec) unless it is the primary data # attribute for the column (e.g. value for Quantity or data for # MaskedColumn). For primary data, we attempt to store any info on # the format, etc., on the column, but not for ancillary data (e.g., # no sense to use a float format for a mask). is_primary = data_attr == col.info._represent_as_dict_primary_data if is_primary: new_name = name new_info = info else: new_name = name + "." + data_attr new_info = {} if not has_info_class(data, MixinInfo): col_cls = ( MaskedColumn if (hasattr(data, "mask") and np.any(data.mask)) else Column ) data = col_cls(data, name=new_name, **new_info) if is_primary: # Don't store info in the __serialized_columns__ dict for this column # since this is redundant with info stored on the new column. info = {} # Recurse.
# If this is anything that needs further serialization (i.e., # a Mixin column, a structured Column, a MaskedColumn for which mask is # stored, etc.), it will define obj_attrs[new_name]. Otherwise, it will # just add to new_cols and all we have to do is to link to the new name. _represent_mixin_as_column(data, new_name, new_cols, obj_attrs) obj_attrs[data_attr] = SerializedColumn( obj_attrs.pop(new_name, {"name": new_name}) ) # Strip out from info any attributes defined by the parent, # and store whatever remains. for attr in col.info.attrs_from_parent: if attr in info: del info[attr] if info: obj_attrs["__info__"] = info # Store the fully qualified class name if not isinstance(col, SerializedColumn): obj_attrs.setdefault("__class__", col.__module__ + "." + col.__class__.__name__) mixin_cols[name] = obj_attrs
Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` as one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of class Exclude any mixin columns which are instances of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0
def represent_mixins_as_columns(tbl, exclude_classes=()): """Represent input Table ``tbl`` using only `~astropy.table.Column` or `~astropy.table.MaskedColumn` objects. This function represents any mixin columns like `~astropy.time.Time` in ``tbl`` as one or more plain ``~astropy.table.Column`` objects and returns a new Table. A single mixin column may be split into multiple column components as needed for fully representing the column. This includes the possibility of recursive splitting, as shown in the example below. The new column names are formed as ``<column_name>.<component>``, e.g. ``sc.ra`` for a `~astropy.coordinates.SkyCoord` column named ``sc``. In addition to splitting columns, this function updates the table ``meta`` dictionary to include a dict named ``__serialized_columns__`` which provides additional information needed to construct the original mixin columns from the split columns. This function is used by astropy I/O when writing tables to ECSV, FITS, HDF5 formats. Note that if the table does not include any mixin columns then the original table is returned with no update to ``meta``. Parameters ---------- tbl : `~astropy.table.Table` or subclass Table to represent mixins as Columns exclude_classes : tuple of class Exclude any mixin columns which are instances of any classes in the tuple Returns ------- tbl : `~astropy.table.Table` New Table with updated columns, or else the original input ``tbl`` Examples -------- >>> from astropy.table import Table, represent_mixins_as_columns >>> from astropy.time import Time >>> from astropy.coordinates import SkyCoord >>> x = [100.0, 200.0] >>> obstime = Time([1999.0, 2000.0], format='jyear') >>> sc = SkyCoord([1, 2], [3, 4], unit='deg', obstime=obstime) >>> tbl = Table([sc, x], names=['sc', 'x']) >>> represent_mixins_as_columns(tbl) <Table length=2> sc.ra sc.dec sc.obstime.jd1 sc.obstime.jd2 x deg deg float64 float64 float64 float64 float64 ------- ------- -------------- -------------- ------- 1.0 3.0 2451180.0 -0.25 100.0 2.0 4.0 2451545.0 0.0 200.0 """ # Dict of metadata for serializing each column, keyed by column name. # Gets filled in place by _represent_mixin_as_column(). mixin_cols = {} # List of columns for the output table. For plain Column objects # this will just be the original column object. new_cols = [] # Go through table columns and represent each column as one or more # plain Column objects (in new_cols) + metadata (in mixin_cols). for col in tbl.itercols(): _represent_mixin_as_column( col, col.info.name, new_cols, mixin_cols, exclude_classes=exclude_classes ) # If no metadata was created then just return the original table. if mixin_cols: meta = deepcopy(tbl.meta) meta["__serialized_columns__"] = mixin_cols out = Table(new_cols, meta=meta, copy=False) else: out = tbl for col in out.itercols(): if not isinstance(col, Column) and col.__class__ not in exclude_classes: # This catches columns for which info has not been set up right and # therefore were not converted. See the corresponding test in # test_mixin.py for an example. raise TypeError( "failed to represent column " f"{col.info.name!r} ({col.__class__.__name__}) as one " "or more Column subclasses. This looks like a mixin class " "that does not have the correct _represent_as_dict() method " "in the class `info` attribute." ) return out
Call np.searchsorted or use a custom binary search if necessary.
def _searchsorted(array, val, side="left"): """ Call np.searchsorted or use a custom binary search if necessary. """ if hasattr(array, "searchsorted"): return array.searchsorted(val, side=side) # Python binary search begin = 0 end = len(array) while begin < end: mid = (begin + end) // 2 if val > array[mid]: begin = mid + 1 elif val < array[mid]: end = mid elif side == "right": begin = mid + 1 else: end = mid return begin
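A quick sketch of the pure-Python fallback path: a plain list has no ``searchsorted`` method, so the binary search above is used instead, matching numpy's ``side`` semantics:

    vals = [1, 3, 3, 7]
    assert _searchsorted(vals, 3, side="left") == 1   # before the run of equal values
    assert _searchsorted(vals, 3, side="right") == 3  # after the run of equal values
    assert _searchsorted(vals, 10) == 4               # past the end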
Array-interface compliant full description of a column. This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition.
def descr(col): """Array-interface compliant full description of a column. This returns a 3-tuple (name, type, shape) that can always be used in a structured array dtype definition. """ col_dtype = "O" if (col.info.dtype is None) else col.info.dtype col_shape = col.shape[1:] if hasattr(col, "shape") else () return (col.info.name, col_dtype, col_shape)
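For example, for a multidimensional column the shape entry records the per-row shape, which is exactly what a structured-array dtype definition needs:

    import numpy as np
    from astropy.table import Column

    c = Column(np.zeros((4, 3)), name="flux")  # 4 rows, each a length-3 vector
    assert descr(c) == ("flux", np.dtype("float64"), (3,))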
Check if the object's info is an instance of cls.
def has_info_class(obj, cls): """Check if the object's info is an instance of cls.""" # We check info on the class of the instance, since on the instance # itself accessing 'info' has side effects in that it sets # obj.__dict__['info'] if it does not exist already. return isinstance(getattr(obj.__class__, "info", None), cls)
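A small sketch, assuming the usual astropy class hierarchy in which `~astropy.units.Quantity` has a ``MixinInfo``-derived ``info`` while a plain `~astropy.table.Column` does not:

    import astropy.units as u
    from astropy.table import Column
    from astropy.utils.data_info import MixinInfo

    assert has_info_class(u.Quantity([1.0, 2.0]), MixinInfo)
    assert not has_info_class(Column([1.0, 2.0]), MixinInfo)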
Return list of column names if ``rows`` is a list of dict that defines table data. If rows is not a list of dict then return None.
def _get_names_from_list_of_dict(rows): """Return list of column names if ``rows`` is a list of dict that defines table data. If rows is not a list of dict then return None. """ if rows is None: return None names = set() for row in rows: if not isinstance(row, Mapping): return None names.update(row) return list(names)
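For instance (note that a set is used internally, so the order of the returned names is not guaranteed):

    rows = [{"a": 1, "b": 2}, {"b": 3, "c": 4}]
    assert sorted(_get_names_from_list_of_dict(rows)) == ["a", "b", "c"]
    assert _get_names_from_list_of_dict([[1, 2], [3, 4]]) is None  # not dicts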
Return a simple table for testing. Examples -------- :: >>> from astropy.table.table_helpers import simple_table >>> print(simple_table(3, 6, masked=True, kinds='ifOS')) a b c d e f --- --- -------- --- --- --- -- 1.0 {'c': 2} -- 5 5.0 2 2.0 -- e 6 -- 3 -- {'e': 4} f -- 7.0 Parameters ---------- size : int Number of table rows cols : int, optional Number of table columns. Defaults to number of kinds. kinds : str String consisting of the column dtype.kinds. This string will be cycled through to generate the column dtype. The allowed values are 'i', 'f', 'S', 'O'. masked : bool, optional Make the table masked. Default is False. Returns ------- out : `Table` New table with appropriate characteristics
def simple_table(size=3, cols=None, kinds="ifS", masked=False): """ Return a simple table for testing. Examples -------- :: >>> from astropy.table.table_helpers import simple_table >>> print(simple_table(3, 6, masked=True, kinds='ifOS')) a b c d e f --- --- -------- --- --- --- -- 1.0 {'c': 2} -- 5 5.0 2 2.0 -- e 6 -- 3 -- {'e': 4} f -- 7.0 Parameters ---------- size : int Number of table rows cols : int, optional Number of table columns. Defaults to number of kinds. kinds : str String consisting of the column dtype.kinds. This string will be cycled through to generate the column dtype. The allowed values are 'i', 'f', 'S', 'O'. masked : bool, optional Make the table masked. Default is False. Returns ------- out : `Table` New table with appropriate characteristics """ if cols is None: cols = len(kinds) if cols > 26: raise ValueError("Max 26 columns in simple_table") columns = [] names = [chr(ord("a") + ii) for ii in range(cols)] letters = np.array(list(string.ascii_letters)) for jj, kind in zip(range(cols), cycle(kinds)): if kind == "i": data = np.arange(1, size + 1, dtype=np.int64) + jj elif kind == "f": data = np.arange(size, dtype=np.float64) + jj elif kind == "S": indices = (np.arange(size) + jj) % len(letters) data = letters[indices] elif kind == "O": indices = (np.arange(size) + jj) % len(letters) vals = letters[indices] data = [{val: index} for val, index in zip(vals, indices)] else: raise ValueError("Unknown data kind") columns.append(Column(data)) table = Table(columns, names=names, masked=masked) if masked: for ii, col in enumerate(table.columns.values()): mask = np.array((np.arange(size) + ii) % 3, dtype=bool) col.mask = ~mask return table
Return a masked table from the io.votable test set that has a wide variety of stressing types.
def complex_table(): """ Return a masked table from the io.votable test set that has a wide variety of stressing types. """ import warnings from astropy.io.votable.table import parse from astropy.utils.data import get_pkg_data_filename with warnings.catch_warnings(): warnings.simplefilter("ignore") votable = parse( get_pkg_data_filename("../io/votable/tests/data/regression.xml"), pedantic=False, ) first_table = votable.get_first_table() table = first_table.to_table() return table
Register a mixin column 'handler'. A mixin column handler is a function that, given an arbitrary Python object, will return an object with the .info attribute that can then be used as a mixin column (this can be e.g. a copy of the object with a new attribute, a subclass instance, or a wrapper class - this is left up to the handler). The handler will be used on classes that have an exactly matching fully qualified name. Parameters ---------- fully_qualified_name : str The fully qualified name of the class that the handler can operate on, such as e.g. ``dask.array.core.Array``. handler : func The handler function. force : bool, optional Whether to overwrite any previous handler if there is already one for the same fully qualified name.
def register_mixin_handler(fully_qualified_name, handler, force=False): """ Register a mixin column 'handler'. A mixin column handler is a function that, given an arbitrary Python object, will return an object with the .info attribute that can then be used as a mixin column (this can be e.g. a copy of the object with a new attribute, a subclass instance, or a wrapper class - this is left up to the handler). The handler will be used on classes that have an exactly matching fully qualified name. Parameters ---------- fully_qualified_name : str The fully qualified name of the class that the handler can operate on, such as e.g. ``dask.array.core.Array``. handler : func The handler function. force : bool, optional Whether to overwrite any previous handler if there is already one for the same fully qualified name. """ if fully_qualified_name not in _handlers or force: _handlers[fully_qualified_name] = handler else: raise MixinRegistryError( f"Handler for class {fully_qualified_name} is already defined" )
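A hypothetical registration, for illustration only: the fully qualified name ``mypackage.arrays.MyArray`` and its ``.data`` attribute are made up here, not part of any real package:

    from astropy.table import Column

    def my_handler(obj):
        # Wrap the (hypothetical) object's array payload as a plain Column.
        return Column(obj.data)

    register_mixin_handler("mypackage.arrays.MyArray", my_handler)
    # Lookup by fully qualified name (see get_mixin_handler below).
    assert get_mixin_handler("mypackage.arrays.MyArray") is my_handler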
Given an arbitrary object, return the matching mixin handler (if any). Parameters ---------- obj : object or str The object to find a mixin handler for, or a fully qualified name. Returns ------- handler : None or func The matching handler, if found, or `None`
def get_mixin_handler(obj): """ Given an arbitrary object, return the matching mixin handler (if any). Parameters ---------- obj : object or str The object to find a mixin handler for, or a fully qualified name. Returns ------- handler : None or func The matching handler, if found, or `None` """ if isinstance(obj, str): return _handlers.get(obj) else: return _handlers.get( obj.__class__.__module__ + "." + obj.__class__.__name__, None )
Read a table and print to the standard output. Parameters ---------- filename : str The path to the table file (any format supported by Table.read). args : argparse.Namespace Parsed command-line arguments that control how the table is read and printed.
def showtable(filename, args): """ Read a table and print to the standard output. Parameters ---------- filename : str The path to the table file (any format supported by Table.read). args : argparse.Namespace Parsed command-line arguments that control how the table is read and printed. """ if args.info and args.stats: warnings.warn("--info and --stats cannot be used together", AstropyUserWarning) if any((args.max_lines, args.max_width, args.hide_unit, args.show_dtype)) and ( args.info or args.stats ): warnings.warn( "print parameters are ignored if --info or --stats is used", AstropyUserWarning, ) # these parameters are passed to Table.read if they are specified in the # command-line read_kwargs = ("hdu", "format", "table_id", "delimiter") kwargs = {k: v for k, v in vars(args).items() if k in read_kwargs and v is not None} try: table = Table.read(filename, **kwargs) if args.info: table.info("attributes") elif args.stats: table.info("stats") else: formatter = table.more if args.more else table.pprint formatter( max_lines=args.max_lines, max_width=args.max_width, show_unit=(False if args.hide_unit else None), show_dtype=(True if args.show_dtype else None), ) except OSError as e: log.error(str(e))
The main function called by the `showtable` script.
def main(args=None): """The main function called by the `showtable` script.""" parser = argparse.ArgumentParser( description=textwrap.dedent( """ Print tables from ASCII, FITS, HDF5, VOTable file(s). The tables are read with 'astropy.table.Table.read' and are printed with 'astropy.table.Table.pprint'. The default behavior is to make the table output fit onto a single screen page. For a long and wide table this will mean cutting out inner rows and columns. To print **all** the rows or columns use ``--max-lines=-1`` or ``--max-width=-1``, respectively. The complete list of supported formats can be found at http://astropy.readthedocs.io/en/latest/io/unified.html#built-in-table-readers-writers """ ) ) addarg = parser.add_argument addarg("filename", nargs="+", help="path to one or more files") addarg( "--format", help=( "input table format, should be specified if it " "cannot be automatically detected" ), ) addarg("--more", action="store_true", help="use the pager mode from Table.more") addarg( "--info", action="store_true", help="show information about the table columns" ) addarg( "--stats", action="store_true", help="show statistics about the table columns" ) # pprint arguments pprint_args = parser.add_argument_group("pprint arguments") addarg = pprint_args.add_argument addarg( "--max-lines", type=int, help=( "maximum number of lines in table output (default=screen " "length, -1 for no limit)" ), ) addarg( "--max-width", type=int, help="maximum width in table output (default=screen width, -1 for no limit)", ) addarg( "--hide-unit", action="store_true", help=( "hide the header row for unit (which is shown " "only if one or more columns has a unit)" ), ) addarg( "--show-dtype", action="store_true", help=( "always include a header row for column dtypes " "(otherwise shown only if any column is multidimensional)" ), ) # ASCII-specific arguments ascii_args = parser.add_argument_group("ASCII arguments") addarg = ascii_args.add_argument addarg("--delimiter", help="column delimiter string") # FITS-specific arguments fits_args = parser.add_argument_group("FITS arguments") addarg = fits_args.add_argument addarg("--hdu", help="name of the HDU to show") # HDF5-specific arguments hdf5_args = parser.add_argument_group("HDF5 arguments") addarg = hdf5_args.add_argument addarg("--path", help="the path from which to read the table") # VOTable-specific arguments votable_args = parser.add_argument_group("VOTable arguments") addarg = votable_args.add_argument addarg("--table-id", help="the table to read in") args = parser.parse_args(args) for idx, filename in enumerate(args.filename): if idx > 0: print() showtable(filename, args)
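A hedged usage sketch of the entry point; the filename and HDU name below are hypothetical, and a missing file is simply logged as an error by ``showtable``:

    # Programmatic equivalent of: showtable observations.fits --hdu EVENTS --max-lines -1
    main(["observations.fits", "--hdu", "EVENTS", "--max-lines", "-1"])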
Fixture to run all the tests for all available pickle protocols.
def protocol(request): """ Fixture to run all the tests for all available pickle protocols. """ return request.param
Fixture to return a set of columns for mixin testing which includes an index column 'i', two string cols 'a', 'b' (for joins etc), and one of the available mixin column types.
def mixin_cols(request): """ Fixture to return a set of columns for mixin testing which includes an index column 'i', two string cols 'a', 'b' (for joins etc), and one of the available mixin column types. """ cols = OrderedDict() mixin_cols = deepcopy(MIXIN_COLS) cols["i"] = table.Column([0, 1, 2, 3], name="i") cols["a"] = table.Column(["a", "b", "b", "c"], name="a") cols["b"] = table.Column(["b", "c", "a", "d"], name="b") cols["m"] = mixin_cols[request.param] return cols
Basic table
def T1b(request): """Basic table""" T = _get_test_table() return T
Basic table with or without index on integer column a
def T1(request): """Basic table with or without index on integer column a""" T = _get_test_table() if request.param: T.add_index("a") return T
Basic table where a column is integer or Quantity
def T1q(request): """Basic table where a column is integer or Quantity""" T = _get_test_table() if request.param: T["a"] = T["a"] * u.m return T
Basic table with or without index on column a, where a is integer or Quantity
def T1m(request): """Basic table with or without index on column a, where a is integer or Quantity""" T = _get_test_table() add_index, is_quantity = request.param if is_quantity: T["a"] = T["a"] * u.m if add_index: T.add_index("a") return T
Regression test for #1471: MaskedArray does not call __array_finalize__ so the meta-data was not getting copied over. By overloading _update_from we are able to work around this bug.
def test_getitem_metadata_regression(): """ Regression test for #1471: MaskedArray does not call __array_finalize__ so the meta-data was not getting copied over. By overloading _update_from we are able to work around this bug. """ # Make sure that meta-data gets propagated with __getitem__ c = table.Column( data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8} ) assert c[1:2].name == "a" assert c[1:2].description == "b" assert c[1:2].unit == "m" assert c[1:2].format == "%i" assert c[1:2].meta["c"] == 8 c = table.MaskedColumn( data=[1, 2], name="a", description="b", unit="m", format="%i", meta={"c": 8} ) assert c[1:2].name == "a" assert c[1:2].description == "b" assert c[1:2].unit == "m" assert c[1:2].format == "%i" assert c[1:2].meta["c"] == 8 # As above, but with take() - check the method and the function c = table.Column( data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8} ) for subset in [c.take([0, 1]), np.take(c, [0, 1])]: assert subset.name == "a" assert subset.description == "b" assert subset.unit == "m" assert subset.format == "%i" assert subset.meta["c"] == 8 # Metadata isn't copied for scalar values for subset in [c.take(0), np.take(c, 0)]: assert subset == 1 assert subset.shape == () assert not isinstance(subset, table.Column) c = table.MaskedColumn( data=[1, 2, 3], name="a", description="b", unit="m", format="%i", meta={"c": 8} ) for subset in [c.take([0, 1]), np.take(c, [0, 1])]: assert subset.name == "a" assert subset.description == "b" assert subset.unit == "m" assert subset.format == "%i" assert subset.meta["c"] == 8 # Metadata isn't copied for scalar values for subset in [c.take(0), np.take(c, 0)]: assert subset == 1 assert subset.shape == () assert not isinstance(subset, table.MaskedColumn)
Column is not designed to hold scalars, but for numpy 1.6 this can happen: >> type(np.std(table.Column([1, 2]))) astropy.table.column.Column
def test_scalar_column(): """ Column is not designed to hold scalars, but for numpy 1.6 this can happen: >> type(np.std(table.Column([1, 2]))) astropy.table.column.Column """ c = table.Column(1.5) assert repr(c) == "1.5" assert str(c) == "1.5"
Ensures that a QTable that gets assigned a unit switches to be Quantity-y
def test_qtable_column_conversion(): """ Ensures that a QTable that gets assigned a unit switches to be Quantity-y """ qtab = table.QTable([[1, 2], [3, 4.2]], names=["i", "f"]) assert isinstance(qtab["i"], table.column.Column) assert isinstance(qtab["f"], table.column.Column) qtab["i"].unit = "km/s" assert isinstance(qtab["i"], u.Quantity) assert isinstance(qtab["f"], table.column.Column) # should follow from the above, but good to make sure as a #4497 regression test assert isinstance(qtab["i"][0], u.Quantity) assert isinstance(qtab[0]["i"], u.Quantity) assert not isinstance(qtab["f"][0], u.Quantity) assert not isinstance(qtab[0]["f"], u.Quantity) # Regression test for #5342: if a function unit is assigned, the column # should become the appropriate FunctionQuantity subclass. qtab["f"].unit = u.dex(u.cm / u.s**2) assert isinstance(qtab["f"], u.Dex)
Test warnings associated with in-place assignment to a string column that results in truncation of the right hand side.
def test_string_truncation_warning(masked): """ Test warnings associated with in-place assignment to a string column that results in truncation of the right hand side. """ from inspect import currentframe, getframeinfo t = table.Table([["aa", "bb"]], names=["a"], masked=masked) t["a"][1] = "cc" t["a"][:] = "dd" with pytest.warns( table.StringTruncateWarning, match=r"truncated right side string\(s\) longer than 2 character\(s\)", ) as w: frameinfo = getframeinfo(currentframe()) t["a"][0] = "eee" # replace item with string that gets truncated assert t["a"][0] == "ee" assert len(w) == 1 # Make sure the warning points back to the user code line assert w[0].lineno == frameinfo.lineno + 1 assert "test_column" in w[0].filename with pytest.warns( table.StringTruncateWarning, match=r"truncated right side string\(s\) longer than 2 character\(s\)", ) as w: t["a"][:] = ["ff", "ggg"] # replace item with string that gets truncated assert np.all(t["a"] == ["ff", "gg"]) assert len(w) == 1 # Test the obscure case of assigning from an array that was originally # wider than any of the current elements (i.e. dtype is U4 but actual # elements are U1 at the time of assignment). val = np.array(["ffff", "gggg"]) val[:] = ["f", "g"] t["a"][:] = val assert np.all(t["a"] == ["f", "g"])
Test warnings associated with in-place assignment of a string to a masked column, specifically where the right hand side contains np.ma.masked.
def test_string_truncation_warning_masked(): """ Test warnings associated with in-place assignment of a string to a masked column, specifically where the right hand side contains np.ma.masked. """ # Test for strings, but also cover assignment of np.ma.masked to # int and float masked column setting. This was previously only # covered in an unrelated io.ascii test (test_line_endings) which # showed an unexpected difference between handling of str and numeric # masked arrays. for values in (["a", "b"], [1, 2], [1.0, 2.0]): mc = table.MaskedColumn(values) mc[1] = np.ma.masked assert np.all(mc.mask == [False, True]) mc[:] = np.ma.masked assert np.all(mc.mask == [True, True]) mc = table.MaskedColumn(["aa", "bb"]) with pytest.warns( table.StringTruncateWarning, match=r"truncated right side string\(s\) longer than 2 character\(s\)", ) as w: mc[:] = [np.ma.masked, "ggg"] # replace item with string that gets truncated assert mc[1] == "gg" assert np.all(mc.mask == [True, False]) assert len(w) == 1
Create a bytestring Column from strings (including unicode) in Py3.
def test_col_unicode_sandwich_create_from_str(Column): """ Create a bytestring Column from strings (including unicode) in Py3. """ # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. # Stress the system by injecting non-ASCII characters. uba = "bä" c = Column([uba, "def"], dtype="S") assert c.dtype.char == "S" assert c[0] == uba assert isinstance(c[0], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([uba, "def"]))
Create a Column of dtype object with bytestring in it and make sure it keeps the bytestring and does not convert to str when accessed.
def test_col_unicode_sandwich_bytes_obj(Column): """ Create a Column of dtype object with bytestring in it and make sure it keeps the bytestring and does not convert to str when accessed. """ c = Column([None, b"def"]) assert c.dtype.char == "O" assert not c[0] assert c[1] == b"def" assert isinstance(c[1], bytes) assert not isinstance(c[1], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([None, b"def"])) assert not np.all(c[:2] == np.array([None, "def"]))
Create a bytestring Column from bytes and ensure that it works in Python 3 in a convenient way like in Python 2.
def test_col_unicode_sandwich_bytes(Column): """ Create a bytestring Column from bytes and ensure that it works in Python 3 in a convenient way like in Python 2. """ # a-umlaut is a 2-byte character in utf-8, test fails with ascii encoding. # Stress the system by injecting non-ASCII characters. uba = "bä" uba8 = uba.encode("utf-8") c = Column([uba8, b"def"]) assert c.dtype.char == "S" assert c[0] == uba assert isinstance(c[0], str) assert isinstance(c[:0], table.Column) assert np.all(c[:2] == np.array([uba, "def"])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == "S" # Array / list comparisons assert np.all(c == [uba, "def"]) ok = c == [uba8, b"def"] assert type(ok) is type(c.data) assert ok.dtype.char == "?" assert np.all(ok) assert np.all(c == np.array([uba, "def"])) assert np.all(c == np.array([uba8, b"def"])) # Scalar compare cmps = (uba, uba8) for cmp in cmps: ok = c == cmp assert type(ok) is type(c.data) assert np.all(ok == [True, False])
Sanity check that Unicode Column behaves normally.
def test_col_unicode_sandwich_unicode(): """ Sanity check that Unicode Column behaves normally. """ uba = "bä" uba8 = uba.encode("utf-8") c = table.Column([uba, "def"], dtype="U") assert c[0] == uba assert isinstance(c[:0], table.Column) assert isinstance(c[0], str) assert np.all(c[:2] == np.array([uba, "def"])) assert isinstance(c[:], table.Column) assert c[:].dtype.char == "U" ok = c == [uba, "def"] assert type(ok) == np.ndarray assert ok.dtype.char == "?" assert np.all(ok) with warnings.catch_warnings(): # Ignore the FutureWarning in numpy >=1.24 (it is OK). warnings.filterwarnings("ignore", message=".*elementwise comparison failed.*") assert np.all(c != [uba8, b"def"])
Create a bytestring MaskedColumn and ensure that it works in Python 3 in a convenient way like in Python 2.
def test_masked_col_unicode_sandwich(): """ Create a bytestring MaskedColumn and ensure that it works in Python 3 in a convenient way like in Python 2. """ c = table.MaskedColumn([b"abc", b"def"]) c[1] = np.ma.masked assert isinstance(c[:0], table.MaskedColumn) assert isinstance(c[0], str) assert c[0] == "abc" assert c[1] is np.ma.masked assert isinstance(c[:], table.MaskedColumn) assert c[:].dtype.char == "S" ok = c == ["abc", "def"] assert ok[0] assert ok[1] is np.ma.masked assert np.all(c == [b"abc", b"def"]) assert np.all(c == np.array(["abc", "def"])) assert np.all(c == np.array([b"abc", b"def"])) for cmp in ("abc", b"abc"): ok = c == cmp assert type(ok) is np.ma.MaskedArray assert ok[0] assert ok[1] is np.ma.masked
Test setting items and slices of a bytestring Column with both bytes and str values.
def test_unicode_sandwich_set(Column): """ Test setting items and slices of a bytestring Column with both bytes and str values. """ uba = "bä" c = Column([b"abc", b"def"]) c[0] = b"aa" assert np.all(c == ["aa", "def"]) c[0] = uba # ä is a 2-byte character in utf-8, test fails with ascii encoding assert np.all(c == [uba, "def"]) assert c.pformat() == ["None", "----", " " + uba, " def"] c[:] = b"cc" assert np.all(c == ["cc", "cc"]) c[:] = uba assert np.all(c == [uba, uba]) c[:] = "" c[:] = [uba, b"def"] assert np.all(c == [uba, b"def"])
Test that comparing a bytestring Column/MaskedColumn with various str (unicode) object types gives the expected result. Tests #6838.
def test_unicode_sandwich_compare(class1, class2): """Test that comparing a bytestring Column/MaskedColumn with various str (unicode) object types gives the expected result. Tests #6838. """ obj1 = class1([b"a", b"c"]) if class2 is str: obj2 = "a" elif class2 is list: obj2 = ["a", "b"] else: obj2 = class2(["a", "b"]) assert np.all((obj1 == obj2) == [True, False]) assert np.all((obj2 == obj1) == [True, False]) assert np.all((obj1 != obj2) == [False, True]) assert np.all((obj2 != obj1) == [False, True]) assert np.all((obj1 > obj2) == [False, True]) assert np.all((obj2 > obj1) == [False, False]) assert np.all((obj1 <= obj2) == [True, False]) assert np.all((obj2 <= obj1) == [True, True]) assert np.all((obj1 < obj2) == [False, False]) assert np.all((obj2 < obj1) == [False, True]) assert np.all((obj1 >= obj2) == [True, True]) assert np.all((obj2 >= obj1) == [True, False])
Test the fix for #6839 from #6899.
def test_unicode_sandwich_masked_compare(): """Test the fix for #6839 from #6899.""" c1 = table.MaskedColumn(["a", "b", "c", "d"], mask=[True, False, True, False]) c2 = table.MaskedColumn([b"a", b"b", b"c", b"d"], mask=[True, True, False, False]) for cmp in ((c1 == c2), (c2 == c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert cmp[3] for cmp in ((c1 != c2), (c2 != c1)): assert cmp[0] is np.ma.masked assert cmp[1] is np.ma.masked assert cmp[2] is np.ma.masked assert not cmp[3]
Can a column's underlying data consistently be accessed via `.value`, whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?
def test_column_value_access(): """Can a column's underlying data consistently be accessed via `.value`, whether it is a `Column`, `MaskedColumn`, `Quantity`, or `Time`?""" data = np.array([1, 2, 3]) tbl = table.QTable( { "a": table.Column(data), "b": table.MaskedColumn(data), "c": u.Quantity(data), "d": time.Time(data, format="mjd"), } ) assert type(tbl["a"].value) == np.ndarray assert type(tbl["b"].value) == np.ma.MaskedArray assert type(tbl["c"].value) == np.ndarray assert type(tbl["d"].value) == np.ndarray
Test grouping a Column by various key types.
def test_column_group_by(T1q): """Test grouping a Column by various key types.""" # T1q["a"] could be Column or Quantity, so force the object we want to group to be # Column. Then later we are using the "a" column as a grouping key. t1a = Column(T1q["a"]) unit = T1q["a"].unit or 1 # Group by a Column (i.e. numpy array) t1ag = t1a.group_by(T1q["a"]) keys = t1ag.groups.keys assert np.all(t1ag.groups.indices == np.array([0, 1, 4, 8])) assert np.all(keys == np.array([0, 1, 2]) * unit) # Group by a Table and numpy structured array for t1ag, key_unit in ( (t1a.group_by(T1q["a", "b"]), unit), (t1a.group_by(T1q["a", "b"].as_array()), 1), ): assert np.all(t1ag.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) keys = t1ag.groups.keys assert keys.dtype.names == ("a", "b") assert np.all(keys["a"] == np.array([0, 1, 1, 2, 2, 2]) * key_unit) assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
Test basic table group_by functionality for possible key types and for masked/unmasked tables.
def test_table_group_by(T1): """ Test basic table group_by functionality for possible key types and for masked/unmasked tables. """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Group by a single column key specified by name tg = t1.group_by("a") assert np.all(tg.groups.indices == np.array([0, 1, 4, 8])) assert str(tg.groups) == "<TableGroups indices=[0 1 4 8]>" assert str(tg["a"].groups) == "<ColumnGroups indices=[0 1 4 8]>" # Sorted by 'a' and in original order for rest assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 0 a 0.0 4 4.0", " 1 b 3.0 5 5.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 2 c 7.0 0 0.0", " 2 b 5.0 1 1.0", " 2 b 6.0 2 2.0", " 2 a 4.0 3 3.0", ] assert tg.meta["ta"] == 1 assert tg["c"].meta["a"] == 1 assert tg["c"].description == "column c" # Group by a table column tg2 = t1.group_by(t1["a"]) assert tg.pformat() == tg2.pformat() # Group by two columns spec'd by name for keys in (["a", "b"], ("a", "b")): tg = t1.group_by(keys) assert np.all(tg.groups.indices == np.array([0, 1, 3, 4, 5, 7, 8])) # Sorted by 'a', 'b' and in original order for rest assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 0 a 0.0 4 4.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 1 b 3.0 5 5.0", " 2 a 4.0 3 3.0", " 2 b 5.0 1 1.0", " 2 b 6.0 2 2.0", " 2 c 7.0 0 0.0", ] # Group by a Table tg2 = t1.group_by(t1["a", "b"]) assert tg.pformat() == tg2.pformat() # Group by a structured array tg2 = t1.group_by(t1["a", "b"].as_array()) assert tg.pformat() == tg2.pformat() # Group by a simple ndarray tg = t1.group_by(np.array([0, 1, 0, 1, 2, 1, 0, 0])) assert np.all(tg.groups.indices == np.array([0, 4, 7, 8])) assert tg.pformat() == [ " a b c d q ", " m ", "--- --- --- --- ---", " 2 c 7.0 0 0.0", " 2 b 6.0 2 2.0", " 1 a 2.0 6 6.0", " 1 a 1.0 7 7.0", " 2 b 5.0 1 1.0", " 2 a 4.0 3 3.0", " 1 b 3.0 5 5.0", " 0 a 0.0 4 4.0", ]
Group a table with a time column using that column as a key.
def test_groups_keys_time(T1b: QTable): """Group a table with a time column using that column as a key.""" T1b = T1b.copy() T1b["a"] = Time(T1b["a"], format="cxcsec") tg = T1b.group_by("a") keys = tg.groups.keys assert keys.dtype.names == ("a",) assert np.all(keys["a"] == Time(np.array([0, 1, 2]), format="cxcsec")) tg = T1b.group_by(["a", "b"]) keys = tg.groups.keys assert keys.dtype.names == ("a", "b") assert np.all(keys["a"] == Time(np.array([0, 1, 1, 2, 2, 2]), format="cxcsec")) assert np.all(keys["b"] == np.array(["a", "a", "b", "a", "b", "c"]))
Test that copying a table or column copies the groups properly
def test_grouped_copy(T1): """ Test that copying a table or column copies the groups properly """ for masked in (False, True): t1 = QTable(T1, masked=masked) tg = t1.group_by("a") tgc = tg.copy() assert np.all(tgc.groups.indices == tg.groups.indices) assert np.all(tgc.groups.keys == tg.groups.keys) tac = tg["a"].copy() assert np.all(tac.groups.indices == tg["a"].groups.indices) c1 = t1["a"].copy() gc1 = c1.group_by(t1["a"]) gc1c = gc1.copy() assert np.all(gc1c.groups.indices == np.array([0, 1, 4, 8]))
Test that slicing a table removes previous grouping
def test_grouped_slicing(T1): """ Test that slicing a table removes previous grouping """ for masked in (False, True): t1 = QTable(T1, masked=masked) # Regular slice of a table tg = t1.group_by("a") tg2 = tg[3:5] assert np.all(tg2.groups.indices == np.array([0, len(tg2)])) assert tg2.groups.keys is None
Group a column that is part of a table
def test_group_column_from_table(T1): """ Group a column that is part of a table """ cg = T1["c"].group_by(np.array(T1["a"])) assert np.all(cg.groups.keys == np.array([0, 1, 2])) assert np.all(cg.groups.indices == np.array([0, 1, 4, 8]))
Use boolean mask as item in __getitem__ for groups
def test_table_groups_mask_index(T1): """ Use boolean mask as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") t2 = t1.groups[np.array([True, False, True])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
Use numpy array as item in __getitem__ for groups
def test_table_groups_array_index(T1): """ Use numpy array as item in __getitem__ for groups """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") t2 = t1.groups[np.array([0, 2])] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
Test that slicing table groups works
def test_table_groups_slicing(T1): """ Test that slicing table groups works """ for masked in (False, True): t1 = Table(T1, masked=masked).group_by("a") # slice(0, 2) t2 = t1.groups[0:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 1])) # slice(1, 2) t2 = t1.groups[1:2] assert len(t2.groups) == 1 assert t2.groups[0].pformat() == t1.groups[1].pformat() assert np.all(t2.groups.keys["a"] == np.array([1])) # slice(0, 3, 2) t2 = t1.groups[0:3:2] assert len(t2.groups) == 2 assert t2.groups[0].pformat() == t1.groups[0].pformat() assert t2.groups[1].pformat() == t1.groups[2].pformat() assert np.all(t2.groups.keys["a"] == np.array([0, 2]))
Test that column slicing preserves grouping
def test_grouped_item_access(T1): """ Test that column slicing preserves grouping """ for masked in (False, True): t1 = Table(T1, masked=masked) # Regular slice of a table tg = t1.group_by("a") tgs = tg["a", "c", "d"] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] tgs = tg["c", "d"] assert np.all(tgs.groups.keys == tg.groups.keys) assert np.all(tgs.groups.indices == tg.groups.indices) tgsa = tgs.groups.aggregate(np.sum) assert tgsa.pformat() == [ " c d ", "---- ---", " 0.0 4", " 6.0 18", "22.0 6", ]
Operations like adding or deleting a row should remove grouping, but adding, removing, or renaming a column should retain grouping.
def test_mutable_operations(T1): """ Operations like adding or deleting a row should remove grouping, but adding, removing, or renaming a column should retain grouping. """ for masked in (False, True): t1 = QTable(T1, masked=masked) # add row tg = t1.group_by("a") tg.add_row((0, "a", 3.0, 4, 4 * u.m)) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # remove row tg = t1.group_by("a") tg.remove_row(4) assert np.all(tg.groups.indices == np.array([0, len(tg)])) assert tg.groups.keys is None # add column tg = t1.group_by("a") indices = tg.groups.indices.copy() tg.add_column(Column(name="e", data=np.arange(len(tg)))) assert np.all(tg.groups.indices == indices) assert np.all(tg["e"].groups.indices == indices) assert np.all(tg["e"].groups.keys == tg.groups.keys) # remove column (not key column) tg = t1.group_by("a") tg.remove_column("b") assert np.all(tg.groups.indices == indices) # Still has original key col names assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["a"].groups.indices == indices) # remove key column tg = t1.group_by("a") tg.remove_column("a") assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["b"].groups.indices == indices) # rename key column tg = t1.group_by("a") tg.rename_column("a", "aa") assert np.all(tg.groups.indices == indices) assert tg.groups.keys.dtype.names == ("a",) assert np.all(tg["aa"].groups.indices == indices)
Appropriate errors get raised.
def test_group_by_errors(T1): """ Appropriate errors get raised. """ # Bad column name as string with pytest.raises(ValueError): T1.group_by("f") # Bad column names in list with pytest.raises(ValueError): T1.group_by(["f", "g"]) # Wrong length array with pytest.raises(ValueError): T1.group_by(np.array([1, 2])) # Wrong type with pytest.raises(TypeError): T1.group_by(None) # Masked key column t1 = QTable(T1, masked=True) t1["a"].mask[4] = True with pytest.raises(ValueError): t1.group_by("a")
Make sure the keys meta['grouped_by_table_cols'] is working.
def test_groups_keys_meta(T1): """ Make sure the keys meta['grouped_by_table_cols'] is working. """ # Group by column in this table tg = T1.group_by("a") assert tg.groups.keys.meta["grouped_by_table_cols"] is True assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is True assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is True assert ( tg["d"] .groups[np.array([False, True, True])] .groups.keys.meta["grouped_by_table_cols"] is True ) # Group by external Table tg = T1.group_by(T1["a", "b"]) assert tg.groups.keys.meta["grouped_by_table_cols"] is False assert tg["c"].groups.keys.meta["grouped_by_table_cols"] is False assert tg.groups[1].groups.keys.meta["grouped_by_table_cols"] is False # Group by external numpy array tg = T1.group_by(T1["a", "b"].as_array()) assert not hasattr(tg.groups.keys, "meta") assert not hasattr(tg["c"].groups.keys, "meta") # Group by Column tg = T1.group_by(T1["a"]) assert "grouped_by_table_cols" not in tg.groups.keys.meta assert "grouped_by_table_cols" not in tg["c"].groups.keys.meta
Aggregate a table
def test_table_aggregate(T1): """ Aggregate a table """ # Table with only summable cols t1 = T1["a", "c", "d"] tg = t1.group_by("a") tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] # Reverts to default groups assert np.all(tga.groups.indices == np.array([0, 3])) assert tga.groups.keys is None # metadata survives assert tga.meta["ta"] == 1 assert tga["c"].meta["a"] == 1 assert tga["c"].description == "column c" # Aggregate with np.sum with masked elements. This results # in one group with no elements, hence a nan result and conversion # to float for the 'd' column. t1m = QTable(T1, masked=True) t1m["c"].mask[4:6] = True t1m["d"].mask[4:6] = True tg = t1m.group_by("a") if PYTEST_LT_8_0: ctx = nullcontext() else: ctx = pytest.warns(AstropyUserWarning, match="Cannot aggregate column") with pytest.warns(UserWarning, match="converting a masked element to nan"), ctx: tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d q ", " m ", "--- ---- ---- ----", " 0 nan nan 4.0", " 1 3.0 13.0 18.0", " 2 22.0 6.0 6.0", ] # Aggregate with np.sum with masked elements, but where every # group has at least one remaining (unmasked) element. Then # the int column stays as an int. t1m = QTable(t1, masked=True) t1m["c"].mask[5] = True t1m["d"].mask[5] = True tg = t1m.group_by("a") tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 3.0 13", " 2 22.0 6", ] # Aggregate with a column type that cannot be supplied to the aggregating # function. This raises a warning but still works. tg = T1.group_by("a") with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"): tga = tg.groups.aggregate(np.sum) assert tga.pformat() == [ " a c d q ", " m ", "--- ---- --- ----", " 0 0.0 4 4.0", " 1 6.0 18 18.0", " 2 22.0 6 6.0", ]
Aggregate table with functions which have a reduceat method
def test_table_aggregate_reduceat(T1): """ Aggregate table with functions which have a reduceat method """ # Comparison functions without reduceat def np_mean(x): return np.mean(x) def np_sum(x): return np.sum(x) def np_add(x): return np.add(x) # Table with only summable cols t1 = T1["a", "c", "d"] tg = t1.group_by("a") # Comparison tga_r = tg.groups.aggregate(np.sum) tga_a = tg.groups.aggregate(np.add) tga_n = tg.groups.aggregate(np_sum) assert np.all(tga_r == tga_n) assert np.all(tga_a == tga_n) assert tga_n.pformat() == [ " a c d ", "--- ---- ---", " 0 0.0 4", " 1 6.0 18", " 2 22.0 6", ] tga_r = tg.groups.aggregate(np.mean) tga_n = tg.groups.aggregate(np_mean) assert np.all(tga_r == tga_n) assert tga_n.pformat() == [ " a c d ", "--- --- ---", " 0 0.0 4.0", " 1 2.0 6.0", " 2 5.5 1.5", ] # Binary ufunc np_add should raise warning without reduceat t2 = T1["a", "c"] tg = t2.group_by("a") with pytest.warns(AstropyUserWarning, match="Cannot aggregate column"): tga = tg.groups.aggregate(np_add) assert tga.pformat() == [" a ", "---", " 0", " 1", " 2"]
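A short sketch of the ``reduceat`` mechanism that the fast aggregation path relies on: a ufunc with a ``reduceat`` method can sum contiguous groups directly from the group start indices (as held in ``groups.indices``):

    import numpy as np

    vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    indices = np.array([0, 2, 5])  # group boundaries: rows [0:2] and [2:5]
    sums = np.add.reduceat(vals, indices[:-1])
    assert np.all(sums == [3.0, 12.0])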
Aggregate a single table column
def test_column_aggregate(T1): """ Aggregate a single table column """ for masked in (False, True): tg = QTable(T1, masked=masked).group_by("a") tga = tg["c"].groups.aggregate(np.sum) assert tga.pformat() == [" c ", "----", " 0.0", " 6.0", "22.0"]
https://github.com/astropy/astropy/issues/12706
def test_column_aggregate_f8(): """https://github.com/astropy/astropy/issues/12706""" # Just want to make sure it does not crash again. for masked in (False, True): tg = Table({"a": np.arange(2, dtype=">f8")}, masked=masked).group_by("a") tga = tg["a"].groups.aggregate(np.sum) assert tga.pformat() == [" a ", "---", "0.0", "1.0"]
Table groups filtering
def test_table_filter(): """ Table groups filtering """ def all_positive(table, key_colnames): return all( np.all(table[colname] >= 0) for colname in table.colnames if colname not in key_colnames ) # Negative value in 'a' column should not filter because it is a key col t = Table.read( [ " a c d", " -2 7.0 0", " -2 5.0 1", " 0 0.0 4", " 1 3.0 5", " 1 2.0 -6", " 1 1.0 7", " 3 3.0 5", " 3 -2.0 6", " 3 1.0 7", ], format="ascii", ) tg = t.group_by("a") t2 = tg.groups.filter(all_positive) assert t2.groups[0].pformat() == [ " a c d ", "--- --- ---", " -2 7.0 0", " -2 5.0 1", ] assert t2.groups[1].pformat() == [" a c d ", "--- --- ---", " 0 0.0 4"]
Column groups filtering
def test_column_filter(): """ Column groups filtering """ def all_positive(column): return not np.any(column < 0) # Groups in which the 'c' column contains a negative value get filtered out t = Table.read( [ " a c d", " -2 7.0 0", " -2 5.0 1", " 0 0.0 4", " 1 3.0 5", " 1 2.0 -6", " 1 1.0 7", " 3 3.0 5", " 3 -2.0 6", " 3 1.0 7", ], format="ascii", ) tg = t.group_by("a") c2 = tg["c"].groups.filter(all_positive) assert len(c2.groups) == 3 assert c2.groups[0].pformat() == [" c ", "---", "7.0", "5.0"] assert c2.groups[1].pformat() == [" c ", "---", "0.0"] assert c2.groups[2].pformat() == [" c ", "---", "3.0", "2.0", "1.0"]