Try importing dependencies for reading HTML.
This is copied from pandas.io.html | def import_html_libs():
"""Try importing dependencies for reading HTML.
This is copied from pandas.io.html
"""
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
from astropy.utils.compat.optional_deps import HAS_BS4 as _HAS_BS4
from astropy.utils.compat.optional_deps import HAS_HTML5LIB as _HAS_HTML5LIB
from astropy.utils.compat.optional_deps import HAS_LXML as _HAS_LXML
_IMPORTS = True |
Provide io Table connector to read table using pandas. | def _pandas_read(fmt, filespec, **kwargs):
"""Provide io Table connector to read table using pandas."""
try:
import pandas as pd
except ImportError:
raise ImportError("pandas must be installed to use pandas table reader")
pandas_fmt = fmt[len(PANDAS_PREFIX) :] # chop the 'pandas.' in front
read_func = getattr(pd, "read_" + pandas_fmt)
# Get defaults and then override with user-supplied values
read_kwargs = PANDAS_FMTS[pandas_fmt]["read"].copy()
read_kwargs.update(kwargs)
# Special case: pandas defaults to HTML lxml for reading, but does not attempt
# to fall back to bs4 + html5lib. So do that now for convenience if user has
# not specifically selected a flavor. If things go wrong the pandas exception
# with instruction to install a library will come up.
if pandas_fmt == "html" and "flavor" not in kwargs:
import_html_libs()
if not _HAS_LXML and _HAS_HTML5LIB and _HAS_BS4:
read_kwargs["flavor"] = "bs4"
df = read_func(filespec, **read_kwargs)
# Special case for HTML
if pandas_fmt == "html":
df = df[0]
return Table.from_pandas(df) |
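A minimal usage sketch for the reader above, going through astropy's unified I/O interface (assumes pandas is installed; "pandas.csv" is the registered format name for this connector, and the input data are illustrative):
from io import StringIO
from astropy.table import Table

buf = StringIO("a,b\n1,2.5\n2,3.5\n")
t = Table.read(buf, format="pandas.csv")  # dispatches to _pandas_read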
Provide io Table connector to write table using pandas. | def _pandas_write(fmt, tbl, filespec, overwrite=False, **kwargs):
"""Provide io Table connector to write table using pandas."""
pandas_fmt = fmt[len(PANDAS_PREFIX) :] # chop the 'pandas.' in front
# Get defaults and then override with user-supplied values
write_kwargs = PANDAS_FMTS[pandas_fmt]["write"].copy()
write_kwargs.update(kwargs)
df = tbl.to_pandas()
write_method = getattr(df, "to_" + pandas_fmt)
if not overwrite:
try: # filespec is not always a path-like
exists = os.path.exists(filespec)
except TypeError: # skip invalid arguments
pass
else:
if exists: # only error if file already exists
raise OSError(NOT_OVERWRITING_MSG.format(filespec))
return write_method(filespec, **write_kwargs) |
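A matching sketch for the writer, again via the unified interface (the in-memory buffer and column values are illustrative):
from io import StringIO
from astropy.table import Table

t = Table({"a": [1, 2], "b": [2.5, 3.5]})
out = StringIO()
t.write(out, format="pandas.csv")  # dispatches to _pandas_write
print(out.getvalue())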
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory. | def home_is_tmpdir(monkeypatch, tmp_path):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables are temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path)) |
Test the old meta format
Only for some files created prior to v4.0, in compatibility mode. | def test_preserve_serialized_old_meta_format():
"""Test the old meta format
Only for some files created prior to v4.0, in compatibility mode.
"""
test_file = get_pkg_data_filename("data/old_meta_example.hdf5")
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t2 = Table.read(test_file, path="the_table")
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta |
Test that very large datasets work, now! | def test_metadata_very_large(tmp_path):
"""Test that very large datasets work, now!"""
test_file = tmp_path / "test.hdf5"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2**16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2**18)
t1.write(test_file, path="the_table", serialize_meta=True, overwrite=True)
t2 = Table.read(test_file, path="the_table")
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta |
Test writing as QTable and reading as Table. Ensure correct classes
come out. | def test_hdf5_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.hdf5"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(filename, format="hdf5", path="root")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class) |
Test write/read all cols at once and validate intermediate column names | def test_hdf5_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.hdf5"
names = sorted(mixin_cols)
all_serialized_names = []
for name in names:
all_serialized_names.extend(serialized_names[name])
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = table_cls.read(filename, format="hdf5", path="root")
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
# Read directly via hdf5 and confirm column names
h5 = h5py.File(filename, "r")
h5_names = list(h5["root"].dtype.names)
assert h5_names == all_serialized_names
h5.close() |
Test write/read one col at a time and do detailed validation | def test_hdf5_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.hdf5"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = table_cls.read(filename, format="hdf5", path="root")
assert t.colnames == t2.colnames
for colname in t.colnames:
compare = ["data"] if colname in ("c1", "c2") else compare_attrs[colname]
assert_objects_equal(t[colname], t2[colname], compare)
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray |
Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e | def test_round_trip_masked_table_default(tmp_path):
"""Test round-trip of MaskedColumn through HDF5 using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.h5"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"] = [b"c", b"d", b"e"]
t["c"].mask[1] = True
t.write(filename, format="hdf5", path="root", serialize_meta=True)
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name]) |
Test round-trip through pandas write/read for supported formats.
:param fmt: format name, e.g. csv, html, json
:return: | def test_read_write_format(fmt):
"""
Test round-trip through pandas write/read for supported formats.
:param fmt: format name, e.g. csv, html, json
:return:
"""
# Skip the reading tests
if fmt == "html" and not HAS_HTML_DEPS:
pytest.skip("Missing lxml or bs4 + html5lib for HTML read/write test")
pandas_fmt = "pandas." + fmt
# Explicitly provide dtype to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table(
[[1, 2, 3], [1.0, 2.5, 5.0], ["a", "b", "c"]], dtype=(np.int64, np.float64, str)
)
buf = StringIO()
t.write(buf, format=pandas_fmt)
buf.seek(0)
t2 = Table.read(buf, format=pandas_fmt)
assert t.colnames == t2.colnames
assert np.all(t == t2) |
Test overwriting. | def test_write_overwrite(tmp_path, fmt):
"""Test overwriting."""
tmpfile = tmp_path / f"test.{fmt}"
pandas_fmt = f"pandas.{fmt}"
# Explicitly provide dtype to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table(
[[1, 2, 3], [1.0, 2.5, 5.0], ["a", "b", "c"]], dtype=(np.int64, np.float64, str)
)
# works when file DNE
t.write(tmpfile, format=pandas_fmt)
# fails when cannot overwrite
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t.write(tmpfile, format=pandas_fmt, overwrite=False)
# passes when it can
t.write(tmpfile, format=pandas_fmt, overwrite=True) |
Test reading with pandas read_fwf() | def test_read_fixed_width_format():
"""Test reading with pandas read_fwf()"""
tbl = """\
a b c
1 2.0 a
2 3.0 b"""
buf = StringIO()
buf.write(tbl)
# Explicitly provide converters to avoid casting 'a' to int32.
# See https://github.com/astropy/astropy/issues/8682
t = Table.read(
tbl,
format="ascii",
guess=False,
converters={"a": [ascii.convert_numpy(np.int64)]},
)
buf.seek(0)
t2 = Table.read(buf, format="pandas.fwf")
assert t.colnames == t2.colnames
assert np.all(t == t2) |
Writing a table with mixins just drops them via to_pandas()
This also tests passing a kwarg to pandas read and write. | def test_write_with_mixins():
"""Writing a table with mixins just drops them via to_pandas()
This also tests passing a kwarg to pandas read and write.
"""
sc = SkyCoord([1, 2], [3, 4], unit="deg")
q = [5, 6] * u.m
qt = QTable([[1, 2], q, sc], names=["i", "q", "sc"])
buf = StringIO()
qt.write(buf, format="pandas.csv", sep=" ")
exp = ["i q sc.ra sc.dec", "1 5.0 1.0 3.0", "2 6.0 2.0 4.0"]
assert buf.getvalue().splitlines() == exp
# Read it back
buf.seek(0)
qt2 = Table.read(buf, format="pandas.csv", sep=" ")
# Explicitly provide converters to avoid casting 'i' to int32.
# See https://github.com/astropy/astropy/issues/8682
exp_t = ascii.read(exp, converters={"i": [ascii.convert_numpy(np.int64)]})
assert qt2.colnames == exp_t.colnames
assert np.all(qt2 == exp_t) |
Test writing/reading a simple parquet file. | def test_read_write_simple(tmp_path):
"""Test writing/reading a simple parquet file."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3]) |
Test writing an existing file without overwriting. | def test_read_write_existing(tmp_path):
"""Test writing an existing file without overwriting."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):
t1.write(test_file) |
Test overwriting an existing file. | def test_read_write_existing_overwrite(tmp_path):
"""Test overwriting an existing file."""
test_file = tmp_path / "test.parquet"
with open(test_file, "w") as f: # create empty file
pass
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert np.all(t2["a"] == [1, 2, 3]) |
Test reading a file object. | def test_read_fileobj(tmp_path):
"""Test reading a file object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import io
with io.FileIO(test_file, mode="r") as input_file:
t2 = Table.read(input_file)
assert np.all(t2["a"] == [1, 2, 3]) |
Test reading a path-like object. | def test_read_pathlikeobj(tmp_path):
"""Test reading a path-like object."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.write(test_file)
import pathlib
p = pathlib.Path(test_file)
t2 = Table.read(p)
assert np.all(t2["a"] == [1, 2, 3]) |
Test reading an incorrect fileobject type. | def test_read_wrong_fileobj():
"""Test reading an incorrect fileobject type."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
with pytest.raises(
TypeError, match="pyarrow can only open path-like or file-like objects."
):
Table.read(f, format="parquet") |
Test identifying an incorrect fileobj. | def test_identify_wrong_fileobj():
"""Test identifying an incorrect fileobj."""
class FakeFile:
def not_read(self):
pass
f = FakeFile()
assert not parquet_identify("test", "test", f) |
Test identifying an incorrect extension. | def test_identify_file_wrong_extension():
"""Test identifying an incorrect extension."""
assert not parquet_identify("test", "test.notparquet", None) |
Test identifying a correct extension. | def test_identify_file_correct_extension():
"""Test identifying a correct extension."""
assert parquet_identify("test", "test.parquet", None)
assert parquet_identify("test", "test.parq", None) |
Test running identify with no object or path. | def test_identify_file_noobject_nopath():
"""Test running identify with no object or path."""
assert not parquet_identify("test", None, None) |
Test writing to a filename of the wrong type. | def test_write_wrong_type():
"""Test writing to a filename of the wrong type."""
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
with pytest.raises(TypeError, match="should be a string"):
t1.write(1212, format="parquet") |
Test writing and reading an empty Table. | def test_empty_roundtrip(tmp_path):
"""Test writing and reading an empty Table."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.write(test_file)
t2 = Table.read(test_file)
assert len(t2) == 0
assert t1.colnames == t2.colnames |
Test that round-tripping a single column preserves datatypes. | def test_preserve_single_dtypes(tmp_path, dtype):
"""Test that round-tripping a single column preserves datatypes."""
test_file = tmp_path / "test.parquet"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == values)
assert t2["a"].dtype == dtype |
Test that round-tripping a single big-endian column preserves data. | def test_preserve_single_bigendian_dtypes(tmp_path, dtype):
"""Test that round-tripping a single big-endian column preserves data."""
test_file = tmp_path / "test.parquet"
values = _default_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == values)
# The parquet serialization will turn all arrays into little-endian.
assert t2["a"].dtype == dtype.newbyteorder("<") |
Test that round-tripping a single array column preserves datatypes. | def test_preserve_single_array_dtypes(tmp_path, dtype):
"""Test that round-tripping a single array column preserves datatypes."""
test_file = tmp_path / "test.parquet"
values = _default_array_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == t1["a"])
assert np.all(t2["a"].shape == np.array(values).shape)
assert t2["a"].dtype == dtype |
Test that round-tripping a single array column (big-endian) preserves data. | def test_preserve_single_bigendian_array_dtypes(tmp_path, dtype):
"""Test that round-tripping a single array column (big-endian) preserves data."""
test_file = tmp_path / "test.parquet"
values = _default_array_values(dtype)
t1 = Table()
t1.add_column(Column(name="a", data=np.array(values, dtype=dtype)))
t1.write(test_file)
t2 = Table.read(test_file)
assert np.all(t2["a"] == t1["a"])
assert np.all(t2["a"].shape == np.array(values).shape)
assert t2["a"].dtype == dtype.newbyteorder("<") |
Test that round-tripping a single variable length array column preserves
datatypes. | def test_preserve_single_var_length_array_dtypes(tmp_path, dtype):
"""
Test that round-tripping a single variable length array column preserves
datatypes.
"""
test_file = tmp_path / "test.parquet"
values = _default_var_length_array_values(dtype)
t1 = Table()
data = np.array([np.array(val, dtype=dtype) for val in values], dtype=np.object_)
t1.add_column(Column(name="a", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for row1, row2 in zip(t1["a"], t2["a"]):
assert np.all(row1 == row2)
assert row1.dtype == row2.dtype |
Test that round-tripping a single big-endian variable length array column preserves
datatypes. | def test_preserve_single_bigendian_var_length_array_dtypes(tmp_path, dtype):
"""
Test that round-tripping a single big-endian variable length array column preserves
datatypes.
"""
test_file = tmp_path / "test.parquet"
values = _default_var_length_array_values(dtype)
t1 = Table()
data = np.array([np.array(val, dtype=dtype) for val in values], dtype=np.object_)
t1.add_column(Column(name="a", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for row1, row2 in zip(t1["a"], t2["a"]):
assert np.all(row1 == row2)
assert row1.dtype.newbyteorder(">") == row2.dtype.newbyteorder(">") |
Test that round-tripping preserves a table with all the datatypes. | def test_preserve_all_dtypes(tmp_path):
"""Test that round-tripping preserves a table with all the datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
arr_values = _default_array_values(dtype)
t1.add_column(
Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype))
)
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype
arr_values = _default_array_values(dtype)
assert np.all(t2[str(dtype) + "_arr"] == arr_values)
assert t2[str(dtype)].dtype == dtype
assert np.all(t2[str(dtype) + "_arr"].shape == np.array(arr_values).shape)
# Test just reading the schema
schema2 = Table.read(test_file, schema_only=True)
assert len(schema2) == 0
assert schema2.dtype == t2.dtype |
Test that round-tripping preserves a table with all the var length datatypes. | def test_preserve_all_var_length_dtypes(tmp_path):
"""Test that round-tripping preserves a table with all the var length datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
data = np.array(
[np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_
)
t1.add_column(Column(name=str(dtype) + "_varr", data=data))
t1.write(test_file)
t2 = Table.read(test_file)
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
colname = str(dtype) + "_varr"
for row1, row2 in zip(t1[colname], t2[colname]):
assert np.all(row1 == row2)
assert row1.dtype == row2.dtype |
Test that we can save an empty table with var length datatypes. | def test_write_empty_tables(tmp_path):
"""Test that we can save an empty table with var length datatypes."""
test_file = tmp_path / "test.parquet"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
arr_values = _default_array_values(dtype)
t1.add_column(
Column(name=str(dtype) + "_arr", data=np.array(arr_values, dtype=dtype))
)
# Write an empty table with values and arrays, and confirm it works.
data = np.zeros(0, dtype=t1.dtype)
t2 = Table(data=data)
t2.write(test_file)
t3 = Table.read(test_file)
assert t3.dtype == t2.dtype
test_file2 = tmp_path / "test2.parquet"
t4 = Table()
for dtype in ALL_DTYPES:
varr_values = _default_var_length_array_values(dtype)
data = np.array(
[np.array(val, dtype=dtype) for val in varr_values], dtype=np.object_
)
t4.add_column(Column(name=str(dtype) + "_varr", data=data))
# Write an empty table with variable-length arrays, and confirm this
# raises an exception. (The datatype of an np.object_ type column
# cannot be inferred from an empty table.)
data = np.zeros(0, dtype=t4.dtype)
t5 = Table(data=data)
with pytest.raises(ValueError, match="Cannot serialize zero-length table") as err:
t5.write(test_file2) |
Test exception when trying to serialize a mixed-type variable-length column. | def test_heterogeneous_var_array_table(tmp_path):
"""Test exception when trying to serialize a mixed-type variable-length column."""
test_file = tmp_path / "test.parquet"
t1 = Table()
data = np.array(
[
np.array([0, 1, 2], dtype=np.int32),
np.array([0, 1, 2, 3, 4], dtype=np.float64),
],
dtype=np.object_,
)
t1.add_column(Column(name="a", data=data))
with pytest.raises(ValueError, match="Cannot serialize mixed-type column") as err:
t1.write(test_file) |
Test that writing/reading preserves metadata. | def test_preserve_meta(tmp_path):
"""Test that writing/reading preserves metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["a"] = 1
t1.meta["b"] = "hello"
t1.meta["c"] = 3.14159
t1.meta["d"] = True
t1.meta["e"] = np.array([1, 2, 3])
t1.write(test_file)
t2 = Table.read(test_file)
for key in t1.meta:
assert np.all(t1.meta[key] == t2.meta[key]) |
Test that writing/reading preserves unit/format/description. | def test_preserve_serialized(tmp_path):
"""Test that writing/reading preserves unit/format/description."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta |
Test that very large datasets work | def test_metadata_very_large(tmp_path):
"""Test that very large datasets work"""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1["a"] = Column(data=[1, 2, 3], unit="s")
t1["a"].meta["a0"] = "A0"
t1["a"].meta["a1"] = {"a1": [0, 1]}
t1["a"].format = "7.3f"
t1["a"].description = "A column"
t1.meta["b"] = 1
t1.meta["c"] = {"c0": [0, 1]}
t1.meta["meta_big"] = "0" * (2**16 + 1)
t1.meta["meta_biggerstill"] = "0" * (2**18)
t1.write(test_file, overwrite=True)
t2 = Table.read(test_file)
assert t1["a"].unit == t2["a"].unit
assert t1["a"].format == t2["a"].format
assert t1["a"].description == t2["a"].description
assert t1["a"].meta == t2["a"].meta
assert t1.meta == t2.meta |
Test that we cannot preserve objects in metadata. | def test_fail_meta_serialize(tmp_path):
"""Test that we cannot preserve objects in metadata."""
test_file = tmp_path / "test.parquet"
t1 = Table()
t1.add_column(Column(name="a", data=[1, 2, 3]))
t1.meta["f"] = str
with pytest.raises(Exception) as err:
t1.write(test_file)
assert "cannot represent an object" in str(err.value)
assert "<class 'str'>" in str(err.value) |
Convenient routine to check objects and attributes match. | def assert_objects_equal(obj1, obj2, attrs, compare_class=True):
"""Convenient routine to check objects and attributes match."""
if compare_class:
assert obj1.__class__ is obj2.__class__
info_attrs = [
"info.name",
"info.format",
"info.unit",
"info.description",
"info.meta",
]
for attr in attrs + info_attrs:
a1 = obj1
a2 = obj2
for subattr in attr.split("."):
try:
a1 = getattr(a1, subattr)
a2 = getattr(a2, subattr)
except AttributeError:
a1 = a1[subattr]
a2 = a2[subattr]
# Mixin info.meta can be None instead of an empty OrderedDict(); #6720 would
# fix this.
if attr == "info.meta":
if a1 is None:
a1 = {}
if a2 is None:
a2 = {}
if isinstance(a1, np.ndarray) and a1.dtype.kind == "f":
assert quantity_allclose(a1, a2, rtol=1e-15)
else:
assert np.all(a1 == a2) |
Test writing as QTable and reading as Table. Ensure correct classes
come out. | def test_parquet_mixins_qtable_to_table(tmp_path):
"""Test writing as QTable and reading as Table. Ensure correct classes
come out.
"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = QTable([mixin_cols[name] for name in names], names=names)
t.write(filename, format="parquet")
t2 = Table.read(filename, format="parquet")
assert t.colnames == t2.colnames
for name, col in t.columns.items():
col2 = t2[name]
# Special-case Time, which does not yet support round-tripping
# the format.
if isinstance(col2, Time):
col2.format = col.format
attrs = compare_attrs[name]
compare_class = True
if isinstance(col.info, QuantityInfo):
# Downgrade Quantity to Column + unit
assert type(col2) is Column
# Class-specific attributes like `value` or `wrap_angle` are lost.
attrs = ["unit"]
compare_class = False
# Compare data values here (assert_objects_equal doesn't know how in this case)
assert np.all(col.value == col2)
assert_objects_equal(col, col2, attrs, compare_class) |
Test write/read all cols at once and validate intermediate column names | def test_parquet_mixins_as_one(table_cls, tmp_path):
"""Test write/read all cols at once and validate intermediate column names"""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames |
Test write/read one col at a time and do detailed validation | def test_parquet_mixins_per_column(table_cls, name_col, tmp_path):
"""Test write/read one col at a time and do detailed validation"""
filename = tmp_path / "test_simple.parquet"
name, col = name_col
c = [1.0, 2.0]
t = table_cls([c, col, c], names=["c1", name, "c2"])
t[name].info.description = "my description"
t[name].info.meta = {"list": list(range(50)), "dict": {"a": "b" * 200}}
if not t.has_mixin_columns:
pytest.skip("column is not a mixin (e.g. Quantity subclass in Table)")
if isinstance(t[name], NdarrayMixin):
pytest.xfail("NdarrayMixin not supported")
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet")
assert t.colnames == t2.colnames
for colname in t.colnames:
assert_objects_equal(t[colname], t2[colname], compare_attrs[colname])
# Special case to make sure Column type doesn't leak into Time class data
if name.startswith("tm"):
assert t2[name]._time.jd1.__class__ is np.ndarray
assert t2[name]._time.jd2.__class__ is np.ndarray |
Test round-trip of MaskedColumn through Parquet using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e | def test_round_trip_masked_table_default(tmp_path):
"""Test round-trip of MaskedColumn through Parquet using default serialization
that writes a separate mask column. Note:
>>> simple_table(masked=True)
<Table masked=True length=3>
a b c
int64 float64 str1
----- ------- ----
-- 1.0 c
2 2.0 --
3 -- e
"""
filename = tmp_path / "test.parquet"
t = simple_table(masked=True) # int, float, and str cols with one masked element
t["c"] = [b"c", b"d", b"e"]
t["c"].mask[1] = True
t.write(filename, format="parquet")
t2 = Table.read(filename)
assert t2.masked is False
assert t2.colnames == t.colnames
for name in t2.colnames:
assert np.all(t2[name].mask == t[name].mask)
assert np.all(t2[name] == t[name])
# Data under the mask round-trips also (unmask data to show this).
t[name].mask = False
t2[name].mask = False
assert np.all(t2[name] == t[name]) |
Test write all cols at once, and read one at a time. | def test_parquet_mixins_read_one_name(table_cls, tmp_path):
"""Test write all cols at once, and read one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
for name in names:
t2 = table_cls.read(filename, format="parquet", include_names=[name])
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t2.colnames == [name] |
Test write all cols at once, and read all but one at a time. | def test_parquet_mixins_read_exclude_names(table_cls, tmp_path):
"""Test write all cols at once, and read all but one at a time."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", exclude_names=names[0:5])
assert t.colnames[5:] == t2.colnames |
Test write all cols at once, and try to read no valid columns. | def test_parquet_mixins_read_no_columns(table_cls, tmp_path):
"""Test write all cols at once, and try to read no valid columns."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
with pytest.raises(ValueError, match="No include_names specified"):
t2 = table_cls.read(
filename,
format="parquet",
include_names=["not_a_column", "also_not_a_column"],
) |
Test write all cols at once, and read the schema. | def test_parquet_mixins_read_schema(table_cls, tmp_path):
"""Test write all cols at once, and read the schema."""
filename = tmp_path / "test_simple.parquet"
names = sorted(mixin_cols)
t = table_cls([mixin_cols[name] for name in names], names=names)
t.meta["C"] = "spam"
t.meta["comments"] = ["this", "is", "a", "comment"]
t.meta["history"] = ["first", "second", "third"]
t.write(filename, format="parquet")
t2 = table_cls.read(filename, format="parquet", schema_only=True)
assert t2.meta["C"] == "spam"
assert t2.meta["comments"] == ["this", "is", "a", "comment"]
assert t2.meta["history"] == ["first", "second", "third"]
assert t.colnames == t2.colnames
assert len(t2) == 0 |
Test reading a parquet file with a filter. | def test_parquet_filter(tmp_path):
"""Test reading a parquet file with a filter."""
filename = tmp_path / "test_simple.parquet"
t1 = Table()
t1["a"] = Column(data=np.arange(100), dtype=np.int32)
t1["b"] = Column(data=np.arange(100, 0, -1), dtype=np.float64)
t1.write(filename, overwrite=True)
t2 = Table.read(filename, filters=[("a", "<", 50)])
assert t2["a"].max() < 50
t2 = Table.read(filename, filters=[("b", "<", 50)])
assert t2["b"].max() < 50 |
Test reading a generic parquet file. | def test_parquet_read_generic(tmp_path):
"""Test reading a generic parquet file."""
filename = tmp_path / "test_generic.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
# Write the table generically via pyarrow.parquet
names = t1.dtype.names
type_list = [
(name, pyarrow.from_numpy_dtype(t1[name].dtype.type)) for name in names
]
schema = pyarrow.schema(type_list)
_, parquet, writer_version = get_pyarrow()
# Use a pyarrow writer version with full support of datatypes including uint32.
with parquet.ParquetWriter(filename, schema, version=writer_version) as writer:
arrays = [pyarrow.array(t1[name].data) for name in names]
writer.write_table(pyarrow.Table.from_arrays(arrays, schema=schema))
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype |
Test reading a pandas parquet file. | def test_parquet_read_pandas(tmp_path):
"""Test reading a pandas parquet file."""
filename = tmp_path / "test_pandas.parq"
t1 = Table()
for dtype in ALL_DTYPES:
values = _default_values(dtype)
t1.add_column(Column(name=str(dtype), data=np.array(values, dtype=dtype)))
df = t1.to_pandas()
# Use a pyarrow writer version with full support of datatypes including uint32.
_, _, writer_version = get_pyarrow()
df.to_parquet(filename, version=writer_version)
with pytest.warns(AstropyUserWarning, match="No table::len"):
t2 = Table.read(filename)
for dtype in ALL_DTYPES:
values = _default_values(dtype)
assert np.all(t2[str(dtype)] == values)
assert t2[str(dtype)].dtype == dtype |
Tests the `fnpickle` and `fnunpickle` functions' basic operation by
pickling and unpickling a string, using both a filename and a
file. | def test_fnpickling_simple(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' basic operation by
pickling and unpickling a string, using both a filename and a
file.
"""
fn = str(tmp_path / "test1.pickle")
obj1 = "astring"
with pytest.warns(
AstropyDeprecationWarning, match="Use pickle from standard library"
):
fnpickle(obj1, fn)
res = fnunpickle(fn, 0)
assert obj1 == res
# now try with a file-like object instead of a string
with open(fn, "wb") as f:
fnpickle(obj1, f)
with open(fn, "rb") as f:
res = fnunpickle(f)
assert obj1 == res |
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle custom classes. | def test_fnpickling_class(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle custom classes.
"""
fn = str(tmp_path / "test2.pickle")
obj1 = "astring"
obj2 = ToBePickled(obj1)
with pytest.warns(
AstropyDeprecationWarning, match="Use pickle from standard library"
):
fnpickle(obj2, fn)
res = fnunpickle(fn)
assert res == obj2 |
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle pickle files from all protocols. | def test_fnpickling_protocol(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle pickle files from all protocols.
"""
import pickle
obj1 = "astring"
obj2 = ToBePickled(obj1)
for p in range(pickle.HIGHEST_PROTOCOL + 1):
fn = str(tmp_path / f"testp{p}.pickle")
with pytest.warns(
AstropyDeprecationWarning, match="Use pickle from standard library"
):
fnpickle(obj2, fn, protocol=p)
res = fnunpickle(fn)
assert res == obj2 |
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle multiple objects from a single file. | def test_fnpickling_many(tmp_path):
"""
Tests the `fnpickle` and `fnunpickle` functions' ability to pickle
and unpickle multiple objects from a single file.
"""
fn = str(tmp_path / "test3.pickle")
# now try multiples
obj3 = 328.3432
obj4 = "blahblahfoo"
with pytest.warns(
AstropyDeprecationWarning, match="Use pickle from standard library"
):
fnpickle(obj3, fn)
fnpickle(obj4, fn, append=True)
res = fnunpickle(fn, number=-1)
assert len(res) == 2
assert res[0] == obj3
assert res[1] == obj4
fnpickle(obj4, fn, append=True)
res = fnunpickle(fn, number=2)
assert len(res) == 2
with pytest.raises(EOFError):
fnunpickle(fn, number=5) |
Test that astropy core objects in ``meta`` are serialized. | def test_ecsv_astropy_objects_in_meta():
"""
Test that astropy core objects in ``meta`` are serialized.
"""
t = QTable([[1, 2] * u.m, [4, 5]], names=["a", "b"])
tm = _get_time()
c = SkyCoord(
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
unit="deg",
frame="fk4",
obstime=Time("2016-01-02"),
location=EarthLocation(1000, 2000, 3000, unit=u.km),
)
unit = u.m / u.s
t.meta = {"tm": tm, "c": c, "unit": unit}
out = StringIO()
t.write(out, format="ascii.ecsv")
t2 = QTable.read(out.getvalue(), format="ascii.ecsv")
compare_time(tm, t2.meta["tm"])
compare_coord(c, t2.meta["c"])
assert t2.meta["unit"] == unit |
Test that dumping object arrays fails. | def test_yaml_dump_of_object_arrays_fail():
"""Test that dumping object arrays fails."""
with pytest.raises(TypeError, match="cannot serialize"):
dump(np.array([1, 2, 3], dtype=object)) |
Test that loading object arrays fails.
The string to load was obtained by suppressing the exception and dumping
``np.array([1, 2, 3], dtype=object)`` to a yaml file. | def test_yaml_load_of_object_arrays_fail():
"""Test that dumping and loading object arrays fails.
The string to load was obtained by suppressing the exception and dumping
``np.array([1, 2, 3], dtype=object)`` to a yaml file.
"""
with pytest.raises(TypeError, match="cannot load numpy array"):
load(
"""!numpy.ndarray
buffer: !!binary |
WndBQUFISUFBQUJwQUFBQQ==
dtype: object
order: C
shape: !!python/tuple [3]"""
) |
Makes a function for a method on UnifiedIORegistry.
.. todo::
Make kwarg "registry" not hidden.
Returns
-------
wrapper : callable
Signature matches method on UnifiedIORegistry.
Accepts (hidden) kwarg "registry". default is ``default_registry``. | def _make_io_func(method_name):
"""Makes a function for a method on UnifiedIORegistry.
.. todo::
Make kwarg "registry" not hidden.
Returns
-------
wrapper : callable
Signature matches method on UnifiedIORegistry.
Accepts (hidden) kwarg "registry". default is ``default_registry``.
"""
@functools.wraps(getattr(default_registry, method_name))
def wrapper(*args, registry=None, **kwargs):
# written this way in case ever controlled by ScienceState
if registry is None:
registry = default_registry
# get and call bound method from registry instance
return getattr(registry, method_name)(*args, **kwargs)
return wrapper |
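A hedged sketch of how such wrappers are typically exposed as module-level functions; the method names below exist on UnifiedIORegistry, but wiring them up exactly this way in a given compat module is an assumption:
read = _make_io_func("read")
write = _make_io_func("write")
get_formats = _make_io_func("get_formats")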
Just check a fact that we rely on in other tests. | def test_fmcls1_fmtcls2(fmtcls1, fmtcls2):
"""Just check a fact that we rely on in other tests."""
assert fmtcls1[1] is fmtcls2[1] |
Test all the compat methods are in the directory | def test_dir():
"""Test all the compat methods are in the directory"""
dc = dir(compat)
for n in compat.__all__:
assert n in dc |
Test dynamically created documentation help via the I/O registry for 'fits'. | def test_table_read_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.read.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='fits') documentation" in doc
assert "hdu : int or str, optional" in doc |
Test dynamically created documentation help via the I/O registry for 'ascii'. | def test_table_read_help_ascii():
"""
Test dynamically created documentation help via the I/O registry for 'ascii'.
"""
out = StringIO()
Table.read.help("ascii", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.read(format='ascii') documentation" in doc
assert "delimiter : str" in doc
assert "ASCII reader 'ascii' details" in doc
assert "Character-delimited table with a single header line" in doc |
Test dynamically created documentation help via the I/O registry for 'hdf5'. | def test_table_write_help_hdf5():
"""
Test dynamically created documentation help via the I/O registry for 'hdf5'.
"""
out = StringIO()
Table.write.help("hdf5", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='hdf5') documentation" in doc
assert "Write a Table object to an HDF5 file" in doc
assert "compression : bool or str or int" in doc |
Test getting list of available formats | def test_list_formats():
"""
Test getting list of available formats
"""
out = StringIO()
CCDData.write.list_formats(out)
output = out.getvalue()
assert (
output
== """\
Format Read Write Auto-identify
------ ---- ----- -------------
fits Yes Yes Yes"""
) |
Test dynamically created documentation help via the I/O registry for 'fits'. | def test_table_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
Table.write.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='fits') documentation" in doc
assert "Write a Table object to a FITS file" in doc |
Test dynamically created documentation help via the I/O registry for no
format provided. | def test_table_write_help_no_format():
"""
Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.write.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" in doc
assert "The available built-in formats" in doc |
Test dynamically created documentation help via the I/O registry for no
format provided. | def test_table_read_help_no_format():
"""
Test dynamically created documentation help via the I/O registry for no
format provided.
"""
out = StringIO()
Table.read.help(out=out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.read general documentation" in doc
assert "The available built-in formats" in doc |
Test dynamically created documentation help via the I/O registry for 'fits'. | def test_ccddata_write_help_fits():
"""
Test dynamically created documentation help via the I/O registry for 'fits'.
"""
out = StringIO()
CCDData.write.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.write(format='fits') documentation" in doc
assert "Write CCDData object to FITS file" in doc
assert "key_uncertainty_type : str, optional" in doc |
Test dynamically created documentation help via the I/O registry for
CCDData 'fits'. | def test_ccddata_read_help_fits():
"""Test dynamically created documentation help via the I/O registry for
CCDData 'fits'.
"""
out = StringIO()
CCDData.read.help("fits", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "CCDData.read(format='fits') documentation" in doc
assert "Generate a CCDData object from a FITS file" in doc
assert "hdu_uncertainty : str or None, optional" in doc |
Test dynamically created documentation help via the I/O registry for
'jsviewer'. | def test_table_write_help_jsviewer():
"""
Test dynamically created documentation help via the I/O registry for
'jsviewer'.
"""
out = StringIO()
Table.write.help("jsviewer", out)
doc = out.getvalue()
# Check a smattering of expected content
assert "Table.write general documentation" not in doc
assert "The available built-in formats" not in doc
assert "Table.write(format='jsviewer') documentation" in doc |
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a VOTABLE_ xml file.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file. | def is_votable(origin, filepath, fileobj, *args, **kwargs):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
origin : str or readable file-like
Path or file object containing a VOTABLE_ xml file.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
from . import is_votable
if origin == "read":
if fileobj is not None:
try:
result = is_votable(fileobj)
finally:
fileobj.seek(0)
return result
elif filepath is not None:
return is_votable(filepath)
return isinstance(args[0], (VOTableFile, TableElement))
else:
return False |
Read a Table object from a VO table file.
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.TableElement`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.TableElement` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
**kwargs
Additional keyword arguments are passed on to `astropy.io.votable.parse`. | def read_table_votable(
input, table_id=None, use_names_over_ids=False, verify=None, **kwargs
):
"""
Read a Table object from a VO table file.
Parameters
----------
input : str or `~astropy.io.votable.tree.VOTableFile` or `~astropy.io.votable.tree.TableElement`
If a string, the filename to read the table from. If a
:class:`~astropy.io.votable.tree.VOTableFile` or
:class:`~astropy.io.votable.tree.TableElement` object, the object to extract
the table from.
table_id : str or int, optional
The table to read in. If a `str`, it is an ID corresponding
to the ID of the table in the file (not all VOTable files
assign IDs to their tables). If an `int`, it is the index of
the table in the file, starting at 0.
use_names_over_ids : bool, optional
When `True` use the ``name`` attributes of columns as the names
of columns in the `~astropy.table.Table` instance. Since names
are not guaranteed to be unique, this may cause some columns
to be renamed by appending numbers to the end. Otherwise
(default), use the ID attributes as the column names.
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to ``'ignore'``.
**kwargs
Additional keyword arguments are passed on to `astropy.io.votable.parse`.
"""
if not isinstance(input, (VOTableFile, TableElement)):
input = parse(input, table_id=table_id, verify=verify, **kwargs)
# Parse all table objects
table_id_mapping = {}
tables = []
if isinstance(input, VOTableFile):
for table in input.iter_tables():
if table.ID is not None:
table_id_mapping[table.ID] = table
tables.append(table)
if len(tables) > 1:
if table_id is None:
raise ValueError(
"Multiple tables found: table id should be set via"
" the table_id= argument. The available tables are"
f" {', '.join(table_id_mapping)}, or integers less than"
f" {len(tables)}."
)
elif isinstance(table_id, str):
if table_id in table_id_mapping:
table = table_id_mapping[table_id]
else:
raise ValueError(f"No tables with id={table_id} found")
elif isinstance(table_id, int):
if table_id < len(tables):
table = tables[table_id]
else:
raise IndexError(
f"Table index {table_id} is out of range. {len(tables)} tables"
" found"
)
elif len(tables) == 1:
table = tables[0]
else:
raise ValueError("No table found")
elif isinstance(input, TableElement):
table = input
# Convert to an astropy.table.Table object
return table.to_table(use_names_over_ids=use_names_over_ids) |
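Illustrative use through the unified Table interface, which dispatches to this reader for format="votable" (the file name "catalog.xml" is hypothetical):
from astropy.table import Table

t = Table.read("catalog.xml", format="votable", table_id=0, use_names_over_ids=True)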
Write a Table object to a VO table file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`astropy:votable-serialization`. | def write_table_votable(
input, output, table_id=None, overwrite=False, tabledata_format=None
):
"""
Write a Table object to a VO table file.
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
table_id : str, optional
The table ID to use. If this is not specified, the 'ID' keyword in the
``meta`` object of the table will be used.
overwrite : bool, optional
Whether to overwrite any existing file without warning.
tabledata_format : str, optional
The format of table data to write. Must be one of ``tabledata``
(text representation), ``binary`` or ``binary2``. Default is
``tabledata``. See :ref:`astropy:votable-serialization`.
"""
# Only those columns which are instances of BaseColumn or Quantity can be written
unsupported_cols = input.columns.not_isinstance((BaseColumn, Quantity))
if unsupported_cols:
unsupported_names = [col.info.name for col in unsupported_cols]
raise ValueError(
f"cannot write table with mixin column(s) {unsupported_names} to VOTable"
)
# Check if output file already exists
if isinstance(output, str) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise OSError(NOT_OVERWRITING_MSG.format(output))
# Create a new VOTable file
table_file = from_table(input, table_id=table_id)
# Write out file
table_file.to_xml(output, tabledata_format=tabledata_format) |
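Illustrative counterpart for writing (the file name and column values are hypothetical; tabledata_format and overwrite are forwarded to this writer by the unified I/O layer):
from astropy.table import Table

t = Table({"ra": [10.0, 20.0], "dec": [-5.0, 5.0]})
t.write("catalog.xml", format="votable", tabledata_format="binary", overwrite=True)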
This function allows writing a VOTable (XML) with PARQUET
serialization. This functionality is currently not
supported by Astropy itself, because this method requires
writing multiple files: a VOTable/XML file and a PARQUET
table. This function provides a wrapper that makes this
possible. The concept is simple and could probably be
improved substantially. We first save the PARQUET table
using Astropy functionality. Then we create a VOTable with
binary serialization, which is later modified to include an
external reference to the created PARQUET table file.
Parameters
----------
input : `~astropy.table.Table`
The table to write out.
output : str
The filename to write the table to.
column_metadata : dict
Contains the metadata for the columns such as "unit" or
"ucd" or "utype".
(Example: {"id": {"unit": "", "ucd": "meta.id", "utype": "none"},
"mass": {"unit": "solMass", "ucd": "phys.mass", "utype": "none"}})
overwrite : bool, optional
Whether to overwrite any existing file without warning.
Returns
-------
This function creates a VOTable serialized in Parquet.
Two files are written:
1. The VOTable (XML file) including the column metadata and a
``STREAM`` tag that embeds the PARQUET table.
2. The PARQUET table itself.
Both files are stored at the same location. The name of the
VOTable is ``output``, and the name of the embedded PARQUET
file is f"{output}.parquet". | def write_table_votable_parquet(input, output, column_metadata, *, overwrite=False):
"""
This function allows writing a VOTable (XML) with PARQUET
serialization. This functionality is currently not
supported by Astropy itself, because this method requires
writing multiple files: a VOTable/XML file and a PARQUET
table. This function provides a wrapper that makes this
possible. The concept is simple and could probably be
improved substantially. We first save the PARQUET table
using Astropy functionality. Then we create a VOTable with
binary serialization, which is later modified to include an
external reference to the created PARQUET table file.
Parameters
----------
input : `~astropy.table.Table`
The table to write out.
output : str
The filename to write the table to.
column_metadata : dict
Contains the metadata for the columns such as "unit" or
"ucd" or "utype".
(Example: {"id": {"unit": "", "ucd": "meta.id", "utype": "none"},
"mass": {"unit": "solMass", "ucd": "phys.mass", "utype": "none"}})
overwrite : bool, optional
Whether to overwrite any existing file without warning.
Returns
-------
This function creates a VOTable serialized in Parquet.
Two files are written:
1. The VOTable (XML file) including the column metadata and a
``STREAM`` tag that embeds the PARQUET table.
2. The PARQUET table itself.
Both files are stored at the same location. The name of the
VOTable is ``output``, and the name of the embedded PARQUET
file is f"{output}.parquet".
"""
# First save the PARQUET file.
parquet_filename = f"{output}.parquet"
path_type = f"file:{'//' if os.path.isabs(parquet_filename) else ''}"
if os.path.exists(parquet_filename) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(parquet_filename))
input.write(parquet_filename, format="parquet", overwrite=overwrite)
# Second, save table as binary VOT file. We will modify this file
# later to incorporate the PARQUET stream. Note that here we use the full
# table data so we get the datatype and arraysize correct. Later
# we can maybe make this more efficient and instead write the
# VOTable file from scratch, especially the FIELDS, which are the
# most important.
votablefile = VOTableFile()
votable = votablefile.from_table(input)
# Add the fields
# Maybe there is a smarter way to do this iteratively.
for field in votable.resources[0].tables[0].fields:
field.unit = column_metadata[field.name]["unit"]
field.ucd = column_metadata[field.name]["ucd"]
field.utype = column_metadata[field.name]["utype"]
if os.path.exists(output) and not overwrite:
raise OSError(NOT_OVERWRITING_MSG.format(output))
votable.to_xml(output, tabledata_format="binary")
# Now reopen the binary file and replace the binary part with
# the stream referencing the PARQUET file. This is all a bit flimsy
# and needs to be made more bullet-proof.
with open(output) as f:
lines = f.readlines()
# get start and end of <BINARY> tag
line_start = np.where(["<BINARY>" in line for line in lines])[0][0]
line_stop = np.where(["</BINARY>" in line for line in lines])[0][0]
# Replace the BINARY block with a PARQUET stream pointing at the external file.
lines[line_start] = '<PARQUET type="VOTable-remote-file">\n'
lines[line_start + 1] = f'<STREAM href="{path_type}{parquet_filename}"/>\n'
lines[line_start + 2] = "</PARQUET>\n"
# remove last line
_ = lines.pop(line_stop)
# write new file
with open(output, "w") as f:
f.write("".join(lines)) |
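A hedged usage sketch (assumes pyarrow is available for the Parquet part; the output name "stars.xml" and the column values are illustrative, and the metadata dict mirrors the example in the docstring):
from astropy.table import Table

tbl = Table({"id": [1, 2], "mass": [0.5, 1.2]})
meta = {
    "id": {"unit": "", "ucd": "meta.id", "utype": "none"},
    "mass": {"unit": "solMass", "ucd": "phys.mass", "utype": "none"},
}
write_table_votable_parquet(tbl, "stars.xml", meta, overwrite=True)
# writes "stars.xml" (VOTable) plus "stars.xml.parquet" (the data)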
Masked arrays of zero length that also have a mask of zero length
cause problems in Numpy (at least in 1.6.2). This function
creates a masked array from data and a mask, unless it is zero
length. | def _make_masked_array(data, mask):
"""
Masked arrays of zero length that also have a mask of zero length
cause problems in Numpy (at least in 1.6.2). This function
creates a masked array from data and a mask, unless it is zero
length.
"""
# np.ma doesn't like setting mask to []
if len(data):
return ma.array(np.array(data), mask=np.array(mask, dtype="bool"))
else:
return ma.array(np.array(data)) |
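A quick sketch of both branches, regular and zero-length input (values are illustrative):
full = _make_masked_array([1.0, 2.0, 3.0], [False, True, False])  # element 1 masked
empty = _make_masked_array([], [])  # zero-length: returned without an explicit mask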
Converts a bit array (a string of bits in a bytes object) to a
boolean Numpy array.
Parameters
----------
data : bytes
The bit array. The most significant byte is read first.
length : int
The number of bits to read. The least significant bits in the
data bytes beyond length will be ignored.
Returns
-------
array : numpy bool array | def bitarray_to_bool(data, length):
"""
Converts a bit array (a string of bits in a bytes object) to a
boolean Numpy array.
Parameters
----------
data : bytes
The bit array. Bytes are read in order; within each byte the most
significant bit is read first.
length : int
The number of bits to read. The least significant bits in the
data bytes beyond length will be ignored.
Returns
-------
array : numpy bool array
"""
results = []
for byte in data:
for bit_no in range(7, -1, -1):
bit = byte & (1 << bit_no)
bit = bit != 0
results.append(bit)
if len(results) == length:
break
if len(results) == length:
break
return np.array(results, dtype="b1") |
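For example (the byte value is illustrative): 0xA0 is 0b10100000, so reading three bits yields True, False, True.

bits = bitarray_to_bool(b"\xa0", 3)
assert bits.tolist() == [True, False, True]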
Converts a numpy boolean array to a bit array (a string of bits in
a bytes object).
Parameters
----------
value : numpy bool array
Returns
-------
bit_array : bytes
The first value in the input array will be the most
significant bit in the result. The length will be `floor((N +
7) / 8)` where `N` is the length of `value`. | def bool_to_bitarray(value):
"""
Converts a numpy boolean array to a bit array (a string of bits in
a bytes object).
Parameters
----------
value : numpy bool array
Returns
-------
bit_array : bytes
The first value in the input array will be the most
significant bit in the result. The length will be `floor((N +
7) / 8)` where `N` is the length of `value`.
"""
value = value.flat
bit_no = 7
byte = 0
bytes = []
for v in value:
if v:
byte |= 1 << bit_no
if bit_no == 0:
bytes.append(byte)
bit_no = 7
byte = 0
else:
bit_no -= 1
if bit_no != 7:
bytes.append(byte)
return struct.pack(f"{len(bytes)}B", *bytes) |
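A round-trip sketch with the companion reader defined above; the first array value lands in the most significant bit of the first byte.

import numpy as np

arr = np.array([True, False, True])
packed = bool_to_bitarray(arr)   # b"\xa0"
assert bitarray_to_bool(packed, len(arr)).tolist() == arr.tolist()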
Get an appropriate converter instance for a given field.
Parameters
----------
field : astropy.io.votable.tree.Field
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter | def get_converter(field, config=None, pos=None):
"""
Get an appropriate converter instance for a given field.
Parameters
----------
field : astropy.io.votable.tree.Field
config : dict, optional
Parser configuration dictionary
pos : tuple
Position in the input XML file. Used for error messages.
Returns
-------
converter : astropy.io.votable.converters.Converter
"""
if config is None:
config = {}
if field.datatype not in converter_mapping:
vo_raise(E06, (field.datatype, field.ID), config)
cls = converter_mapping[field.datatype]
converter = cls(field, config, pos)
arraysize = field.arraysize
# With numeric datatypes, special things need to happen for
# arrays.
if field.datatype not in ("char", "unicodeChar") and arraysize is not None:
if arraysize[-1] == "*":
arraysize = arraysize[:-1]
last_x = arraysize.rfind("x")
if last_x == -1:
arraysize = ""
else:
arraysize = arraysize[:last_x]
fixed = False
else:
fixed = True
if arraysize != "":
arraysize = [int(x) for x in arraysize.split("x")]
arraysize.reverse()
else:
arraysize = []
if arraysize != []:
converter = converter.array_type(field, converter, arraysize, config)
if not fixed:
converter = converter.vararray_type(field, converter, arraysize, config)
return converter |
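A hedged sketch of typical use: converters are normally requested for the FIELDs of a parsed table. The file name is illustrative, and ``parse`` is the module-level reader defined later in this document.

votable = parse("example.xml")   # illustrative input file
field = votable.get_first_table().fields[0]
conv = get_converter(field)
# The returned converter exposes parse()/output() methods used to read and
# write TABLEDATA values for this field's datatype and arraysize.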
Converts a numpy dtype and shape to a dictionary of attributes for
a VOTable FIELD element that correspond to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element. | def numpy_to_votable_dtype(dtype, shape):
"""
Converts a numpy dtype and shape to a dictionary of attributes for
a VOTable FIELD element that correspond to that type.
Parameters
----------
dtype : Numpy dtype instance
shape : tuple
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
if dtype.num not in numpy_dtype_to_field_mapping:
raise TypeError(f"{dtype!r} can not be represented in VOTable")
if dtype.char == "S":
return {"datatype": "char", "arraysize": str(dtype.itemsize)}
elif dtype.char == "U":
return {"datatype": "unicodeChar", "arraysize": str(dtype.itemsize // 4)}
else:
result = {"datatype": numpy_dtype_to_field_mapping[dtype.num]}
if len(shape):
result["arraysize"] = "x".join(str(x) for x in shape)
return result |
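Illustrative calls; the exact datatype strings come from the module's dtype-to-FIELD mapping, so they are stated here only as expected values.

import numpy as np

numpy_to_votable_dtype(np.dtype("float64"), ())    # expected {'datatype': 'double'}
numpy_to_votable_dtype(np.dtype("S8"), ())         # expected {'datatype': 'char', 'arraysize': '8'}
numpy_to_votable_dtype(np.dtype("int32"), (3, 2))  # expected {'datatype': 'int', 'arraysize': '3x2'}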
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
This necessarily performs some heuristics to determine the
type of variable-length array fields, since they are not directly
supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
- If all elements are numpy arrays of the same dtype and with a
consistent shape in all but the first dimension, it creates a
variable length array of fixed sized arrays. If the dtypes
match, but the shapes do not, a variable length array is
created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element. | def table_column_to_votable_datatype(column):
"""
Given a `astropy.table.Column` instance, returns the attributes
necessary to create a VOTable FIELD element that corresponds to
the type of the column.
This necessarily performs some heuristics to determine the
type of variable-length array fields, since they are not directly
supported by Numpy.
If the column has dtype of "object", it performs the following
tests:
- If all elements are byte or unicode strings, it creates a
variable-length byte or unicode field, respectively.
- If all elements are numpy arrays of the same dtype and with a
consistent shape in all but the first dimension, it creates a
variable length array of fixed sized arrays. If the dtypes
match, but the shapes do not, a variable length array is
created.
If the dtype of the input is not understood, it sets the data type
to the most inclusive: a variable length unicodeChar array.
Parameters
----------
column : `astropy.table.Column` instance
Returns
-------
attributes : dict
A dict containing 'datatype' and 'arraysize' keys that can be
set on a VOTable FIELD element.
"""
votable_string_dtype = None
if column.info.meta is not None:
votable_string_dtype = column.info.meta.get("_votable_string_dtype")
if column.dtype.char == "O":
if votable_string_dtype is not None:
return {"datatype": votable_string_dtype, "arraysize": "*"}
elif isinstance(column[0], np.ndarray):
dtype, shape = _all_matching_dtype(column)
if dtype is not False:
result = numpy_to_votable_dtype(dtype, shape)
if "arraysize" not in result:
result["arraysize"] = "*"
else:
result["arraysize"] += "*"
return result
# All bets are off, do the most generic thing
return {"datatype": "unicodeChar", "arraysize": "*"}
# For fixed size string columns, datatype here will be unicodeChar,
# but honor the original FIELD datatype if present.
result = numpy_to_votable_dtype(column.dtype, column.shape[1:])
if result["datatype"] == "unicodeChar" and votable_string_dtype == "char":
result["datatype"] = "char"
return result |
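Illustrative cases, including the object-dtype heuristic for ragged arrays; results are stated as expected values.

import numpy as np
from astropy.table import Column

col = Column(np.array([1.5, 2.5]))
table_column_to_votable_datatype(col)      # expected {'datatype': 'double'}

ragged = Column(np.empty(2, dtype=object))
ragged[0] = np.arange(3, dtype="int32")
ragged[1] = np.arange(5, dtype="int32")
table_column_to_votable_datatype(ragged)   # expected {'datatype': 'int', 'arraysize': '*'}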
Warn or raise an exception, depending on the verify setting. | def warn_or_raise(
warning_class, exception_class=None, args=(), config=None, pos=None, stacklevel=1
):
"""
Warn or raise an exception, depending on the verify setting.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling warn_or_raise without config should not
# silence the warnings.
config_value = config.get("verify", "warn")
if config_value == "exception":
if exception_class is None:
exception_class = warning_class
vo_raise(exception_class, args, config, pos)
elif config_value == "warn":
vo_warn(warning_class, args, config, pos, stacklevel=stacklevel + 1) |
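A small illustration of how the ``verify`` setting selects between warning and raising; ``W07`` and its two-item ``args`` tuple are used purely as an example here.

from astropy.io.votable.exceptions import W07   # example warning class

# With verify="warn" a VOWarning is emitted:
warn_or_raise(W07, args=("equinox", "200A"), config={"verify": "warn"})
# With verify="exception" the same call raises W07 instead:
# warn_or_raise(W07, args=("equinox", "200A"), config={"verify": "exception"})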
Raise an exception, with proper position information if available. | def vo_raise(exception_class, args=(), config=None, pos=None):
"""
Raise an exception, with proper position information if available.
"""
if config is None:
config = {}
raise exception_class(args, config, pos) |
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code. | def vo_reraise(exc, config=None, pos=None, additional=""):
"""
Raise an exception, with proper position information if available.
Restores the original traceback of the exception, and should only
be called within an "except:" block of code.
"""
if config is None:
config = {}
message = _format_message(str(exc), exc.__class__.__name__, config, pos)
if message.split()[0] == str(exc).split()[0]:
message = str(exc)
if len(additional):
message += " " + additional
exc.args = (message,)
raise exc |
Warn, with proper position information if available. | def vo_warn(warning_class, args=(), config=None, pos=None, stacklevel=1):
"""
Warn, with proper position information if available.
"""
if config is None:
config = {}
# NOTE: the default here is deliberately warn rather than ignore, since
# one would expect that calling vo_warn without config should not
# silence the warnings.
if config.get("verify", "warn") != "ignore":
warning = warning_class(args, config, pos)
_suppressed_warning(warning, config, stacklevel=stacklevel + 1) |
Parses the vo warning string back into its parts. | def parse_vowarning(line):
"""
Parses the vo warning string back into its parts.
"""
result = {}
match = _warning_pat.search(line)
if match:
result["warning"] = warning = match.group("warning")
if warning is not None:
result["is_warning"] = warning[0].upper() == "W"
result["is_exception"] = not result["is_warning"]
result["number"] = int(match.group("warning")[1:])
result["doc_url"] = f"io/votable/api_exceptions.html#{warning.lower()}"
else:
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = True
result["number"] = None
result["doc_url"] = None
try:
result["nline"] = int(match.group("nline"))
except ValueError:
result["nline"] = 0
try:
result["nchar"] = int(match.group("nchar"))
except ValueError:
result["nchar"] = 0
result["message"] = match.group("rest")
result["is_something"] = True
else:
result["warning"] = None
result["is_warning"] = False
result["is_exception"] = False
result["is_other"] = False
result["is_something"] = False
if not isinstance(line, str):
line = line.decode("utf-8")
result["message"] = line
return result |
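For instance, assuming the standard ``filename:line:char: Wnn: message`` layout produced by the formatter (the message text is illustrative):

w = parse_vowarning("catalog.xml:10:6: W27: COOSYS deprecated in VOTable 1.2")
assert w["is_warning"] and w["warning"] == "W27"
assert (w["nline"], w["nchar"]) == (10, 6)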
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise. | def parse(
source,
columns=None,
invalid="exception",
verify=None,
chunk_size=tree.DEFAULT_CHUNK_SIZE,
table_number=None,
table_id=None,
filename=None,
unit_format=None,
datatype_mapping=None,
_debug_python_based_parser=False,
):
"""
Parses a VOTABLE_ xml file (or file-like object), and returns a
`~astropy.io.votable.tree.VOTableFile` object.
Parameters
----------
source : path-like or file-like
Path or file-like object containing a VOTABLE_ xml file.
If file, must be readable.
columns : sequence of str, optional
List of field names to include in the output. The default is
to include all fields.
invalid : str, optional
One of the following values:
- 'exception': throw an exception when an invalid value is
encountered (default)
- 'mask': mask out invalid values
verify : {'ignore', 'warn', 'exception'}, optional
When ``'exception'``, raise an error when the file violates the spec,
otherwise either issue a warning (``'warn'``) or silently continue
(``'ignore'``). Warnings may be controlled using the standard Python
mechanisms. See the `warnings` module in the Python standard library
for more information. When not provided, uses the configuration setting
``astropy.io.votable.verify``, which defaults to 'ignore'.
.. versionchanged:: 4.0
``verify`` replaces the ``pedantic`` argument, which will be
deprecated in future.
.. versionchanged:: 5.0
The ``pedantic`` argument is deprecated.
.. versionchanged:: 6.0
The ``pedantic`` argument is removed.
chunk_size : int, optional
The number of rows to read before converting to an array.
Higher numbers are likely to be faster, but will consume more
memory.
table_number : int, optional
The number of the table in the file to read in. If `None`, all
tables will be read. If a number, 0 refers to the first table
in the file, and only that numbered table will be parsed and
read in. Should not be used with ``table_id``.
table_id : str, optional
The ID of the table in the file to read in. Should not be
used with ``table_number``.
filename : str, optional
A filename, URL or other identifier to use in error messages.
If *filename* is None and *source* is a string (i.e. a path),
then *source* will be used as a filename for error messages.
Therefore, *filename* is only required when source is a
file-like object.
unit_format : str, astropy.units.format.Base instance or None, optional
The unit format to use when parsing unit attributes. If a
string, must be the name of a unit formatter. The built-in
formats include ``generic``, ``fits``, ``cds``, and
``vounit``. A custom formatter may be provided by passing a
`~astropy.units.format.Base` instance. If `None` (default),
the unit format to use will be the one specified by the
VOTable specification (which is ``cds`` up to version 1.3 of
VOTable, and ``vounit`` in more recent versions of the spec).
datatype_mapping : dict, optional
A mapping of datatype names (`str`) to valid VOTable datatype names
(str). For example, if the file being read contains the datatype
"unsignedInt" (an invalid datatype in VOTable), include the mapping
``{"unsignedInt": "long"}``.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` object
See Also
--------
astropy.io.votable.exceptions : The exceptions this function may raise.
"""
from . import VERIFY_OPTIONS, conf
invalid = invalid.lower()
if invalid not in ("exception", "mask"):
raise ValueError(
"accepted values of ``invalid`` are: ``'exception'`` or ``'mask'``."
)
if verify is None:
verify = conf.verify
elif verify not in VERIFY_OPTIONS:
raise ValueError(f"verify should be one of {'/'.join(VERIFY_OPTIONS)}")
if datatype_mapping is None:
datatype_mapping = {}
config = {
"columns": columns,
"invalid": invalid,
"verify": verify,
"chunk_size": chunk_size,
"table_number": table_number,
"filename": filename,
"unit_format": unit_format,
"datatype_mapping": datatype_mapping,
}
if isinstance(source, str):
source = os.path.expanduser(source)
if filename is None and isinstance(source, str):
config["filename"] = source
with iterparser.get_xml_iterator(
source, _debug_python_based_parser=_debug_python_based_parser
) as iterator:
return tree.VOTableFile(config=config, pos=(1, 1)).parse(iterator, config) |
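A typical call (file and column names are illustrative):

votable = parse("catalog.xml", columns=["ra", "dec"], verify="warn")
table = votable.get_first_table().to_table()   # convert to an astropy Table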
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.TableElement`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.TableElement` object | def parse_single_table(source, **kwargs):
"""
Parses a VOTABLE_ xml file (or file-like object), reading and
returning only the first `~astropy.io.votable.tree.TableElement`
instance.
See `parse` for a description of the keyword arguments.
Returns
-------
votable : `~astropy.io.votable.tree.TableElement` object
"""
if kwargs.get("table_number") is None:
kwargs["table_number"] = 0
votable = parse(source, **kwargs)
return votable.get_first_table() |
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or :term:`file-like (writeable)`
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
Writes a `~astropy.io.votable.tree.VOTableFile` to a VOTABLE_ xml file.
Parameters
----------
table : `~astropy.io.votable.tree.VOTableFile` or `~astropy.table.Table` instance.
file : str or :term:`file-like (writeable)`
Path or file object to write to
tabledata_format : str, optional
Override the format of the table(s) data to write. Must be
one of ``tabledata`` (text representation), ``binary`` or
``binary2``. By default, use the format that was specified in
each ``table`` object as it was created or read in. See
:ref:`astropy:votable-serialization`.
"""
from astropy.table import Table
if isinstance(table, Table):
table = tree.VOTableFile.from_table(table)
elif not isinstance(table, tree.VOTableFile):
raise TypeError(
"first argument must be astropy.io.vo.VOTableFile or "
"astropy.table.Table instance"
)
table.to_xml(
file, tabledata_format=tabledata_format, _debug_python_based_parser=True
) |
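A round-trip sketch using the readers defined in this module (file name illustrative):

from astropy.table import Table

t = Table({"a": [1, 2], "b": [3.0, 4.0]})
writeto(t, "roundtrip.xml", tabledata_format="tabledata")
back = parse_single_table("roundtrip.xml").to_table()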
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file, or a `~pathlib.Path`
object pointing to one.
If a file-like object, it must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string. | def validate(source, output=sys.stdout, xmllint=False, filename=None):
"""
Prints a validation report for the given file.
Parameters
----------
source : path-like or file-like
Path to a VOTABLE_ xml file, or a `~pathlib.Path`
object pointing to one.
If a file-like object, it must be readable.
output : file-like, optional
Where to output the report. Defaults to ``sys.stdout``.
If `None`, the output will be returned as a string.
Must be writable.
xmllint : bool, optional
When `True`, also send the file to ``xmllint`` for schema and
DTD validation. Requires that ``xmllint`` is installed. The
default is `False`. ``source`` must be a file on the local
filesystem in order for ``xmllint`` to work.
filename : str, optional
A filename to use in the error messages. If not provided, one
will be automatically determined from ``source``.
Returns
-------
is_valid : bool or str
Returns `True` if no warnings were found. If ``output`` is
`None`, the return value will be a string.
"""
from astropy.utils.console import color_print, print_code_line
return_as_str = False
if output is None:
output = io.StringIO()
return_as_str = True
lines = []
votable = None
reset_vo_warnings()
if isinstance(source, str):
source = os.path.expanduser(source)
with data.get_readable_fileobj(source, encoding="binary") as fd:
content = fd.read()
content_buffer = io.BytesIO(content)
content_buffer.seek(0)
if filename is None:
if isinstance(source, str):
filename = source
elif hasattr(source, "name"):
filename = source.name
elif hasattr(source, "url"):
filename = source.url
else:
filename = "<unknown>"
with warnings.catch_warnings(record=True) as warning_lines:
warnings.resetwarnings()
warnings.simplefilter("always", exceptions.VOWarning, append=True)
try:
votable = parse(content_buffer, verify="warn", filename=filename)
except ValueError as e:
lines.append(str(e))
lines = [
str(x.message)
for x in warning_lines
if issubclass(x.category, exceptions.VOWarning)
] + lines
content_buffer.seek(0)
output.write(f"Validation report for {filename}\n\n")
if len(lines):
xml_lines = iterparser.xml_readlines(content_buffer)
for warning in lines:
w = exceptions.parse_vowarning(warning)
if not w["is_something"]:
output.write(w["message"])
output.write("\n\n")
else:
line = xml_lines[w["nline"] - 1]
warning = w["warning"]
if w["is_warning"]:
color = "yellow"
else:
color = "red"
color_print(
f"{w['nline']:d}: ",
"",
warning or "EXC",
color,
": ",
"",
textwrap.fill(
w["message"],
initial_indent=" ",
subsequent_indent=" ",
).lstrip(),
file=output,
)
print_code_line(line, w["nchar"], file=output)
output.write("\n")
else:
output.write("astropy.io.votable found no violations.\n\n")
success = 0
if xmllint and os.path.exists(filename):
from . import xmlutil
if votable is None:
version = "1.1"
else:
version = votable.version
success, stdout, stderr = xmlutil.validate_schema(filename, version)
if success != 0:
output.write("xmllint schema violations:\n\n")
output.write(stderr.decode("utf-8"))
else:
output.write("xmllint passed\n")
if return_as_str:
return output.getvalue()
return len(lines) == 0 and success == 0 |
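Two illustrative invocations (the path is illustrative):

ok = validate("catalog.xml")                   # prints the report, returns a bool
report = validate("catalog.xml", output=None)  # returns the report as a string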
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.TableElement` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance | def from_table(table, table_id=None):
"""
Given an `~astropy.table.Table` object, return a
`~astropy.io.votable.tree.VOTableFile` file structure containing
just that single table.
Parameters
----------
table : `~astropy.table.Table` instance
table_id : str, optional
If not `None`, set the given id on the returned
`~astropy.io.votable.tree.TableElement` instance.
Returns
-------
votable : `~astropy.io.votable.tree.VOTableFile` instance
"""
return tree.VOTableFile.from_table(table, table_id=table_id) |
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file. | def is_votable(source):
"""
Reads the header of a file to determine if it is a VOTable file.
Parameters
----------
source : path-like or file-like
Path or file object containing a VOTABLE_ xml file.
If file, must be readable.
Returns
-------
is_votable : bool
Returns `True` if the given file is a VOTable file.
"""
if isinstance(source, str):
source = os.path.expanduser(source)
try:
with iterparser.get_xml_iterator(source) as iterator:
for start, tag, d, pos in iterator:
if tag != "xml":
return False
break
for start, tag, d, pos in iterator:
if tag != "VOTABLE":
return False
break
return True
except ValueError:
return False |
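An illustrative guard before parsing (path illustrative):

if is_votable("catalog.xml"):
    votable = parse("catalog.xml")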
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called. | def reset_vo_warnings():
"""
Resets all of the vo warning state so that warnings that
have already been emitted will be emitted again. This is
used, for example, by `validate` which must emit all
warnings each time it is called.
"""
from . import converters, xmlutil
# -----------------------------------------------------------#
# This is a special variable used by the Python warnings #
# infrastructure to keep track of warnings that have #
# already been seen. Since we want to get every single #
# warning out of this, we have to delete all of them first. #
# -----------------------------------------------------------#
for module in (converters, exceptions, tree, xmlutil):
try:
del module.__warningregistry__
except AttributeError:
pass |
Masked arrays cannot be resized in place, and `np.resize` and
`ma.resize` are both incompatible with structured arrays.
Therefore, we do all this. | def _resize(masked, new_size):
"""
Masked arrays cannot be resized in place, and `np.resize` and
`ma.resize` are both incompatible with structured arrays.
Therefore, we do all this.
"""
new_array = ma.zeros((new_size,), dtype=masked.dtype)
length = min(len(masked), new_size)
new_array[:length] = masked[:length]
return new_array |
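A small sketch with a structured masked array (field names illustrative):

import numpy.ma as ma

arr = ma.zeros((2,), dtype=[("a", "f8"), ("b", "i4")])
bigger = _resize(arr, 5)   # new length 5; the original 2 rows are copied over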
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute. | def _lookup_by_attr_factory(attr, unique, iterator, element_name, doc):
"""
Creates a function useful for looking up an element by a given
attribute.
Parameters
----------
attr : str
The attribute name
unique : bool
Should be `True` if the attribute is unique and therefore this
should return only one value. Otherwise, returns a list of
values.
iterator : generator
A generator that iterates over some arbitrary set of elements
element_name : str
The XML element name of the elements being iterated over (used
for error messages only).
doc : str
A docstring to apply to the generated function.
Returns
-------
factory : function
A function that looks up an element by the given attribute.
"""
def lookup_by_attr(self, ref, before=None):
"""
Given a string *ref*, finds the first element in the iterator
where the given attribute == *ref*. If *before* is provided,
will stop searching at the object *before*. This is
important, since "forward references" are not allowed in the
VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if getattr(element, attr, None) == ref:
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if getattr(element, attr, None) == ref:
yield element
def lookup_by_attr_unique(self, ref, before=None):
for element in lookup_by_attr(self, ref, before=before):
return element
raise KeyError(
f"No {element_name} with {attr} '{ref}' found before the referencing "
f"{element_name}"
)
if unique:
lookup_by_attr_unique.__doc__ = doc
return lookup_by_attr_unique
else:
lookup_by_attr.__doc__ = doc
return lookup_by_attr |
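A schematic, purely hypothetical container showing how the factory attaches a lookup method to a tree-like element; the class and its field objects are invented for illustration only.

class _Container:
    def __init__(self, fields):
        self._fields = fields

    def iter_fields(self):
        yield from self._fields

    # Returns the first yielded element whose ID attribute equals the key,
    # or raises KeyError if none is found.
    get_field_by_id = _lookup_by_attr_factory(
        "ID", True, "iter_fields", "FIELD", "Find a FIELD by its ID."
    )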
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes. | def _lookup_by_id_or_name_factory(iterator, element_name, doc):
"""
Like `_lookup_by_attr_factory`, but looks in both the "ID" and
"name" attributes.
"""
def lookup_by_id_or_name(self, ref, before=None):
"""
Given a key *ref*, finds the first element in the iterator
with the attribute ID == *ref* or name == *ref*. If *before*
is provided, will stop searching at the object *before*. This
is important, since "forward references" are not allowed in
the VOTABLE format.
"""
for element in getattr(self, iterator)():
if element is before:
if ref in (element.ID, element.name):
vo_raise(
f"{element_name} references itself",
element._config,
element._pos,
KeyError,
)
break
if ref in (element.ID, element.name):
return element
raise KeyError(
f"No {element_name} with ID or name '{ref}' found before the referencing "
f"{element_name}"
)
lookup_by_id_or_name.__doc__ = doc
return lookup_by_id_or_name |
Get the default unit format as specified in the VOTable spec. | def _get_default_unit_format(config):
"""
Get the default unit format as specified in the VOTable spec.
"""
# The unit format changed between VOTable versions 1.3 and 1.4,
# see issue #10791.
if config["version_1_4_or_later"]:
return "vounit"
else:
return "cds" |
Get the unit format based on the configuration. | def _get_unit_format(config):
"""
Get the unit format based on the configuration.
"""
if config.get("unit_format") is None:
format = _get_default_unit_format(config)
else:
format = config["unit_format"]
return format |
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value | def check_astroyear(year, field, config=None, pos=None):
"""
Raises a `~astropy.io.votable.exceptions.VOTableSpecError` if
*year* is not a valid astronomical year as defined by the VOTABLE
standard.
Parameters
----------
year : str
An astronomical year string
field : str
The name of the field this year was found in (used for error
message)
config, pos : optional
Information about the source of the value
"""
if year is not None and re.match(r"^[JB]?[0-9]+([.][0-9]*)?$", year) is None:
warn_or_raise(W07, W07, (field, year), config, pos)
return False
return True |
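Illustrative values:

check_astroyear("J2000", "equinox")   # valid -> True
check_astroyear("2015.5", "epoch")    # valid -> True
check_astroyear("200A", "equinox")    # invalid -> warns (W07) and returns False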