def test_blackbody_array_temperature():
    """Regression test to make sure that the temperature can be an array."""
    multibb = BlackBody([100, 200, 300] * u.K)
    flux = multibb(1.2 * u.mm)
    np.testing.assert_allclose(
        flux.value, [1.804908e-12, 3.721328e-12, 5.638513e-12], rtol=1e-5
    )
    flux = multibb([2, 4, 6] * u.mm)
    np.testing.assert_allclose(
        flux.value, [6.657915e-13, 3.420677e-13, 2.291897e-13], rtol=1e-5
    )
    multibb = BlackBody(np.ones(4) * u.K)
    flux = multibb(np.ones((3, 4)) * u.mm)
    assert flux.shape == (3, 4)

def test_blackbody_dimensionless():
    """Test support for dimensionless (but not unscaled) units for scale."""
    T = 3000 * u.K
    r = 1e14 * u.cm
    DL = 100 * u.Mpc
    scale = np.pi * (r / DL) ** 2
    bb1 = BlackBody(temperature=T, scale=scale)
    # Even though we passed scale with units, we should be able to evaluate
    # with unitless values.
    bb1.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
    bb2 = BlackBody(temperature=T, scale=scale.to_value(u.dimensionless_unscaled))
    bb2.evaluate(0.5, T.value, scale.to_value(u.dimensionless_unscaled))
    # The bolometric flux for both cases should be equivalent.
    assert bb1.bolometric_flux == bb2.bolometric_flux

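For context on the NFW validation that follows: the model under test is the standard Navarro-Frenk-White density profile, for which

$$\rho(r) = \frac{\rho_s}{\frac{r}{r_s}\left(1 + \frac{r}{r_s}\right)^2}, \qquad r_s = \frac{r_\mathrm{virial}}{c},$$

where $\rho_s$ is the scale density (``rho_scale`` below), $r_s$ the scale radius, and $c$ the concentration. The expected values in the test respect this relation, e.g. $r_s = 2.0982 / 8.5 \approx 0.2468$ Mpc in the 200c case.
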
def test_NFW_evaluate(mass):
    """Evaluation, density, and radii validation of NFW model."""
    # Test parameters
    concentration = 8.5
    redshift = 0.63
    cosmo = cosmology.Planck15

    # Parsec tests
    # 200c Overdensity
    massfactor = ("critical", 200)
    n200c = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200c(3.0 * u.Mpc),
        (
            3.709693508e12 * (u.solMass / u.Mpc**3),
            7.376391187e42 * (u.kg / u.Mpc**3),
        ),
    )
    assert_quantity_allclose(
        n200c.rho_scale, (7800150779863018.0 * (u.solMass / u.Mpc**3))
    )
    assert_quantity_allclose(n200c.r_s, (0.24684627641195428 * u.Mpc))
    assert_quantity_allclose(n200c.r_virial, (2.0981933495016114 * u.Mpc))

    # 200m Overdensity
    massfactor = ("mean", 200)
    n200m = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200m(3.0 * u.Mpc),
        (
            3.626093406e12 * (u.solMass / u.Mpc**3),
            7.210159921e42 * (u.kg / u.Mpc**3),
        ),
    )
    assert_quantity_allclose(
        n200m.rho_scale, (5118547639858115.0 * (u.solMass / u.Mpc**3))
    )
    assert_quantity_allclose(n200m.r_s, (0.2840612517326848 * u.Mpc))
    assert_quantity_allclose(n200m.r_virial, (2.414520639727821 * u.Mpc))

    # Virial mass
    massfactor = "virial"
    nvir = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        nvir(3.0 * u.Mpc),
        (
            3.646475546e12 * (u.solMass / u.Mpc**3),
            7.250687967e42 * (u.kg / u.Mpc**3),
        ),
    )
    assert_quantity_allclose(
        nvir.rho_scale, (5649367524651067.0 * (u.solMass / u.Mpc**3))
    )
    assert_quantity_allclose(nvir.r_s, (0.2748701862303786 * u.Mpc))
    assert_quantity_allclose(nvir.r_virial, (2.3363965829582183 * u.Mpc))

    # kpc tests
    # 200c Overdensity
    massfactor = ("critical", 200)
    n200c = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200c(3141 * u.kpc),
        (
            3254.373619264334 * (u.solMass / u.kpc**3),
            6.471028627484543e33 * (u.kg / u.kpc**3),
        ),
    )
    assert_quantity_allclose(
        n200c.rho_scale, (7800150.779863021 * (u.solMass / u.kpc**3))
    )
    assert_quantity_allclose(n200c.r_s, (246.84627641195425 * u.kpc))
    assert_quantity_allclose(n200c.r_virial, (2098.193349501611 * u.kpc))

    # 200m Overdensity
    massfactor = ("mean", 200)
    n200m = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200m(3141 * u.kpc),
        (
            3184.0370866188623 * (u.solMass / u.kpc**3),
            6.33117077170161e33 * (u.kg / u.kpc**3),
        ),
    )
    assert_quantity_allclose(
        n200m.rho_scale, (5118547.639858116 * (u.solMass / u.kpc**3))
    )
    assert_quantity_allclose(n200m.r_s, (284.0612517326848 * u.kpc))
    assert_quantity_allclose(n200m.r_virial, (2414.5206397278207 * u.kpc))

    # Virial mass
    massfactor = "virial"
    nvir = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        nvir(3141 * u.kpc),
        (
            3201.1946851294997 * (u.solMass / u.kpc**3),
            6.365287109937637e33 * (u.kg / u.kpc**3),
        ),
    )
    assert_quantity_allclose(
        nvir.rho_scale, (5649367.5246510655 * (u.solMass / u.kpc**3))
    )
    assert_quantity_allclose(nvir.r_s, (274.87018623037864 * u.kpc))
    assert_quantity_allclose(nvir.r_virial, (2336.3965829582185 * u.kpc))

    # Meter tests
    # 200c Overdensity
    massfactor = ("critical", 200)
    n200c = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200c(4.2e23 * u.m),
        (
            1.527649658673012e-57 * (u.solMass / u.m**3),
            3.0375936602739256e-27 * (u.kg / u.m**3),
        ),
    )
    assert_quantity_allclose(
        n200c.rho_scale, (2.654919529637763e-52 * (u.solMass / u.m**3))
    )
    assert_quantity_allclose(n200c.r_s, (7.616880211930209e21 * u.m))
    assert_quantity_allclose(n200c.r_virial, (6.474348180140678e22 * u.m))

    # 200m Overdensity
    massfactor = ("mean", 200)
    n200m = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200m(4.2e23 * u.m),
        (
            1.5194778058079436e-57 * (u.solMass / u.m**3),
            3.0213446673751314e-27 * (u.kg / u.m**3),
        ),
    )
    assert_quantity_allclose(
        n200m.rho_scale, (1.742188385322371e-52 * (u.solMass / u.m**3))
    )
    assert_quantity_allclose(n200m.r_s, (8.76521436235054e21 * u.m))
    assert_quantity_allclose(n200m.r_virial, (7.450432207997959e22 * u.m))

    # Virial mass
    massfactor = "virial"
    nvir = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        nvir(4.2e23 * u.m),
        (
            1.5214899184117633e-57 * (u.solMass / u.m**3),
            3.0253455719375224e-27 * (u.kg / u.m**3),
        ),
    )
    assert_quantity_allclose(
        nvir.rho_scale, (1.922862338766335e-52 * (u.solMass / u.m**3))
    )
    assert_quantity_allclose(nvir.r_s, (8.481607714647913e21 * u.m))
    assert_quantity_allclose(nvir.r_virial, (7.209366557450727e22 * u.m))

    # Verify string input of overdensity type
    # 200c Overdensity
    massfactor = "200c"
    n200c = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200c(3.0 * u.Mpc),
        (
            3.709693508e12 * (u.solMass / u.Mpc**3),
            7.376391187e42 * (u.kg / u.Mpc**3),
        ),
    )

    # 200m Overdensity
    massfactor = "200m"
    n200m = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200m(3.0 * u.Mpc),
        (
            3.626093406e12 * (u.solMass / u.Mpc**3),
            7.210159921e42 * (u.kg / u.Mpc**3),
        ),
    )

    # Virial mass
    massfactor = "virial"
    nvir = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        nvir(3.0 * u.Mpc),
        (
            3.646475546e12 * (u.solMass / u.Mpc**3),
            7.250687967e42 * (u.kg / u.Mpc**3),
        ),
    )

def test_NFW_fit(fitter):
    """Test fitting of NFW model."""
    fitter = fitter()
    if isinstance(fitter, DogBoxLSQFitter):
        pytest.xfail("dogbox method is poor fitting method for NFW model")

    # Fixed parameters
    redshift = 0.63
    cosmo = cosmology.Planck15

    # Radial set
    # fmt: off
    r = np.array(
        [
            1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
            7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
        ]
    ) * u.kpc
    # fmt: on

    # 200c Overdensity
    massfactor = ("critical", 200)
    # fmt: off
    density_r = np.array(
        [
            1.77842761e+08, 9.75233623e+06, 2.93789626e+06, 1.90107238e+06,
            1.30776878e+06, 7.01004140e+05, 4.20678479e+05, 1.57421880e+05,
            7.54669701e+04, 2.56319769e+04, 6.21976562e+03, 3.96522424e+02,
            7.39336808e+01
        ]
    ) * (u.solMass / u.kpc**3)
    # fmt: on
    n200c = NFW(
        mass=1.8e15 * u.M_sun,
        concentration=7.0,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    n200c.redshift.fixed = True
    n_fit = fitter(n200c, r, density_r, maxiter=1000)
    assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # 200m Overdensity
    massfactor = ("mean", 200)
    # fmt: off
    density_r = np.array(
        [
            1.35677282e+08, 7.95392979e+06, 2.50352599e+06, 1.64535870e+06,
            1.14642248e+06, 6.26805453e+05, 3.81691731e+05, 1.46294819e+05,
            7.11559560e+04, 2.45737796e+04, 6.05459585e+03, 3.92183991e+02,
            7.34674416e+01
        ]
    ) * (u.solMass / u.kpc**3)
    # fmt: on
    n200m = NFW(
        mass=1.8e15 * u.M_sun,
        concentration=7.0,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    n200m.redshift.fixed = True
    n_fit = fitter(n200m, r, density_r, maxiter=1000)
    assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

    # Virial mass
    massfactor = ("virial", 200)
    # fmt: off
    density_r = np.array(
        [
            1.44573515e+08, 8.34873998e+06, 2.60137484e+06, 1.70348738e+06,
            1.18337370e+06, 6.43994654e+05, 3.90800249e+05, 1.48930537e+05,
            7.21856397e+04, 2.48289464e+04, 6.09477095e+03, 3.93248818e+02,
            7.35821787e+01
        ]
    ) * (u.solMass / u.kpc**3)
    # fmt: on
    nvir = NFW(
        mass=1.8e15 * u.M_sun,
        concentration=7.0,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    nvir.redshift.fixed = True
    n_fit = fitter(nvir, r, density_r, maxiter=1000)
    assert_quantity_allclose(n_fit.mass, 2.0000000000000e15 * u.M_sun)
    assert_quantity_allclose(n_fit.concentration, 8.5)

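For orientation in the circular-velocity checks below: an NFW halo's circular velocity follows from its enclosed mass,

$$v_{\mathrm{circ}}^2(r) = \frac{4\pi G \rho_s r_s^3}{r}\left[\ln\left(1 + \frac{r}{r_s}\right) - \frac{r/r_s}{1 + r/r_s}\right],$$

which peaks at $r_{\max} \approx 2.16258\, r_s$. The asserted ``r_max`` values are consistent with this ratio (e.g. 0.53382 Mpc / 0.24685 Mpc ≈ 2.16258 for the 200c halo with the same mass, concentration, and redshift).
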
def test_NFW_circular_velocity():
    """Test circular velocity and radial validation of NFW model."""
    # Test parameters
    mass = 2.0000000000000e15 * u.M_sun
    concentration = 8.5
    redshift = 0.63
    cosmo = cosmology.Planck15
    r_r = (
        np.array([0.01, 0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.75, 1.0, 1.5, 2.5, 6.5, 11.5])
        * u.Mpc
    )

    # 200c Overdensity tests
    massfactor = ("critical", 200)
    n200c = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    # fmt: off
    circ_v_200c = np.array(
        [
            702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
            2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
            2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
            1260.94034541
        ]
    ) * (u.km / u.s)
    # fmt: on
    assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
    assert_quantity_allclose(n200c.r_max, (0.5338248204429641 * u.Mpc))
    assert_quantity_allclose(n200c.v_max, (2356.7204380904027 * (u.km / u.s)))

    # 200m Overdensity tests
    massfactor = ("mean", 200)
    mass = 1.0e14 * u.M_sun
    concentration = 12.3
    redshift = 1.5
    n200m = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    # fmt: off
    circ_v_200m = np.array(
        [
            670.18236647, 1088.9843324, 1046.82334367, 1016.88890732,
            987.97273478, 936.00207134, 891.80115232, 806.63307977,
            744.91002191, 659.33401039, 557.82823549, 395.9735786,
            318.29863006
        ]
    ) * (u.km / u.s)
    # fmt: on
    assert_quantity_allclose(n200m.circular_velocity(r_r), circ_v_200m)
    assert_quantity_allclose(n200m.r_max, (0.10196917920081808 * u.Mpc))
    assert_quantity_allclose(n200m.v_max, (1089.0224395818727 * (u.km / u.s)))

    # Virial Overdensity tests
    massfactor = "virial"
    mass = 1.2e45 * u.kg
    concentration = 2.4
    redshift = 0.34
    # fmt: off
    r_r = np.array(
        [
            3.08567758e+20, 3.08567758e+21, 6.17135516e+21, 7.71419395e+21,
            9.25703274e+21, 1.23427103e+22, 1.54283879e+22, 2.31425819e+22,
            3.08567758e+22, 4.62851637e+22, 7.71419395e+22, 2.00569043e+23,
            3.54852922e+23
        ]
    ) * u.m
    # fmt: on
    nvir = NFW(
        mass=mass,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    # fmt: off
    circ_v_vir = np.array(
        [
            205.87461783, 604.65091823, 793.9190629, 857.52516521,
            908.90280843, 986.53582718, 1041.69089845, 1124.19719446,
            1164.58270747, 1191.33193561, 1174.02934755, 1023.69360527,
            895.52206321
        ]
    ) * (u.km / u.s)
    # fmt: on
    assert_quantity_allclose(nvir.circular_velocity(r_r), circ_v_vir)
    assert_quantity_allclose(nvir.r_max, (1.6484542328623448 * u.Mpc))
    assert_quantity_allclose(nvir.v_max, (1192.3130989914962 * (u.km / u.s)))

def test_NFW_exceptions_and_warnings_and_misc():
    """Test NFW exceptions."""
    # Arbitrary Test parameters
    mass = 2.0000000000000e15 * u.M_sun
    concentration = 8.5
    redshift = 0.63
    cosmo = cosmology.Planck15
    massfactor = ("critical", 200)
    # fmt: off
    r_r = np.array(
        [
            1.00e+01, 1.00e+02, 2.00e+02, 2.50e+02, 3.00e+02, 4.00e+02, 5.00e+02,
            7.50e+02, 1.00e+03, 1.50e+03, 2.50e+03, 6.50e+03, 1.15e+04
        ]
    ) * u.kpc
    # fmt: on

    # Massfactor exception tests
    MESSAGE = r"Massfactor 'not' not one of 'critical', 'mean', or 'virial'"
    with pytest.raises(ValueError, match=MESSAGE):
        NFW(
            mass=mass,
            concentration=concentration,
            redshift=redshift,
            cosmo=cosmo,
            massfactor=("not", "virial"),
        )

    MESSAGE = r"Massfactor not virial string not of the form '#m', '#c', or 'virial'"
    with pytest.raises(ValueError, match=MESSAGE):
        NFW(
            mass=mass,
            concentration=concentration,
            redshift=redshift,
            cosmo=cosmo,
            massfactor="not virial",
        )

    MESSAGE = r"Massfactor 200 not a tuple or string"
    with pytest.raises(TypeError, match=MESSAGE):
        NFW(
            mass=mass,
            concentration=concentration,
            redshift=redshift,
            cosmo=cosmo,
            massfactor=200,
        )

    # Verify unitless mass
    # Density test
    n200c = NFW(
        mass=mass.value,
        concentration=concentration,
        redshift=redshift,
        cosmo=cosmo,
        massfactor=massfactor,
    )
    assert_quantity_allclose(
        n200c(3000.0),
        (
            3.709693508e12 * (u.solMass / u.Mpc**3),
            7.376391187e42 * (u.kg / u.Mpc**3),
        ),
    )

    # Circular velocity test with unitless mass
    # fmt: off
    circ_v_200c = np.array(
        [
            702.45487454, 1812.4138346, 2150.50929296, 2231.5802568,
            2283.96950242, 2338.45989696, 2355.78876772, 2332.41766543,
            2276.89433811, 2154.53909153, 1950.07947819, 1512.37442943,
            1260.94034541
        ]
    ) * (u.km / u.s)
    # fmt: on
    assert_quantity_allclose(n200c.circular_velocity(r_r), circ_v_200c)
    # Test with unitless input radii
    assert_quantity_allclose(n200c.circular_velocity(r_r.value), circ_v_200c)

    # Test Default Cosmology
    ncos = NFW(mass=mass, concentration=concentration, redshift=redshift)
    assert_quantity_allclose(ncos.A_NFW(concentration), 1.356554956501232)

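The ``A_NFW`` value asserted above is consistent with the standard NFW mass normalization $A_{\mathrm{NFW}}(c) = \ln(1 + c) - c/(1 + c)$: for $c = 8.5$ this gives $\ln 9.5 - 8.5/9.5 \approx 2.25129 - 0.89474 = 1.35655$.
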
def test_polynomial_init_with_constraints(model_class):
    """
    Test that polynomial models can be instantiated with constraints, but no
    parameters specified.

    Regression test for https://github.com/astropy/astropy/issues/3606
    """
    # Just determine which parameter to place a constraint on; it doesn't
    # matter which parameter it is to exhibit the problem so long as it's a
    # valid parameter for the model
    if "1D" in model_class.__name__:
        param = "c0"
    else:
        param = "c0_0"

    if issubclass(model_class, Linear1D):
        param = "intercept"

    if issubclass(model_class, OrthoPolynomialBase):
        degree = (2, 2)
    else:
        degree = (2,)

    m = model_class(*degree, fixed={param: True})
    assert m.fixed[param] is True
    assert getattr(m, param).fixed is True

    if issubclass(model_class, OrthoPolynomialBase):
        assert (
            repr(m)
            == f"<{model_class.__name__}(2, 2, c0_0=0., c1_0=0., c2_0=0., c0_1=0., "
            "c1_1=0., c2_1=0., c0_2=0., c1_2=0., c2_2=0.)>"
        )
        assert (
            str(m) == f"Model: {model_class.__name__}\n"
            "Inputs: ('x', 'y')\n"
            "Outputs: ('z',)\n"
            "Model set size: 1\n"
            "X_Degree: 2\n"
            "Y_Degree: 2\n"
            "Parameters:\n"
            " c0_0 c1_0 c2_0 c0_1 c1_1 c2_1 c0_2 c1_2 c2_2\n"
            " ---- ---- ---- ---- ---- ---- ---- ---- ----\n"
            "  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0  0.0"
        )
    else:
        if model_class.__name__ == "Polynomial2D":
            assert (
                repr(m) == "<Polynomial2D(2, c0_0=0., c1_0=0., c2_0=0., "
                "c0_1=0., c0_2=0., c1_1=0.)>"
            )
            assert (
                str(m) == "Model: Polynomial2D\n"
                "Inputs: ('x', 'y')\n"
                "Outputs: ('z',)\n"
                "Model set size: 1\n"
                "Degree: 2\n"
                "Parameters:\n"
                " c0_0 c1_0 c2_0 c0_1 c0_2 c1_1\n"
                " ---- ---- ---- ---- ---- ----\n"
                "  0.0  0.0  0.0  0.0  0.0  0.0"
            )
        elif model_class.__name__ == "Linear1D":
            assert repr(m) == "<Linear1D(slope=2., intercept=0.)>"
            assert (
                str(m) == "Model: Linear1D\n"
                "Inputs: ('x',)\n"
                "Outputs: ('y',)\n"
                "Model set size: 1\n"
                "Parameters:\n"
                "    slope intercept\n"
                "    ----- ---------\n"
                "      2.0       0.0"
            )
        else:
            assert repr(m) == f"<{model_class.__name__}(2, c0=0., c1=0., c2=0.)>"
            assert (
                str(m) == f"Model: {model_class.__name__}\n"
                "Inputs: ('x',)\n"
                "Outputs: ('y',)\n"
                "Model set size: 1\n"
                "Degree: 2\n"
                "Parameters:\n"
                "     c0  c1  c2\n"
                "    --- --- ---\n"
                "    0.0 0.0 0.0"
            )

def test_sip_hst():
    """Test SIP against astropy.wcs"""
    test_file = get_pkg_data_filename(os.path.join("data", "hst_sip.hdr"))
    hdr = fits.Header.fromtextfile(test_file)
    crpix1 = hdr["CRPIX1"]
    crpix2 = hdr["CRPIX2"]
    wobj = wcs.WCS(hdr)
    a_pars = dict(**hdr["A_*"])
    b_pars = dict(**hdr["B_*"])
    a_order = a_pars.pop("A_ORDER")
    b_order = b_pars.pop("B_ORDER")
    sip = SIP([crpix1, crpix2], a_order, b_order, a_pars, b_pars)
    coords = [1, 1]
    rel_coords = [1 - crpix1, 1 - crpix2]
    astwcs_result = wobj.sip_pix2foc([coords], 1)[0] - rel_coords
    assert_allclose(sip(1, 1), astwcs_result)

    # Test changing of inputs and calling it with keyword arguments.
    sip.inputs = ("r", "t")
    assert_allclose(sip(r=1, t=1), astwcs_result)
    assert_allclose(sip(1, t=1), astwcs_result)

    # Test representations
    assert (
        repr(sip) == "<SIP([<Shift(offset=-2048.)>, <Shift(offset=-1024.)>, "
        "<_SIP1D(4, 'A', A_2_0=0.00000855, A_3_0=-0., A_4_0=0., A_0_2=0.00000217, "
        "A_0_3=0., A_0_4=0., A_1_1=-0.0000052, A_1_2=-0., A_1_3=-0., "
        "A_2_1=-0., A_2_2=0., A_3_1=0.)>, "
        "<_SIP1D(4, 'B', B_2_0=-0.00000175, B_3_0=0., B_4_0=-0., B_0_2=-0.00000722, "
        "B_0_3=-0., B_0_4=-0., B_1_1=0.00000618, B_1_2=-0., B_1_3=0., "
        "B_2_1=-0., B_2_2=-0., B_3_1=-0.)>])>"
    )
    with conf.set_temp("max_width", 80):
        # fmt: off
        assert str(sip) == (
            "Model: SIP\n"
            "    Model: Shift\n"
            "    Inputs: ('x',)\n"
            "    Outputs: ('y',)\n"
            "    Model set size: 1\n"
            "    Parameters:\n"
            "         offset\n"
            "        -------\n"
            "        -2048.0\n"
            "\n"
            "    Model: Shift\n"
            "    Inputs: ('x',)\n"
            "    Outputs: ('y',)\n"
            "    Model set size: 1\n"
            "    Parameters:\n"
            "         offset\n"
            "        -------\n"
            "        -1024.0\n"
            "\n"
            "    Model: _SIP1D\n"
            "    Inputs: ('x', 'y')\n"
            "    Outputs: ('z',)\n"
            "    Model set size: 1\n"
            "    Order: 4\n"
            "    Coeff. Prefix: A\n"
            "    Parameters:\n"
            "            A_2_0                  A_3_0          ...         A_3_1        \n"
            "    --------------------- ---------------------- ... ---------------------\n"
            "    8.551277582556502e-06 -4.730444829222791e-10 ... 1.971022971660309e-15\n"
            "\n"
            "    Model: _SIP1D\n"
            "    Inputs: ('x', 'y')\n"
            "    Outputs: ('z',)\n"
            "    Model set size: 1\n"
            "    Order: 4\n"
            "    Coeff. Prefix: B\n"
            "    Parameters:\n"
            "            B_2_0                  B_3_0         ...          B_3_1         \n"
            "    ---------------------- --------------------- ... ----------------------\n"
            "    -1.746491877058669e-06 8.567635427816317e-11 ... -3.779506805487476e-15\n"
        )
        # fmt: on

    # Test get num of coeffs
    assert sip.sip1d_a.get_num_coeff(1) == 6

    # Test error
    MESSAGE = "Degree of polynomial must be 2< deg < 9"
    sip.sip1d_a.order = 1
    with pytest.raises(ValueError, match=MESSAGE):
        sip.sip1d_a.get_num_coeff(1)
    sip.sip1d_a.order = 10
    with pytest.raises(ValueError, match=MESSAGE):
        sip.sip1d_a.get_num_coeff(1)

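For reference, the SIP convention exercised here (Shupe et al. 2005) models distortion as polynomials in the pixel offsets $(u, v)$ from ``CRPIX``,

$$f(u, v) = \sum_{p+q \le \mathrm{A\_ORDER}} A_{pq}\, u^p v^q, \qquad g(u, v) = \sum_{p+q \le \mathrm{B\_ORDER}} B_{pq}\, u^p v^q,$$

which is why the assertions compare ``sip(...)`` against ``sip_pix2foc`` output with the relative (CRPIX-subtracted) coordinates removed. The ``AP``/``BP`` coefficients in the next test play the same role for the approximate inverse transform.
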
def test_sip_irac():
    """Test forward and inverse SIP against astropy.wcs"""
    test_file = get_pkg_data_filename(os.path.join("data", "irac_sip.hdr"))
    hdr = fits.Header.fromtextfile(test_file)
    crpix1 = hdr["CRPIX1"]
    crpix2 = hdr["CRPIX2"]
    wobj = wcs.WCS(hdr)
    a_pars = dict(**hdr["A_*"])
    b_pars = dict(**hdr["B_*"])
    ap_pars = dict(**hdr["AP_*"])
    bp_pars = dict(**hdr["BP_*"])
    a_order = a_pars.pop("A_ORDER")
    b_order = b_pars.pop("B_ORDER")
    ap_order = ap_pars.pop("AP_ORDER")
    bp_order = bp_pars.pop("BP_ORDER")
    del a_pars["A_DMAX"]
    del b_pars["B_DMAX"]
    pix = [200, 200]
    rel_pix = [200 - crpix1, 200 - crpix2]
    sip = SIP(
        [crpix1, crpix2],
        a_order,
        b_order,
        a_pars,
        b_pars,
        ap_order=ap_order,
        ap_coeff=ap_pars,
        bp_order=bp_order,
        bp_coeff=bp_pars,
    )
    foc = wobj.sip_pix2foc([pix], 1)
    newpix = wobj.sip_foc2pix(foc, 1)[0]
    assert_allclose(sip(*pix), foc[0] - rel_pix)
    assert_allclose(sip.inverse(*foc[0]) + foc[0] - rel_pix, newpix - pix)

    # Test inverse representations
    assert (
        repr(sip.inverse)
        == "<InverseSIP([<Polynomial2D(2, c0_0=0., c1_0=0.0000114, c2_0=0.00002353, "
        "c0_1=-0.00000546, c0_2=-0.00000667, c1_1=-0.00001801)>, "
        "<Polynomial2D(2, c0_0=0., c1_0=-0.00001495, c2_0=0.00000122, c0_1=0.00001975, "
        "c0_2=-0.00002601, c1_1=0.00002944)>])>"
    )
    assert (
        str(sip.inverse) == "Model: InverseSIP\n"
        "    Model: Polynomial2D\n"
        "    Inputs: ('x', 'y')\n"
        "    Outputs: ('z',)\n"
        "    Model set size: 1\n"
        "    Degree: 2\n"
        "    Parameters:\n"
        "        c0_0   c1_0      c2_0       c0_1       c0_2       c1_1   \n"
        "        ---- -------- --------- ---------- ---------- ----------\n"
        "         0.0 1.14e-05 2.353e-05 -5.463e-06 -6.666e-06 -1.801e-05\n"
        "\n"
        "    Model: Polynomial2D\n"
        "    Inputs: ('x', 'y')\n"
        "    Outputs: ('z',)\n"
        "    Model set size: 1\n"
        "    Degree: 2\n"
        "    Parameters:\n"
        "        c0_0    c1_0       c2_0      c0_1       c0_2      c1_1  \n"
        "        ---- ---------- --------- --------- ---------- ---------\n"
        "         0.0 -1.495e-05 1.225e-06 1.975e-05 -2.601e-05 2.944e-05\n"
    )

def test_zero_degree_polynomial(cls):
    """
    A few tests that degree=0 polynomials are correctly evaluated and
    fitted.

    Regression test for https://github.com/astropy/astropy/pull/3589
    """
    MESSAGE = "Degree of polynomial must be positive or null"

    if cls.n_inputs == 1:  # Test 1D polynomials
        p1 = cls(degree=0, c0=1)
        assert p1(0) == 1
        assert np.all(p1(np.zeros(5)) == np.ones(5))

        x = np.linspace(0, 1, 100)
        # Add a little noise along a straight line
        y = 1 + np.random.uniform(0, 0.1, len(x))
        p1_init = cls(degree=0)
        fitter = fitting.LinearLSQFitter()
        p1_fit = fitter(p1_init, x, y)

        # The fit won't be exact, of course, but it should get close to
        # within 10% (atol=0.10)
        assert_allclose(p1_fit.c0, 1, atol=0.10)

        # Error from negative degree
        with pytest.raises(ValueError, match=MESSAGE):
            cls(degree=-1)
    elif cls.n_inputs == 2:  # Test 2D polynomials
        if issubclass(cls, OrthoPolynomialBase):
            p2 = cls(x_degree=0, y_degree=0, c0_0=1)

            # different shaped x and y inputs
            a = np.array([1, 2, 3])
            b = np.array([1, 2])
            with mk.patch.object(
                PolynomialBase,
                "prepare_inputs",
                autospec=True,
                return_value=((a, b), mk.MagicMock()),
            ):
                with pytest.raises(
                    ValueError, match=r"Expected input arrays to have the same shape"
                ):
                    p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())

            # Error from negative degree
            with pytest.raises(ValueError, match=MESSAGE):
                cls(x_degree=-1, y_degree=0)
            with pytest.raises(ValueError, match=MESSAGE):
                cls(x_degree=0, y_degree=-1)
        else:
            p2 = cls(degree=0, c0_0=1)

            # Error from negative degree
            with pytest.raises(ValueError, match=MESSAGE):
                cls(degree=-1)

        assert p2(0, 0) == 1
        assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))

        y, x = np.mgrid[0:1:100j, 0:1:100j]
        z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)
        if issubclass(cls, OrthoPolynomialBase):
            p2_init = cls(x_degree=0, y_degree=0)
        else:
            p2_init = cls(degree=0)
        fitter = fitting.LinearLSQFitter()
        p2_fit = fitter(p2_init, x, y, z)
        assert_allclose(p2_fit.c0_0, 1, atol=0.10)

def test_2d_orthopolynomial_in_compound_model(fitter):
    """
    Ensure that OrthoPolynomialBase (i.e. Chebyshev2D & Legendre2D) models get
    evaluated & fitted correctly when part of a compound model.

    Regression test for https://github.com/astropy/astropy/pull/6085.
    """
    fitter = fitter()

    y, x = np.mgrid[0:5, 0:5]
    z = x + y

    simple_model = Chebyshev2D(2, 2)
    with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
        simple_fit = fitter(simple_model, x, y, z)

    compound_model = Identity(2) | Chebyshev2D(2, 2)
    compound_model.fittable = True
    compound_model.linear = True
    with pytest.warns(AstropyUserWarning, match="Model is linear in parameters"):
        compound_fit = fitter(compound_model, x, y, z)

    assert_allclose(simple_fit(x, y), compound_fit(x, y), atol=1e-11)

def test_Sky2Pix(code):
    """Check astropy model evaluation against wcslib."""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = f"PV2_{i + 1}"
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0.0, 0.0]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
    model = getattr(projections, "Sky2Pix_" + code)
    tinv = model(*params)
    x, y = tinv(wcslibout["phi"], wcslibout["theta"])
    assert_almost_equal(np.asarray(x), wcs_pix[:, 0])
    assert_almost_equal(np.asarray(y), wcs_pix[:, 1])
    assert isinstance(tinv.prjprm, wcs.Prjprm)

def test_Pix2Sky(code):
    """Check astropy model evaluation against wcslib."""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = f"PV2_{i + 1}"
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0.0, 0.0]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_phi = wcslibout["phi"]
    wcs_theta = wcslibout["theta"]
    model = getattr(projections, "Pix2Sky_" + code)
    tanprj = model(*params)
    phi, theta = tanprj(*PIX_COORDINATES)
    assert_almost_equal(np.asarray(phi), wcs_phi)
    assert_almost_equal(np.asarray(theta), wcs_theta)

def test_Sky2Pix_unit(code):
    """Check astropy model evaluation against wcslib, with unitful inputs."""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = f"PV2_{i + 1}"
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0.0, 0.0]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_pix = w.wcs.s2p(wcslibout["world"], 1)["pixcrd"]
    model = getattr(projections, "Sky2Pix_" + code)
    tinv = model(*params)
    x, y = tinv(wcslibout["phi"] * u.deg, wcslibout["theta"] * u.deg)
    assert_quantity_allclose(x, wcs_pix[:, 0] * u.deg)
    assert_quantity_allclose(y, wcs_pix[:, 1] * u.deg)

def test_Pix2Sky_unit(code):
    """Check astropy model evaluation against wcslib, with unitful inputs."""
    wcs_map = os.path.join(MAPS_DIR, f"1904-66_{code}.hdr")
    test_file = get_pkg_data_filename(wcs_map)
    header = fits.Header.fromfile(test_file, endcard=False, padding=False)

    params = []
    for i in range(3):
        key = f"PV2_{i + 1}"
        if key in header:
            params.append(header[key])

    w = wcs.WCS(header)
    w.wcs.crval = [0.0, 0.0]
    w.wcs.crpix = [0, 0]
    w.wcs.cdelt = [1, 1]
    wcslibout = w.wcs.p2s([PIX_COORDINATES], 1)
    wcs_phi = wcslibout["phi"]
    wcs_theta = wcslibout["theta"]
    model = getattr(projections, "Pix2Sky_" + code)
    tanprj = model(*params)
    phi, theta = tanprj(*PIX_COORDINATES * u.deg)
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)

    # The same evaluation should work with any equivalent angular unit
    phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.rad))
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)

    phi, theta = tanprj(*(PIX_COORDINATES * u.deg).to(u.arcmin))
    assert_quantity_allclose(phi, wcs_phi * u.deg)
    assert_quantity_allclose(theta, wcs_theta * u.deg)

def test_projection_default(code):
    """Check astropy model evaluation with default parameters."""
    # Just makes sure that the default parameter values are reasonable
    # and accepted by wcslib.
    model = getattr(projections, "Sky2Pix_" + code)
    tinv = model()
    x, y = tinv(45, 45)

    model = getattr(projections, "Pix2Sky_" + code)
    tinv = model()
    x, y = tinv(0, 0)

def test_evaluate_with_quantities():
    """
    Test evaluation of a single model with Quantity parameters that do
    not explicitly require units.
    """
    # We create two models here - one with quantities, and one without. The one
    # without is used to create the reference values for comparison.
    g = Gaussian1D(1, 1, 0.1)
    gq = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # We first check that calling the Gaussian with quantities returns the
    # expected result
    assert_quantity_allclose(gq(1 * u.m), g(1) * u.J)

    # Units have to be specified for the Gaussian with quantities - if not, an
    # error is raised
    with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "", "m ")):
        gq(1)

    # However, zero is a special case
    assert_quantity_allclose(gq(0), g(0) * u.J)

    # We can also evaluate models with equivalent units
    assert_allclose(gq(0.0005 * u.km).value, g(0.5))

    # But not with incompatible units
    with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "s", "m")):
        gq(3 * u.s)

    # We also can't evaluate the model without quantities with a quantity
    with pytest.raises(
        UnitsError,
        match=r"Can only apply 'subtract' function to dimensionless quantities .*",
    ):
        g(3 * u.m)

def test_evaluate_with_quantities_and_equivalencies():
    """
    Make sure that equivalencies are correctly taken into account.
    """
    g = Gaussian1D(1 * u.Jy, 10 * u.nm, 2 * u.nm)

    # We aren't setting the equivalencies, so this won't work
    with pytest.raises(UnitsError, match=MESSAGE.format("Gaussian1D", "PHz", "nm")):
        g(30 * u.PHz)

    # But it should now work if we pass equivalencies when evaluating
    assert_quantity_allclose(
        g(30 * u.PHz, equivalencies={"x": u.spectral()}), g(9.993081933333332 * u.nm)
    )

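The reference wavelength in the final comparison is just the spectral conversion of the input frequency: lambda = c / nu = 299792458 m/s / (30e15 Hz) ≈ 9.99308e-9 m ≈ 9.9931 nm, the value passed to ``g`` on the right-hand side.
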
def test_and_input_units():
    """
    Test units passed to both models in a parallel ('&') composition.
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 & s2
    out = cs(10 * u.arcsecond, 20 * u.arcsecond)
    assert_quantity_allclose(out[0], 10 * u.deg + 10 * u.arcsec)
    assert_quantity_allclose(out[1], 10 * u.deg + 20 * u.arcsec)

def test_plus_input_units():
    """
    Test units passed to both models in a summed ('+') composition.
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 + s2
    out = cs(10 * u.arcsecond)
    assert_quantity_allclose(out, 20 * u.deg + 20 * u.arcsec)

def test_compound_input_units():
    """
    Test units passed to the first model in a chained ('|') composition.
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 | s2
    out = cs(10 * u.arcsecond)
    assert_quantity_allclose(out, 20 * u.deg + 10 * u.arcsec)

def test_compound_input_units_fail():
    """
    Test incompatible units to first model in chain.
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 | s2
    with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")):
        cs(10 * u.pix)

def test_compound_incompatible_units_fail():
    """
    Test incompatible model units in chain.
    """
    s1 = Shift(10 * u.pix)
    s2 = Shift(10 * u.deg)
    cs = s1 | s2
    with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")):
        cs(10 * u.pix)

def test_compound_pipe_equiv_call():
    """
    Check that equivalencies work when passed to evaluate, for a chained model
    (which has one input).
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 | s2
    out = cs(10 * u.pix, equivalencies={"x": u.pixel_scale(0.5 * u.deg / u.pix)})
    assert_quantity_allclose(out, 25 * u.deg)

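The expected value follows directly: with the ``pixel_scale`` equivalency the 10 pix input converts to 5 deg, and each of the two shifts then adds 10 deg, giving 25 deg.
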
def test_compound_and_equiv_call():
    """
    Check that equivalencies work when passed to evaluate, for a composite model
    with two inputs.
    """
    s1 = Shift(10 * u.deg)
    s2 = Shift(10 * u.deg)
    cs = s1 & s2
    out = cs(
        10 * u.pix,
        10 * u.pix,
        equivalencies={
            "x0": u.pixel_scale(0.5 * u.deg / u.pix),
            "x1": u.pixel_scale(0.5 * u.deg / u.pix),
        },
    )
    assert_quantity_allclose(out[0], 15 * u.deg)
    assert_quantity_allclose(out[1], 15 * u.deg)

def test_compound_input_units_equivalencies():
    """
    Test setting input_units_equivalencies on one of the models.
    """
    s1 = Shift(10 * u.deg)
    s1.input_units_equivalencies = {"x": u.pixel_scale(0.5 * u.deg / u.pix)}
    s2 = Shift(10 * u.deg)
    sp = Shift(10 * u.pix)

    cs = s1 | s2
    assert cs.input_units_equivalencies == {"x": u.pixel_scale(0.5 * u.deg / u.pix)}
    out = cs(10 * u.pix)
    assert_quantity_allclose(out, 25 * u.deg)

    cs = sp | s1
    assert cs.input_units_equivalencies is None
    out = cs(10 * u.pix)
    assert_quantity_allclose(out, 20 * u.deg)

    cs = s1 & s2
    assert cs.input_units_equivalencies == {"x0": u.pixel_scale(0.5 * u.deg / u.pix)}
    cs = cs.rename("TestModel")
    out = cs(20 * u.pix, 10 * u.deg)
    assert_quantity_allclose(out, 20 * u.deg)

    with pytest.raises(UnitsError, match=MESSAGE.format("Shift", "pix", "deg")):
        out = cs(20 * u.pix, 10 * u.pix)

def test_compound_input_units_strict():
    """
    Test setting input_units_strict on one of the models.
    """

    class ScaleDegrees(Scale):
        input_units = {"x": u.deg}

    s1 = ScaleDegrees(2)
    s2 = Scale(2)

    cs = s1 | s2
    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)
    assert out.unit is u.deg  # important since this tests input_units_strict

    cs = s2 | s1
    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)
    assert out.unit is u.deg  # important since this tests input_units_strict

    cs = s1 & s2
    out = cs(10 * u.arcsec, 10 * u.arcsec)
    assert_quantity_allclose(out, 20 * u.arcsec)
    assert out[0].unit is u.deg
    assert out[1].unit is u.arcsec

def test_compound_input_units_allow_dimensionless():
    """
    Test setting input_units_allow_dimensionless on one of the models.
    """

    class ScaleDegrees(Scale):
        input_units = {"x": u.deg}

    s1 = ScaleDegrees(2)
    s1._input_units_allow_dimensionless = True
    s2 = Scale(2)

    cs = s1 | s2
    cs = cs.rename("TestModel")
    out = cs(10)
    assert_quantity_allclose(out, 40 * u.one)

    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)

    with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")):
        out = cs(10 * u.m)

    s1._input_units_allow_dimensionless = False

    cs = s1 | s2
    cs = cs.rename("TestModel")
    with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")):
        out = cs(10)

    s1._input_units_allow_dimensionless = True

    cs = s2 | s1
    cs = cs.rename("TestModel")
    out = cs(10)
    assert_quantity_allclose(out, 40 * u.one)

    out = cs(10 * u.arcsec)
    assert_quantity_allclose(out, 40 * u.arcsec)

    with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "m", "deg")):
        out = cs(10 * u.m)

    s1._input_units_allow_dimensionless = False

    cs = s2 | s1
    with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")):
        out = cs(10)

    s1._input_units_allow_dimensionless = True

    s1 = ScaleDegrees(2)
    s1._input_units_allow_dimensionless = True
    s2 = ScaleDegrees(2)
    s2._input_units_allow_dimensionless = False

    cs = s1 & s2
    cs = cs.rename("TestModel")
    out = cs(10, 10 * u.arcsec)
    assert_quantity_allclose(out[0], 20 * u.one)
    assert_quantity_allclose(out[1], 20 * u.arcsec)

    with pytest.raises(UnitsError, match=MESSAGE.format("ScaleDegrees", "", "deg")):
        out = cs(10, 10)

def test_compound_return_units():
    """
    Test that return_units on the first model in the chain is respected for the
    input to the second.
    """

    class PassModel(Model):
        n_inputs = 2
        n_outputs = 2

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        @property
        def input_units(self):
            """Input units."""
            return {"x0": u.deg, "x1": u.deg}

        @property
        def return_units(self):
            """Output units."""
            return {"x0": u.deg, "x1": u.deg}

        def evaluate(self, x, y):
            return x.value, y.value

    cs = Pix2Sky_TAN() | PassModel()
    assert_quantity_allclose(cs(0 * u.deg, 0 * u.deg), (0, 90) * u.deg)

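(The expected output here follows from the projection geometry: ``Pix2Sky_TAN()`` evaluated at the reference point (0, 0) returns the native pole, (phi, theta) = (0 deg, 90 deg), which ``PassModel`` strips and ``return_units`` re-attaches as degrees.)
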
def test_fitting_missing_data_units(fitter):
    """
    Raise an error if the model has units but the data doesn't.
    """
    fitter = fitter()

    class UnorderedGaussian1D(models.Gaussian1D):
        # Parameters are ordered differently here from Gaussian1D
        # to ensure the order does not break functionality.
        def _parameter_units_for_data_units(self, inputs_unit, outputs_unit):
            return {
                "amplitude": outputs_unit["y"],
                "mean": inputs_unit["x"],
                "stddev": inputs_unit["x"],
            }

    g_init = UnorderedGaussian1D(amplitude=1.0 * u.mJy, mean=3 * u.cm, stddev=2 * u.mm)

    # We define flux unit so that conversion fails at wavelength unit.
    # This is because the order of parameter unit conversion seems to
    # follow the order defined in _parameter_units_for_data_units method.
    MESSAGE = r"'cm' .* and '' .* are not convertible"
    with pytest.raises(UnitsError, match=MESSAGE):
        fitter(g_init, [1, 2, 3], [4, 5, 6] * (u.erg / (u.s * u.cm * u.cm * u.Hz)))

    MESSAGE = r"'mJy' .* and '' .* are not convertible"
    with pytest.raises(UnitsError, match=MESSAGE):
        fitter(g_init, [1, 2, 3] * u.m, [4, 5, 6])

def test_fitting_missing_model_units(fitter):
    """
    Proceed if the data has units but the model doesn't.
    """
    fitter = fitter()
    x, y = _fake_gaussian_data()

    g_init = models.Gaussian1D(amplitude=1.0, mean=3, stddev=2)
    g = fitter(g_init, x, y)
    assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
    assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
    assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)

    g_init = models.Gaussian1D(amplitude=1.0, mean=3 * u.m, stddev=2 * u.m)
    g = fitter(g_init, x, y)
    assert_quantity_allclose(g.amplitude, 3 * u.Jy, rtol=0.05)
    assert_quantity_allclose(g.mean, 1.3 * u.m, rtol=0.05)
    assert_quantity_allclose(g.stddev, 0.8 * u.m, rtol=0.05)

def test_fitting_incompatible_units(fitter):
    """
    Raise an error if the data and model have incompatible units.
    """
    fitter = fitter()
    g_init = models.Gaussian1D(amplitude=1.0 * u.Jy, mean=3 * u.m, stddev=2 * u.cm)

    MESSAGE = r"'Hz' .* and 'm' .* are not convertible"
    with pytest.raises(UnitsError, match=MESSAGE):
        fitter(g_init, [1, 2, 3] * u.Hz, [4, 5, 6] * u.Jy)

def test_fitting_custom_names(model, fitter):
    """Tests fitting of models with custom inputs and outputs names."""
    fitter = fitter()
    x = np.linspace(0, 10, 100) * u.s
    y = model(x)
    new_model = fitter(model, x, y)
    for param_name in model.param_names:
        assert_quantity_allclose(
            getattr(new_model, param_name).quantity, getattr(model, param_name).quantity
        )

def test_quantity_call():
    """
    Test that models constructed with Quantities must be called with Quantities.
    """
    g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
    g(10 * u.m)

    MESSAGE = (
        r".* Units of input 'x', .* could not be converted to required input units of"
        r" m .*"
    )
    with pytest.raises(u.UnitsError, match=MESSAGE):
        g(10)

def test_no_quantity_call():
    """
    Test that models not constructed with Quantities can be called without Quantities.
    """
    g = Gaussian1D(mean=3, stddev=3, amplitude=3)
    assert isinstance(g, Gaussian1D)
    g(10)

def test_uses_quantity():
    """
    Test the ``uses_quantity`` property.
    """
    g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
    assert g.uses_quantity

    g = Gaussian1D(mean=3, stddev=3, amplitude=3)
    assert not g.uses_quantity

    g.mean = 3 * u.m
    assert g.uses_quantity

def test_uses_quantity_compound():
    """
    Test the ``uses_quantity`` property on compound models.
    """
    g = Gaussian1D(mean=3 * u.m, stddev=3 * u.cm, amplitude=3 * u.Jy)
    g2 = Gaussian1D(mean=5 * u.m, stddev=5 * u.cm, amplitude=5 * u.Jy)
    assert (g | g2).uses_quantity

    g = Gaussian1D(mean=3, stddev=3, amplitude=3)
    g2 = Gaussian1D(mean=5, stddev=5, amplitude=5)
    comp = g | g2
    assert not comp.uses_quantity

def test_read_only(m):
    """
    Test that the following attributes are read-only:
    input_units, return_units, input_units_allow_dimensionless,
    and input_units_strict.
    """
    with pytest.raises(AttributeError):
        m.input_units = {}
    with pytest.raises(AttributeError):
        m.return_units = {}
    with pytest.raises(AttributeError):
        m.input_units_allow_dimensionless = {}
    with pytest.raises(AttributeError):
        m.input_units_strict = {}

def test_parameter_quantity():
    """
    Basic tests for initializing general models (that do not require units)
    with parameters that have units attached.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)
    assert g.amplitude.value == 1.0
    assert g.amplitude.unit is u.J
    assert g.mean.value == 1.0
    assert g.mean.unit is u.m
    assert g.stddev.value == 0.1
    assert g.stddev.unit is u.m

def test_parameter_set_quantity():
    """
    Make sure that parameters that start off as quantities can be set to any
    other quantity, regardless of whether the units of the new quantity are
    compatible with the original ones.

    We basically leave it up to the evaluate method to raise errors if there
    are issues with incompatible units, and we don't check for consistency
    at the parameter level.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Try equivalent units
    g.amplitude = 4 * u.kJ
    assert_quantity_allclose(g.amplitude, 4 * u.kJ)

    g.mean = 3 * u.km
    assert_quantity_allclose(g.mean, 3 * u.km)

    g.stddev = 2 * u.mm
    assert_quantity_allclose(g.stddev, 2 * u.mm)

    # Try different units
    g.amplitude = 2 * u.s
    assert_quantity_allclose(g.amplitude, 2 * u.s)

    g.mean = 2 * u.Jy
    assert_quantity_allclose(g.mean, 2 * u.Jy)

def test_parameter_lose_units():
    """
    Check that parameters that have been set to a quantity and are then set to
    a value with no units raise an exception. We do this because setting a
    parameter to a value with no units is ambiguous if units were set before:
    if a parameter is 1 * u.Jy and the parameter is then set to 4, does this
    mean 4 without units, or 4 * u.Jy?
    """
    g = Gaussian1D(1 * u.Jy, 3, 0.1)

    MESSAGE = (
        r"The .* parameter should be given as a .* because it was originally"
        r" initialized as a .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.amplitude = 2

def test_parameter_add_units():
    """
    On the other hand, if starting from a parameter with no units, we should be
    able to add units since this is unambiguous.
    """
    g = Gaussian1D(1, 3, 0.1)

    g.amplitude = 2 * u.Jy
    assert_quantity_allclose(g.amplitude, 2 * u.Jy)

def test_parameter_change_unit():
    """
    Test that changing the unit on a parameter does not work. This is an
    ambiguous operation because it's not clear if it means that the value should
    be converted or if the unit should be changed without conversion.
    """
    g = Gaussian1D(1, 1 * u.m, 0.1 * u.m)

    # Setting a unit on a unitless parameter should not work
    MESSAGE = (
        r"Cannot attach units to parameters that were not initially specified with"
        r" units"
    )
    with pytest.raises(ValueError, match=MESSAGE):
        g.amplitude.unit = u.Jy

    # Changing to another unit should not work either, even if it is an
    # equivalent unit
    MESSAGE = (
        r"Cannot change the unit attribute directly, instead change the parameter to a"
        r" new quantity"
    )
    with pytest.raises(ValueError, match=MESSAGE):
        g.mean.unit = u.cm

def test_parameter_set_value():
    """
    Test that changing the value on a parameter works as expected.
    """
    g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)

    # To set a parameter to a quantity, we simply do
    g.amplitude = 2 * u.Jy

    # If we try setting the value, we need to pass a non-quantity value
    # TODO: determine whether this is the desired behavior?
    g.amplitude.value = 4
    assert_quantity_allclose(g.amplitude, 4 * u.Jy)
    assert g.amplitude.value == 4
    assert g.amplitude.unit is u.Jy

    # If we try setting it to a Quantity, we raise an error
    MESSAGE = (
        r"The .value property on parameters should be set to unitless values, not"
        r" Quantity objects.*"
    )
    with pytest.raises(TypeError, match=MESSAGE):
        g.amplitude.value = 3 * u.Jy

def test_parameter_quantity_property():
    """
    Test that the quantity property of Parameters behaves as expected.
    """
    # Since parameters have .value and .unit attributes that return just the
    # value and unit respectively, there is also a .quantity attribute that
    # returns a Quantity instance.
    g = Gaussian1D(1 * u.Jy, 1 * u.m, 0.1 * u.m)
    assert_quantity_allclose(g.amplitude.quantity, 1 * u.Jy)

    # Setting a parameter to a quantity changes the value and the default unit
    g.amplitude.quantity = 5 * u.mJy
    assert g.amplitude.value == 5
    assert g.amplitude.unit is u.mJy

    # And we can also set the parameter to a value with different units
    g.amplitude.quantity = 4 * u.s
    assert g.amplitude.value == 4
    assert g.amplitude.unit is u.s

    # But not to a value without units
    MESSAGE = r"The .quantity attribute should be set to a Quantity object"
    with pytest.raises(TypeError, match=MESSAGE):
        g.amplitude.quantity = 3

def test_parameter_defaults(unit, default):
    """
    Test that default quantities are correctly taken into account.
    """

    class TestModel(BaseTestModel):
        a = Parameter(default=default, unit=unit)

    # TODO: decide whether the default property should return a value or
    # a quantity?

    # The default unit and value should be set on the class
    assert TestModel.a.unit == u.m
    assert TestModel.a.default == 1.0

    # Check that the default unit and value are also set on a class instance
    m = TestModel()
    assert m.a.unit == u.m
    assert m.a.default == m.a.value == 1.0

    # If the parameter is set to a different value, the default is still the
    # internal default
    m = TestModel(2.0 * u.m)
    assert m.a.unit == u.m
    assert m.a.value == 2.0
    assert m.a.default == 1.0

    # Instantiate with a different, but compatible unit
    m = TestModel(2.0 * u.pc)
    assert m.a.unit == u.pc
    assert m.a.value == 2.0
    # The default is still in the original units
    # TODO: but how do we know what those units are if we don't return a
    # quantity?
    assert m.a.default == 1.0

    # Initialize with a completely different unit
    m = TestModel(2.0 * u.Jy)
    assert m.a.unit == u.Jy
    assert m.a.value == 2.0
    # TODO: this illustrates why the default doesn't make sense anymore
    assert m.a.default == 1.0

    # Instantiating with different units works, and just replaces the original
    # unit; instantiating without any units, however, raises an error
    MESSAGE = r".* requires a Quantity for parameter .*"
    with pytest.raises(InputParameterError, match=MESSAGE):
        TestModel(1.0)

def test_parameter_quantity_arithmetic():
    """
    Test that arithmetic operations with parameters that have units return the
    appropriate Quantities.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Addition should work if units are compatible
    assert g.mean + (1 * u.m) == 2 * u.m
    assert (1 * u.m) + g.mean == 2 * u.m

    # Multiplication by a scalar should also preserve the quantity-ness
    assert g.mean * 2 == (2 * u.m)
    assert 2 * g.mean == (2 * u.m)

    # Multiplication by a quantity should result in units being multiplied
    assert g.mean * (2 * u.m) == (2 * (u.m**2))
    assert (2 * u.m) * g.mean == (2 * (u.m**2))

    # Negation should work properly too
    assert -g.mean == (-1 * u.m)
    assert abs(-g.mean) == g.mean

    # However, addition of a quantity + scalar should not work
    MESSAGE = (
        r"Can only apply 'add' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean + 1
    with pytest.raises(UnitsError, match=MESSAGE):
        1 + g.mean

def test_parameter_quantity_comparison():
    """
    Basic test of comparison operations on parameters with units.
    """
    g = Gaussian1D(1 * u.J, 1 * u.m, 0.1 * u.m)

    # Essentially here we are checking that parameters behave like Quantity
    assert g.mean == 1 * u.m
    assert 1 * u.m == g.mean
    assert g.mean != 1
    assert 1 != g.mean
    assert g.mean < 2 * u.m
    assert 2 * u.m > g.mean

    MESSAGE = (
        r"Can only apply 'less' function to dimensionless quantities when other"
        r" argument .*"
    )
    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < 2  # noqa: B015
    with pytest.raises(UnitsError, match=MESSAGE):
        2 > g.mean  # noqa: B015

    g = Gaussian1D([1, 2] * u.J, [1, 2] * u.m, [0.1, 0.2] * u.m)
    assert np.all(g.mean == [1, 2] * u.m)
    assert np.all([1, 2] * u.m == g.mean)
    assert np.all(g.mean != [1, 2])
    assert np.all([1, 2] != g.mean)

    with pytest.raises(UnitsError, match=MESSAGE):
        g.mean < [3, 4]  # noqa: B015
    with pytest.raises(UnitsError, match=MESSAGE):
        [3, 4] > g.mean  # noqa: B015

def test_magunit_parameter():
    """Regression test for bug reproducer in issue #13133."""
    unit = u.ABmag
    c = -20.0 * unit
    model = Const1D(c)
    assert model(-23.0 * unit) == c

def test_log_getter():
    """Regression test for issue #14511."""
    x = 6000 * u.AA
    mdl_base = BlackBody(temperature=5000 * u.K, scale=u.Quantity(1))

    class CustomBlackBody(BlackBody):
        scale = Parameter(
            "scale",
            default=1,
            bounds=(0, None),
            getter=np.log,
            setter=np.exp,
            unit=u.dimensionless_unscaled,
        )

    mdl = CustomBlackBody(temperature=5000 * u.K, scale=u.Quantity(np.log(1)))
    assert mdl.scale == np.log(1)
    assert_quantity_allclose(mdl(x), mdl_base(x))

def test_sqrt_getter():
    """Regression test for issue #14511."""
    x = 1 * u.m
    mdl_base = Gaussian1D(mean=32 * u.m, stddev=3 * u.m)

    class CustomGaussian1D(Gaussian1D):
        mean = Parameter(
            "mean",
            default=1 * u.m,
            bounds=(0, None),
            getter=np.sqrt,
            setter=np.square,
            unit=u.m,
        )
        stddev = Parameter(
            "stddev",
            default=1 * u.m,
            bounds=(0, None),
            getter=np.sqrt,
            setter=np.square,
            unit=u.m,
        )

    mdl = CustomGaussian1D(mean=np.sqrt(32 * u.m), stddev=np.sqrt(3 * u.m))
    assert mdl.mean == np.sqrt(32 * u.m)
    assert (
        mdl.mean._internal_value == np.sqrt(32) ** 2
    )  # numerical inaccuracy results in 32.00000000000001
    assert mdl.mean._internal_unit == u.m

    assert mdl.stddev == np.sqrt(3 * u.m)
    assert (
        mdl.stddev._internal_value == np.sqrt(3) ** 2
    )  # numerical inaccuracy results in 3.0000000000000004
    assert mdl.stddev._internal_unit == u.m

    assert_quantity_allclose(mdl(x), mdl_base(x))

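In the custom model above, ``getter``/``setter`` define the mapping between the public and internal parameter values: the public value is ``getter(internal_value)`` and assignments pass through ``setter``. With ``getter=np.sqrt`` and ``setter=np.square``, a public value of sqrt(32) m is therefore stored internally as (approximately) 32 m, which is what the ``_internal_value`` assertions verify.
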
def test_euler_angles(axes_order):
    """
    Tests against all Euler sequences.
    The rotation matrix definitions come from Wikipedia.
    """
    phi = np.deg2rad(23.4)
    theta = np.deg2rad(12.2)
    psi = np.deg2rad(34)
    c1 = cos(phi)
    c2 = cos(theta)
    c3 = cos(psi)
    s1 = sin(phi)
    s2 = sin(theta)
    s3 = sin(psi)

    matrices = {
        "zxz": np.array(
            [
                [(c1 * c3 - c2 * s1 * s3), (-c1 * s3 - c2 * c3 * s1), (s1 * s2)],
                [(c3 * s1 + c1 * c2 * s3), (c1 * c2 * c3 - s1 * s3), (-c1 * s2)],
                [(s2 * s3), (c3 * s2), (c2)],
            ]
        ),
        "zyz": np.array(
            [
                [(c1 * c2 * c3 - s1 * s3), (-c3 * s1 - c1 * c2 * s3), (c1 * s2)],
                [(c1 * s3 + c2 * c3 * s1), (c1 * c3 - c2 * s1 * s3), (s1 * s2)],
                [(-c3 * s2), (s2 * s3), (c2)],
            ]
        ),
        "yzy": np.array(
            [
                [(c1 * c2 * c3 - s1 * s3), (-c1 * s2), (c3 * s1 + c1 * c2 * s3)],
                [(c3 * s2), (c2), (s2 * s3)],
                [(-c1 * s3 - c2 * c3 * s1), (s1 * s2), (c1 * c3 - c2 * s1 * s3)],
            ]
        ),
        "yxy": np.array(
            [
                [(c1 * c3 - c2 * s1 * s3), (s1 * s2), (c1 * s3 + c2 * c3 * s1)],
                [(s2 * s3), (c2), (-c3 * s2)],
                [(-c3 * s1 - c1 * c2 * s3), (c1 * s2), (c1 * c2 * c3 - s1 * s3)],
            ]
        ),
        "xyx": np.array(
            [
                [(c2), (s2 * s3), (c3 * s2)],
                [(s1 * s2), (c1 * c3 - c2 * s1 * s3), (-c1 * s3 - c2 * c3 * s1)],
                [(-c1 * s2), (c3 * s1 + c1 * c2 * s3), (c1 * c2 * c3 - s1 * s3)],
            ]
        ),
        "xzx": np.array(
            [
                [(c2), (-c3 * s2), (s2 * s3)],
                [(c1 * s2), (c1 * c2 * c3 - s1 * s3), (-c3 * s1 - c1 * c2 * s3)],
                [(s1 * s2), (c1 * s3 + c2 * c3 * s1), (c1 * c3 - c2 * s1 * s3)],
            ]
        ),
    }
    mat = rotations._create_matrix([phi, theta, psi], axes_order)
    assert_allclose(mat.T, matrices[axes_order])

def test_rotation_3d():
    """
    A sanity test - when V2_REF = 0 and V3_REF = 0,
    for V2, V3 close to the origin
    ROLL_REF should be approximately PA_V3.
    (Test taken from JWST SIAF report.)
    """

    def _roll_angle_from_matrix(matrix, v2, v3):
        X = -(matrix[2, 0] * np.cos(v2) + matrix[2, 1] * np.sin(v2)) * np.sin(
            v3
        ) + matrix[2, 2] * np.cos(v3)
        Y = (matrix[0, 0] * matrix[1, 2] - matrix[1, 0] * matrix[0, 2]) * np.cos(v2) + (
            matrix[0, 1] * matrix[1, 2] - matrix[1, 1] * matrix[0, 2]
        ) * np.sin(v2)
        new_roll = np.rad2deg(np.arctan2(Y, X))
        if new_roll < 0:
            new_roll += 360
        return new_roll

    # Reference points on the sky and in a coordinate frame associated
    # with the telescope
    ra_ref = 165  # in deg
    dec_ref = 54  # in deg
    v2_ref = 0
    v3_ref = 0
    pa_v3 = 37  # in deg

    v2 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
    v3 = np.deg2rad(2.7e-6)  # 2.7e-6 deg, i.e. ~0.01 arcsec
    angles = [v2_ref, -v3_ref, pa_v3, dec_ref, -ra_ref]
    axes = "zyxyz"

    M = rotations._create_matrix(np.deg2rad(angles) * u.deg, axes)
    roll_angle = _roll_angle_from_matrix(M, v2, v3)
    assert_allclose(roll_angle, pa_v3, atol=1e-3)

def test_spherical_rotation():
    """
    Test taken from JWST INS report - converts
    JWST telescope (V2, V3) coordinates to RA, DEC.
    """
    ra_ref = 165  # in deg
    dec_ref = 54  # in deg
    v2_ref = -503.654472 / 3600  # in deg
    v3_ref = -318.742464 / 3600  # in deg
    r0 = 37  # in deg

    v2 = 210  # in deg
    v3 = -75  # in deg
    expected_ra_dec = (107.12810484789563, -35.97940247128502)  # in deg
    angles = np.array([v2_ref, -v3_ref, r0, dec_ref, -ra_ref])
    axes = "zyxyz"

    v2s = rotations.RotationSequence3D(angles, axes_order=axes)
    x, y, z = rotations.spherical2cartesian(v2, v3)
    x1, y1, z1 = v2s(x, y, z)
    radec = rotations.cartesian2spherical(x1, y1, z1)
    assert_allclose(radec, expected_ra_dec, atol=1e-10)

    v2s = rotations.SphericalRotationSequence(angles, axes_order=axes)
    radec = v2s(v2, v3)
    assert_allclose(radec, expected_ra_dec, atol=1e-10)

def test_input_units(model, input_units, return_units):
    """Test input_units on various compound models."""
    assert model.input_units == input_units
    assert model.return_units == return_units

def _is_bit_flag(n):
    """
    Verifies if the input number is a bit flag (i.e., an integer number that is
    an integer power of 2).

    Parameters
    ----------
    n : int
        A positive integer number. Non-positive integers are considered not to
        be "flags".

    Returns
    -------
    bool
        ``True`` if input ``n`` is a bit flag and ``False`` if it is not.

    """
    if n < 1:
        return False

    return bin(n).count("1") == 1

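A quick doctest-style illustration of the power-of-two check above (values chosen here purely for illustration):

>>> _is_bit_flag(8)  # 0b1000 - exactly one bit set
True
>>> _is_bit_flag(6)  # 0b0110 - two bits set, so not a single flag
False
>>> _is_bit_flag(0)  # non-positive numbers are rejected up front
False
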
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16 | def extend_bit_flag_map(cls_name, base_cls=BitFlagNameMap, **kwargs):
"""
A convenience function for creating bit flags maps by subclassing an
existing map and adding additional flags supplied as keyword arguments.
Parameters
----------
cls_name : str
Class name of the bit flag map to be created.
base_cls : BitFlagNameMap, optional
Base class for the new bit flag map.
**kwargs : int
Each supplied keyword argument will be used to define bit flag
names in the new map. In addition to bit flag names, ``__version__`` is
allowed to indicate the version of the newly created map.
Examples
--------
>>> from astropy.nddata.bitmask import extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', __version__='1.0.0', CR=1, CLOUDY=4, RAINY=8)
>>> ST_CAM1_DQ = extend_bit_flag_map('ST_CAM1_DQ', ST_DQ, HOT=16, DEAD=32)
>>> ST_CAM1_DQ['HOT'] # <-- Access flags as dictionary keys
16
>>> ST_CAM1_DQ.HOT # <-- Access flags as class attributes
16
"""
new_cls = BitFlagNameMeta.__new__(
BitFlagNameMeta, cls_name, (base_cls,), {"_locked": False}
)
for k, v in kwargs.items():
try:
setattr(new_cls, k, v)
except AttributeError as e:
if new_cls[k] != int(v):
raise e
new_cls._locked = True
return new_cls |
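The ``try``/``except AttributeError`` above implies a subtlety the doctest does not show: re-declaring an inherited flag with the *same* value is tolerated, while a conflicting value re-raises. A sketch under that assumption (flag names illustrative):

ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4)
ST_DQ2 = extend_bit_flag_map('ST_DQ2', ST_DQ, CR=1)  # same value: accepted
# extend_bit_flag_map('ST_DQ3', ST_DQ, CR=2)  # conflicting value: AttributeError propagates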
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``bit_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011' | def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None):
"""
Converts input bit flags to a single integer value (bit mask) or `None`.
When input is a list of flags (either a Python list of integer flags or a
string of comma-, ``'|'``-, or ``'+'``-separated list of flags),
the returned bit mask is obtained by summing input flags.
.. note::
In order to flip the bits of the returned bit mask,
for input of `str` type, prepend '~' to the input string. '~' must
be prepended to the *entire string* and not to each bit flag! For
input that is already a bit mask or a Python list of bit flags, set
``flip_bits`` to `True` in order to flip the bits of the returned
bit mask.
Parameters
----------
bit_flags : int, str, list, None
An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or
``'+'``-separated list of integer bit flags or mnemonic flag names,
or a Python list of integer bit flags. If ``bit_flags`` is a `str`
and if it is prepended with '~', then the output bit mask will have
its bits flipped (compared to simple sum of input flags).
For input ``bit_flags`` that is already a bit mask or a Python list
of bit flags, bit-flipping can be controlled through ``flip_bits``
parameter.
.. note::
When ``bit_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
.. note::
Only one flag separator is supported at a time. ``bit_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
flip_bits : bool, None
Indicates whether or not to flip the bits of the returned bit mask
obtained from input bit flags. This parameter must be set to `None`
when input ``bit_flags`` is either `None` or a Python list of flags.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``bit_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
bitmask : int or None
Returns an integer bit mask formed from the input bit value or `None`
if input ``bit_flags`` parameter is `None` or an empty string.
If input string value was prepended with '~' (or ``flip_bits`` was set
to `True`), then returned value will have its bits flipped
(inverse mask).
Examples
--------
>>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map
>>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32)
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16'))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)'))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)',
... flag_name_map=ST_DQ))
'1111111111100011'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16]))
'0000000000011100'
>>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True))
'1111111111100011'
"""
has_flip_bits = flip_bits is not None
flip_bits = bool(flip_bits)
allow_non_flags = False
if _is_int(bit_flags):
return ~int(bit_flags) if flip_bits else int(bit_flags)
elif bit_flags is None:
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' must be set to 'None' when "
"input 'bit_flags' is None."
)
return None
elif isinstance(bit_flags, str):
if has_flip_bits:
raise TypeError(
"Keyword argument 'flip_bits' is not permitted for "
"comma-separated string lists of bit flags. Prepend '~' to "
"the string to indicate bit-flipping."
)
bit_flags = str(bit_flags).strip()
if bit_flags.upper() in ["", "NONE", "INDEF"]:
return None
# check whether bitwise-NOT is present and if it is, check that it is
# in the first position:
bitflip_pos = bit_flags.find("~")
if bitflip_pos == 0:
flip_bits = True
bit_flags = bit_flags[1:].lstrip()
else:
if bitflip_pos > 0:
raise ValueError("Bitwise-NOT must precede bit flag list.")
flip_bits = False
# basic check for correct use of parenthesis:
while True:
nlpar = bit_flags.count("(")
nrpar = bit_flags.count(")")
if nlpar == 0 and nrpar == 0:
break
if nlpar != nrpar:
raise ValueError("Unbalanced parentheses in bit flag list.")
lpar_pos = bit_flags.find("(")
rpar_pos = bit_flags.rfind(")")
if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1):
raise ValueError(
"Incorrect syntax (incorrect use of parenthesis) in bit flag list."
)
bit_flags = bit_flags[1:-1].strip()
if sum(k in bit_flags for k in "+,|") > 1:
raise ValueError(
"Only one type of bit flag separator may be used in one "
"expression. Allowed separators are: '+', '|', or ','."
)
if "," in bit_flags:
bit_flags = bit_flags.split(",")
elif "+" in bit_flags:
bit_flags = bit_flags.split("+")
elif "|" in bit_flags:
bit_flags = bit_flags.split("|")
else:
if bit_flags == "":
raise ValueError(
"Empty bit flag lists not allowed when either bitwise-NOT "
"or parenthesis are present."
)
bit_flags = [bit_flags]
if flag_name_map is not None:
try:
int(bit_flags[0])
except ValueError:
bit_flags = [flag_name_map[f] for f in bit_flags]
allow_non_flags = len(bit_flags) == 1
elif hasattr(bit_flags, "__iter__"):
if not all(_is_int(flag) for flag in bit_flags):
if flag_name_map is not None and all(
isinstance(flag, str) for flag in bit_flags
):
bit_flags = [flag_name_map[f] for f in bit_flags]
else:
raise TypeError(
"Every bit flag in a list must be either an "
"integer flag value or a 'str' flag name."
)
else:
raise TypeError("Unsupported type for argument 'bit_flags'.")
bitset = set(map(int, bit_flags))
if len(bitset) != len(bit_flags):
warnings.warn("Duplicate bit flags will be ignored")
bitmask = 0
for v in bitset:
if not _is_bit_flag(v) and not allow_non_flags:
raise ValueError(
f"Input list contains invalid (not powers of two) bit flag: {v}"
)
bitmask += v
if flip_bits:
bitmask = ~bitmask
return bitmask |
bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
An integer bit mask, `None`, a Python list of bit flags, or a
comma-, ``'+'``-, or ``'|'``-separated string list of integer
bit flags or mnemonic flag names that indicate which bits in the input
``bitfield`` should be *ignored* (i.e., zeroed).
.. note::
When ``ignore_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` will effectively make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as "bad" in the output boolean mask; otherwise it will be
interpreted as "good". The ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 will effectively assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
For example, both ``'4,8'``, ``'4|8'``, and ``'4+8'`` are equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``ignore_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]]) | def bitfield_to_boolean_mask(
bitfield,
ignore_flags=0,
flip_bits=None,
good_mask_value=False,
dtype=np.bool_,
flag_name_map=None,
):
"""
bitfield_to_boolean_mask(bitfield, ignore_flags=0, flip_bits=None, \
good_mask_value=False, dtype=numpy.bool_)
Converts an array of bit fields to a boolean (or integer) mask array
according to a bit mask constructed from the supplied bit flags (see
``ignore_flags`` parameter).
This function is particularly useful to convert data quality arrays to
boolean masks with selective filtering of DQ flags.
Parameters
----------
bitfield : ndarray
An array of bit flags. By default, values different from zero are
interpreted as "bad" values and values equal to zero are considered
as "good" values. However, see ``ignore_flags`` parameter on how to
selectively ignore some bits in the ``bitfield`` array data.
ignore_flags : int, str, list, None (default = 0)
An integer bit mask, `None`, a Python list of bit flags, or a
comma-, ``'+'``-, or ``'|'``-separated string list of integer
bit flags or mnemonic flag names that indicate which bits in the input
``bitfield`` should be *ignored* (i.e., zeroed).
.. note::
When ``ignore_flags`` is a list of flag names, the ``flag_name_map``
parameter must be provided.
| Setting ``ignore_flags`` to `None` will effectively make
`bitfield_to_boolean_mask` interpret all ``bitfield`` elements
as "good" regardless of their value.
| When ``ignore_flags`` argument is an integer bit mask, it will be
combined using bitwise-NOT and bitwise-AND with each element of the
input ``bitfield`` array (``~ignore_flags & bitfield``). If the
resultant bitfield element is non-zero, that element will be
interpreted as "bad" in the output boolean mask; otherwise it will be
interpreted as "good". The ``flip_bits`` parameter may be used
to flip the bits (``bitwise-NOT``) of the bit mask thus effectively
changing the meaning of the ``ignore_flags`` parameter from "ignore"
to "use only" these flags.
.. note::
Setting ``ignore_flags`` to 0 will effectively assume that all
non-zero elements in the input ``bitfield`` array are to be
interpreted as "bad".
| When ``ignore_flags`` argument is a Python list of integer bit
flags, these flags are added together to create an integer bit mask.
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In order to flip the bits of the resultant
bit mask, use ``flip_bits`` parameter.
| Alternatively, ``ignore_flags`` may be a string of comma- or
``'+'``(or ``'|'``)-separated list of integer bit flags that should
be added (bitwise OR) together to create an integer bit mask.
For example, both ``'4,8'``, ``'4|8'``, and ``'4+8'`` are equivalent
and indicate that bit flags 4 and 8 in the input ``bitfield``
array should be ignored when generating boolean mask.
.. note::
``'None'``, ``'INDEF'``, and empty (or all white space) strings
are special values of string ``ignore_flags`` that are
interpreted as `None`.
.. note::
Each item in the list must be a flag, i.e., an integer that is an
integer power of 2. In addition, for convenience, an arbitrary
**single** integer is allowed and it will be interpreted as an
integer bit mask. For example, instead of ``'4,8'`` one could
simply provide string ``'12'``.
.. note::
Only one flag separator is supported at a time. ``ignore_flags``
string should not mix ``','``, ``'+'``, and ``'|'`` separators.
.. note::
When ``ignore_flags`` is a `str` and when it is prepended with
'~', then the meaning of ``ignore_flags`` parameters will be
reversed: now it will be interpreted as a list of bit flags to be
*used* (or *not ignored*) when deciding which elements of the
input ``bitfield`` array are "bad". Following this convention,
an ``ignore_flags`` string value of ``'~0'`` would be equivalent
to setting ``ignore_flags=None``.
.. warning::
Because prepending '~' to a string ``ignore_flags`` is equivalent
to setting ``flip_bits`` to `True`, ``flip_bits`` cannot be used
with string ``ignore_flags`` and it must be set to `None`.
flip_bits : bool, None (default = None)
Specifies whether or not to invert the bits of the bit mask either
supplied directly through ``ignore_flags`` parameter or built from the
bit flags passed through ``ignore_flags`` (only when bit flags are
passed as Python lists of integer bit flags). Occasionally, it may be
useful to *consider only specific bit flags* in the ``bitfield``
array when creating a boolean mask as opposed to *ignoring* specific
bit flags as ``ignore_flags`` behaves by default. This can be achieved
by inverting/flipping the bits of the bit mask created from
``ignore_flags`` flags which effectively changes the meaning of the
``ignore_flags`` parameter from "ignore" to "use only" these flags.
Setting ``flip_bits`` to `None` means that no bit flipping will be
performed. Bit flipping for string lists of bit flags must be
specified by prepending '~' to string bit flag lists
(see documentation for ``ignore_flags`` for more details).
.. warning::
This parameter can be set to either `True` or `False` **ONLY** when
``ignore_flags`` is either an integer bit mask or a Python
list of integer bit flags. When ``ignore_flags`` is either
`None` or a string list of flags, ``flip_bits`` **MUST** be set
to `None`.
good_mask_value : int, bool (default = False)
This parameter is used to derive the values that will be assigned to
the elements in the output boolean mask array that correspond to the
"good" bit fields (that are 0 after zeroing bits specified by
``ignore_flags``) in the input ``bitfield`` array. When
``good_mask_value`` is non-zero or ``numpy.True_`` then values in the
output boolean mask array corresponding to "good" bit fields in
``bitfield`` will be ``numpy.True_`` (if ``dtype`` is ``numpy.bool_``)
or 1 (if ``dtype`` is of numerical type) and values corresponding
to "bad" flags will be ``numpy.False_`` (or 0). When
``good_mask_value`` is zero or ``numpy.False_`` then the values
in the output boolean mask array corresponding to "good" bit fields
in ``bitfield`` will be ``numpy.False_`` (if ``dtype`` is
``numpy.bool_``) or 0 (if ``dtype`` is of numerical type) and values
corresponding to "bad" flags will be ``numpy.True_`` (or 1).
dtype : data-type (default = ``numpy.bool_``)
The desired data-type for the output binary mask array.
flag_name_map : BitFlagNameMap
A `BitFlagNameMap` object that provides mapping from mnemonic
bit flag names to integer bit values in order to translate mnemonic
flags to numeric values when ``ignore_flags`` is a comma- or
'+'-separated list of mnemonic bit flag names.
Returns
-------
mask : ndarray
Returns an array of the same dimensionality as the input ``bitfield``
array whose elements can have two possible values,
e.g., ``numpy.True_`` or ``numpy.False_`` (or 1 or 0 for integer
``dtype``) according to the values of the input ``bitfield`` elements,
``ignore_flags`` parameter, and the ``good_mask_value`` parameter.
Examples
--------
>>> from astropy.nddata import bitmask
>>> import numpy as np
>>> dqarr = np.asarray([[0, 0, 1, 2, 0, 8, 12, 0],
... [10, 4, 0, 0, 0, 16, 6, 0]])
>>> flag_map = bitmask.extend_bit_flag_map(
... 'ST_DQ', CR=2, CLOUDY=4, RAINY=8, HOT=16, DEAD=32
... )
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=int)
array([[0, 0, 1, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=0,
... dtype=bool)
array([[False, False, True, True, False, True, True, False],
[ True, True, False, False, False, True, True, False]]...)
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6,
... good_mask_value=0, dtype=int)
array([[0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=~6,
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=6, dtype=int,
... flip_bits=True, good_mask_value=0)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(2+4)',
... good_mask_value=0, dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags=[2, 4],
... flip_bits=True, good_mask_value=0,
... dtype=int)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR,CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
>>> bitmask.bitfield_to_boolean_mask(dqarr, ignore_flags='~(CR+CLOUDY)',
... good_mask_value=0, dtype=int,
... flag_name_map=flag_map)
array([[0, 0, 0, 1, 0, 0, 1, 0],
[1, 1, 0, 0, 0, 0, 1, 0]])
"""
bitfield = np.asarray(bitfield)
if not np.issubdtype(bitfield.dtype, np.integer):
raise TypeError("Input bitfield array must be of integer type.")
ignore_mask = interpret_bit_flags(
ignore_flags, flip_bits=flip_bits, flag_name_map=flag_name_map
)
if ignore_mask is None:
if good_mask_value:
mask = np.ones_like(bitfield, dtype=dtype)
else:
mask = np.zeros_like(bitfield, dtype=dtype)
return mask
# filter out bits beyond the maximum supported by the data type:
ignore_mask = ignore_mask & _SUPPORTED_FLAGS
# invert the "ignore" mask:
ignore_mask = np.bitwise_not(
ignore_mask, dtype=bitfield.dtype.type, casting="unsafe"
)
mask = np.empty_like(bitfield, dtype=np.bool_)
np.bitwise_and(bitfield, ignore_mask, out=mask, casting="unsafe")
if good_mask_value:
np.logical_not(mask, out=mask)
return mask.astype(dtype=dtype, subok=False, copy=False) |
Reshape a data array into blocks.
This is useful to efficiently apply functions on block subsets of
the data instead of using loops. The reshaped array is a view of
the input data array.
.. versionadded:: 4.1
Parameters
----------
data : ndarray
The input data array.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis. Each dimension
of ``block_size`` must divide evenly into the corresponding
dimension of ``data``.
Returns
-------
output : ndarray
The reshaped array as a view of the input ``data`` array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import reshape_as_blocks
>>> data = np.arange(16).reshape(4, 4)
>>> data
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> reshape_as_blocks(data, (2, 2))
array([[[[ 0, 1],
[ 4, 5]],
[[ 2, 3],
[ 6, 7]]],
[[[ 8, 9],
[12, 13]],
[[10, 11],
[14, 15]]]]) | def reshape_as_blocks(data, block_size):
"""
Reshape a data array into blocks.
This is useful to efficiently apply functions on block subsets of
the data instead of using loops. The reshaped array is a view of
the input data array.
.. versionadded:: 4.1
Parameters
----------
data : ndarray
The input data array.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis. Each dimension
of ``block_size`` must divide evenly into the corresponding
dimension of ``data``.
Returns
-------
output : ndarray
The reshaped array as a view of the input ``data`` array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import reshape_as_blocks
>>> data = np.arange(16).reshape(4, 4)
>>> data
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> reshape_as_blocks(data, (2, 2))
array([[[[ 0, 1],
[ 4, 5]],
[[ 2, 3],
[ 6, 7]]],
[[[ 8, 9],
[12, 13]],
[[10, 11],
[14, 15]]]])
"""
data, block_size = _process_block_inputs(data, block_size)
if np.any(np.mod(data.shape, block_size) != 0):
raise ValueError(
"Each dimension of block_size must divide evenly "
"into the corresponding dimension of data"
)
nblocks = np.array(data.shape) // block_size
new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)
nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices
block_idx = tuple(range(1, len(new_shape), 2)) # odd indices
return data.reshape(new_shape).transpose(nblocks_idx + block_idx) |
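Because the result is a view, writes to the blocked array propagate back to the input; a small sketch (assuming a C-contiguous input so ``reshape`` can avoid a copy):

import numpy as np

data = np.arange(16).reshape(4, 4)
blocks = reshape_as_blocks(data, (2, 2))
blocks[0, 0, 0, 0] = 99
assert data[0, 0] == 99  # the blocked array shares memory with `data`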
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array-like
The data to be resampled.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a 4D `~numpy.ndarray` (the 2D `~numpy.ndarray`
input into `block_reduce` gets reshaped as 4D) and has an
``axis`` keyword that accepts tuples. This function will be
called with ``axis=(2, 3)`` and it should return a 2D array. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data. Note that, depending on the input ``func``,
the dtype of the output array may not match the input array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +FLOAT_CMP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +FLOAT_CMP
array([[ 2.5,  4.5],
[10.5, 12.5]])
"""
Downsample a data array by applying a function to local blocks.
If ``data`` is not perfectly divisible by ``block_size`` along a
given axis then the data will be trimmed (from the end) along that
axis.
Parameters
----------
data : array-like
The data to be resampled.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
func : callable, optional
The method to use to downsample the data. Must be a callable
that takes in a 4D `~numpy.ndarray` (the 2D `~numpy.ndarray`
input into `block_reduce` gets reshaped as 4D) and has an
``axis`` keyword that accepts tuples. This function will be
called with ``axis=(2, 3)`` and it should return a 2D array. The
default is `~numpy.sum`, which provides block summation (and
conserves the data sum).
Returns
-------
output : array-like
The resampled data. Note that, depending on the input ``func``,
the dtype of the output array may not match the input array.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_reduce
>>> data = np.arange(16).reshape(4, 4)
>>> block_reduce(data, 2) # doctest: +FLOAT_CMP
array([[10, 18],
[42, 50]])
>>> block_reduce(data, 2, func=np.mean) # doctest: +FLOAT_CMP
array([[ 2.5,  4.5],
[10.5, 12.5]])
"""
data, block_size = _process_block_inputs(data, block_size)
nblocks = np.array(data.shape) // block_size
size_init = nblocks * block_size # evenly-divisible size
# trim data if necessary
for axis in range(data.ndim):
if data.shape[axis] != size_init[axis]:
data = data.swapaxes(0, axis)
data = data[: size_init[axis]]
data = data.swapaxes(0, axis)
reshaped = reshape_as_blocks(data, block_size)
axis = tuple(range(data.ndim, reshaped.ndim))
return func(reshaped, axis=axis) |
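The trimming behavior for shapes that are not evenly divisible is documented above but not exercised in the doctests; a quick sketch:

import numpy as np

data = np.arange(5)    # [0, 1, 2, 3, 4]; length 5 is not divisible by 2
block_reduce(data, 2)  # -> array([1, 5]); the trailing element 4 is trimmed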
Upsample a data array by block replication.
Parameters
----------
data : array-like
The data to be block replicated.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array-like
The block-replicated data. Note that when ``conserve_sum`` is
`True`, the dtype of the output array will be float.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]]) | def block_replicate(data, block_size, conserve_sum=True):
"""
Upsample a data array by block replication.
Parameters
----------
data : array-like
The data to be block replicated.
block_size : int or array-like (int)
The integer block size along each axis. If ``block_size`` is a
scalar and ``data`` has more than one dimension, then
``block_size`` will be used for every axis.
conserve_sum : bool, optional
If `True` (the default) then the sum of the output
block-replicated data will equal the sum of the input ``data``.
Returns
-------
output : array-like
The block-replicated data. Note that when ``conserve_sum`` is
`True`, the dtype of the output array will be float.
Examples
--------
>>> import numpy as np
>>> from astropy.nddata import block_replicate
>>> data = np.array([[0., 1.], [2., 3.]])
>>> block_replicate(data, 2) # doctest: +FLOAT_CMP
array([[0. , 0. , 0.25, 0.25],
[0. , 0. , 0.25, 0.25],
[0.5 , 0.5 , 0.75, 0.75],
[0.5 , 0.5 , 0.75, 0.75]])
>>> block_replicate(data, 2, conserve_sum=False) # doctest: +FLOAT_CMP
array([[0., 0., 1., 1.],
[0., 0., 1., 1.],
[2., 2., 3., 3.],
[2., 2., 3., 3.]])
"""
data, block_size = _process_block_inputs(data, block_size)
for i in range(data.ndim):
data = np.repeat(data, block_size[i], axis=i)
if conserve_sum:
# in-place division can fail due to dtype casting rule
data = data / np.prod(block_size)
return data |
Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten. | def _arithmetic(op):
"""Decorator factory which temporarily disables the need for a unit when
creating a new CCDData instance. The final result must have a unit.
Parameters
----------
op : function
The function to apply. Supported are:
- ``np.add``
- ``np.subtract``
- ``np.multiply``
- ``np.true_divide``
Notes
-----
Should only be used on CCDData ``add``, ``subtract``, ``divide`` or
``multiply`` because only these methods from NDArithmeticMixin are
overwritten.
"""
def decorator(func):
def inner(self, operand, operand2=None, **kwargs):
global _config_ccd_requires_unit
_config_ccd_requires_unit = False
result = self._prepare_then_do_arithmetic(op, operand, operand2, **kwargs)
# Wrap it again as CCDData so it checks the final unit.
_config_ccd_requires_unit = True
return result.__class__(result)
inner.__doc__ = f"See `astropy.nddata.NDArithmeticMixin.{func.__name__}`."
return sharedmethod(inner)
return decorator |
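For orientation, this factory is applied in the ``CCDData`` class body roughly as follows (a sketch assuming the class derives from ``NDDataArray``, which provides the ``NDArithmeticMixin`` methods):

# Inside the CCDData class body (sketch):
add = _arithmetic(np.add)(NDDataArray.add)
subtract = _arithmetic(np.subtract)(NDDataArray.subtract)
multiply = _arithmetic(np.multiply)(NDDataArray.multiply)
divide = _arithmetic(np.true_divide)(NDDataArray.divide)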
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs | def _generate_wcs_and_update_header(hdr):
"""
Generate a WCS object from a header and remove the WCS-specific
keywords from the header.
Parameters
----------
hdr : astropy.io.fits.header or other dict-like
Returns
-------
new_header, wcs
"""
# Try constructing a WCS object.
try:
wcs = WCS(hdr)
except Exception as exc:
# Normally WCS only raises Warnings and doesn't fail but in rare
# cases (malformed header) it could fail...
log.info(
"An exception happened while extracting WCS information from "
f"the Header.\n{type(exc).__name__}: {str(exc)}"
)
return hdr, None
# Test for success by checking to see if the wcs ctype has a non-empty
# value, return None for wcs if ctype is empty.
if not wcs.wcs.ctype[0]:
return (hdr, None)
new_hdr = hdr.copy()
# If the keywords below are in the header they are also added to WCS.
# It seems like they should *not* be removed from the header, though.
wcs_header = wcs.to_header(relax=True)
for k in wcs_header:
if k not in _KEEP_THESE_KEYWORDS_IN_HEADER:
new_hdr.remove(k, ignore_missing=True)
# Check that this does not result in an inconsistent header WCS if the WCS
# is converted back to a header.
if (_PCs & set(wcs_header)) and (_CDs & set(new_hdr)):
# The PCi_j representation is used by the astropy.wcs object,
# so CDi_j keywords were not removed from new_hdr. Remove them now.
for cd in _CDs:
new_hdr.remove(cd, ignore_missing=True)
# The other case -- CD in the header produced by astropy.wcs -- should
# never happen based on [1], which computes the matrix in PC form.
# [1]: https://github.com/astropy/astropy/blob/1cf277926d3598dd672dd528504767c37531e8c9/cextern/wcslib/C/wcshdr.c#L596
#
# The test test_ccddata.test_wcs_keyword_removal_for_wcs_test_files() does
# check for the possibility that both PC and CD are present in the result
# so if the implementation of to_header changes in wcslib in the future
# then the tests should catch it, and then this code will need to be
# updated.
# We need to check for any SIP coefficients that got left behind if the
# header has SIP.
if wcs.sip is not None:
keyword = "{}_{}_{}"
polynomials = ["A", "B", "AP", "BP"]
for poly in polynomials:
order = wcs.sip.__getattribute__(f"{poly.lower()}_order")
for i, j in itertools.product(range(order), repeat=2):
new_hdr.remove(keyword.format(poly, i, j), ignore_missing=True)
return (new_hdr, wcs) |
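A short usage sketch (the file name is hypothetical; ``fits.getheader`` is the standard astropy.io.fits convenience function):

from astropy.io import fits

hdr = fits.getheader('image.fits')  # hypothetical file
new_hdr, wcs = _generate_wcs_and_update_header(hdr)
if wcs is not None:
    print(wcs.wcs.ctype)  # new_hdr no longer carries these WCS keywords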
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
file from which CCDData should be initialized. If zero and
there is no data in the primary HDU, the first extension HDU with
data will be used, and the primary header will be merged into that
extension's header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contained scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled. | def fits_ccddata_reader(
filename,
hdu=0,
unit=None,
hdu_uncertainty="UNCERT",
hdu_mask="MASK",
hdu_flags=None,
key_uncertainty_type="UTYPE",
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Generate a CCDData object from a FITS file.
Parameters
----------
filename : str
Name of fits file.
hdu : int, str, tuple of (str, int), optional
Index or other identifier of the Header Data Unit of the FITS
file from which CCDData should be initialized. If zero and
there is no data in the primary HDU, the first extension HDU with
data will be used, and the primary header will be merged into that
extension's header.
Default is ``0``.
unit : `~astropy.units.Unit`, optional
Units of the image data. If this argument is provided and there is a
unit for the image in the FITS header (the keyword ``BUNIT`` is used
as the unit, if present), this argument is used for the unit.
Default is ``None``.
hdu_uncertainty : str or None, optional
FITS extension from which the uncertainty should be initialized. If the
extension does not exist the uncertainty of the CCDData is ``None``.
Default is ``'UNCERT'``.
hdu_mask : str or None, optional
FITS extension from which the mask should be initialized. If the
extension does not exist the mask of the CCDData is ``None``.
Default is ``'MASK'``.
hdu_flags : str or None, optional
Currently not implemented.
Default is ``None``.
key_uncertainty_type : str, optional
The header key name where the class name of the uncertainty is stored
in the hdu of the uncertainty (if any).
Default is ``UTYPE``.
.. versionadded:: 3.1
hdu_psf : str or None, optional
FITS extension from which the psf image should be initialized. If the
extension does not exist the psf of the CCDData is `None`.
kwd :
Any additional keyword parameters are passed through to the FITS reader
in :mod:`astropy.io.fits`; see Notes for additional discussion.
Notes
-----
FITS files that contained scaled data (e.g. unsigned integer images) will
be scaled and the keywords used to manage scaled data in
:mod:`astropy.io.fits` are disabled.
"""
unsupport_open_keywords = {
"do_not_scale_image_data": "Image data must be scaled.",
"scale_back": "Scale information is not preserved.",
}
for key, msg in unsupport_open_keywords.items():
if key in kwd:
prefix = f"unsupported keyword: {key}."
raise TypeError(f"{prefix} {msg}")
with fits.open(filename, **kwd) as hdus:
hdr = hdus[hdu].header
if hdu_uncertainty is not None and hdu_uncertainty in hdus:
unc_hdu = hdus[hdu_uncertainty]
stored_unc_name = unc_hdu.header.get(key_uncertainty_type, "None")
# For compatibility reasons the default is standard deviation
# uncertainty because files could have been created before the
# uncertainty type was stored in the header.
unc_type = _unc_name_to_cls.get(stored_unc_name, StdDevUncertainty)
uncertainty = unc_type(unc_hdu.data)
else:
uncertainty = None
if hdu_mask is not None and hdu_mask in hdus:
# Mask is saved as uint but we want it to be boolean.
mask = hdus[hdu_mask].data.astype(np.bool_)
else:
mask = None
if hdu_flags is not None and hdu_flags in hdus:
raise NotImplementedError("loading flags is currently not supported.")
if hdu_psf is not None and hdu_psf in hdus:
psf = hdus[hdu_psf].data
else:
psf = None
# search for the first instance with data if
# the primary header is empty.
if hdu == 0 and hdus[hdu].data is None:
for i in range(len(hdus)):
if (
hdus.info(hdu)[i][3] == "ImageHDU"
and hdus.fileinfo(i)["datSpan"] > 0
):
hdu = i
comb_hdr = hdus[hdu].header.copy()
# Add header values from the primary header that aren't
# present in the extension header.
comb_hdr.extend(hdr, unique=True)
hdr = comb_hdr
log.info(f"first HDU with data is extension {hdu}.")
break
if "bunit" in hdr:
fits_unit_string = hdr["bunit"]
# patch to handle FITS files using ADU for the unit instead of the
# standard version of 'adu'
if fits_unit_string.strip().lower() == "adu":
fits_unit_string = fits_unit_string.lower()
else:
fits_unit_string = None
if fits_unit_string:
if unit is None:
# Convert the BUNIT header keyword to a unit and if that's not
# possible raise a meaningful error message.
try:
kifus = CCDData.known_invalid_fits_unit_strings
if fits_unit_string in kifus:
fits_unit_string = kifus[fits_unit_string]
fits_unit_string = u.Unit(fits_unit_string)
except ValueError:
raise ValueError(
f"The Header value for the key BUNIT ({fits_unit_string}) "
"cannot be interpreted as valid unit. To successfully read the "
"file as CCDData you can pass in a valid `unit` "
"argument explicitly or change the header of the FITS "
"file before reading it."
)
else:
log.info(
f"using the unit {unit} passed to the FITS reader instead "
f"of the unit {fits_unit_string} in the FITS file."
)
use_unit = unit or fits_unit_string
hdr, wcs = _generate_wcs_and_update_header(hdr)
ccd_data = CCDData(
hdus[hdu].data,
meta=hdr,
unit=use_unit,
mask=mask,
uncertainty=uncertainty,
wcs=wcs,
psf=psf,
)
return ccd_data |
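This reader is normally registered with astropy's unified I/O machinery and reached through ``CCDData.read``; a direct call would look like this (file name hypothetical):

ccd = fits_ccddata_reader('science.fits', unit='adu')  # hypothetical file
# equivalent through the registered interface:
# ccd = CCDData.read('science.fits', unit='adu')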
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported. | def fits_ccddata_writer(
ccd_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_flags=None,
key_uncertainty_type="UTYPE",
as_image_hdu=False,
hdu_psf="PSFIMAGE",
**kwd,
):
"""
Write CCDData object to FITS file.
Parameters
----------
ccd_data : CCDData
Object to write.
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_flags, hdu_psf : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty,
``'PSFIMAGE'`` for psf, and `None` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
as_image_hdu : bool
If this option is `True`, the first item of the returned
`~astropy.io.fits.HDUList` is a `~astropy.io.fits.ImageHDU`, instead of
the default `~astropy.io.fits.PrimaryHDU`.
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has a different unit than
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = ccd_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
as_image_hdu=as_image_hdu,
hdu_psf=hdu_psf,
)
if as_image_hdu:
hdu.insert(0, fits.PrimaryHDU())
hdu.writeto(filename, **kwd) |
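The writer is the counterpart registered for ``CCDData.write``; extra keywords such as ``overwrite`` pass straight through to ``HDUList.writeto`` (file name hypothetical):

fits_ccddata_writer(ccd_data, 'out.fits', overwrite=True)  # hypothetical file
# equivalent through the registered interface:
# ccd_data.write('out.fits', overwrite=True)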
Decorator to wrap functions that could accept an NDData instance with
its properties passed as function arguments.
Parameters
----------
_func : callable, None, optional
The function to decorate or ``None`` if used as factory. The first
positional argument should be ``data`` and take a numpy array. It is
possible to overwrite the name, see ``attribute_argument_mapping``
argument.
Default is ``None``.
accepts : class, optional
The class or subclass of ``NDData`` that should be unpacked before
calling the function.
Default is ``NDData``
repack : bool, optional
Should be ``True`` if the return should be converted to the input
class again after the wrapped function call.
Default is ``False``.
.. note::
Must be ``True`` if either one of ``returns`` or ``keeps``
is specified.
returns : iterable, None, optional
An iterable containing strings indicating which returned values should
be set on the class. For example, if a function returns data and mask, this
should be ``['data', 'mask']``. If ``None`` assume the function only
returns one argument: ``'data'``.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
keeps : iterable, None, optional
An iterable containing strings that indicate which values should be
copied from the original input to the returned class. If ``None``
assume that no attributes are copied.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
attribute_argument_mapping :
Keyword parameters that optionally indicate which function argument
should be interpreted as which attribute on the input. By default
it assumes the function takes a ``data`` argument as first argument,
but if the first argument is called ``input`` one should pass
``support_nddata(..., data='input')`` to the function.
Returns
-------
decorator_factory or decorated_function : callable
If ``_func=None`` this returns a decorator, otherwise it returns the
decorated ``_func``.
Notes
-----
If properties of ``NDData`` are set but have no corresponding function
argument, a Warning is shown.
If a property of the ``NDData`` is set and an explicit argument is also
given, the explicitly given argument is used and a Warning is shown.
The supported properties are:
- ``mask``
- ``unit``
- ``wcs``
- ``meta``
- ``uncertainty``
- ``flags``
Examples
--------
This function takes a Numpy array for the data, and some WCS information
with the ``wcs`` keyword argument::
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
However, you might have an NDData instance that has the ``wcs`` property
set and you would like to be able to call the function with
``downsample(my_nddata)`` and have the WCS information, if present,
automatically be passed to the ``wcs`` keyword argument.
This decorator can be used to make this possible::
@support_nddata
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
This function can now either be called as before, specifying the data and
WCS separately, or an NDData instance can be passed to the ``data``
argument. | def support_nddata(
_func=None,
accepts=NDData,
repack=False,
returns=None,
keeps=None,
**attribute_argument_mapping,
):
"""Decorator to wrap functions that could accept an NDData instance with
its properties passed as function arguments.
Parameters
----------
_func : callable, None, optional
The function to decorate or ``None`` if used as factory. The first
positional argument should be ``data`` and take a numpy array. It is
possible to overwrite the name, see ``attribute_argument_mapping``
argument.
Default is ``None``.
accepts : class, optional
The class or subclass of ``NDData`` that should be unpacked before
calling the function.
Default is ``NDData``
repack : bool, optional
Should be ``True`` if the return should be converted to the input
class again after the wrapped function call.
Default is ``False``.
.. note::
Must be ``True`` if either one of ``returns`` or ``keeps``
is specified.
returns : iterable, None, optional
An iterable containing strings indicating which returned values should
be set on the class. For example, if a function returns data and mask, this
should be ``['data', 'mask']``. If ``None`` assume the function only
returns one argument: ``'data'``.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
keeps : iterable, None, optional
An iterable containing strings that indicate which values should be
copied from the original input to the returned class. If ``None``
assume that no attributes are copied.
Default is ``None``.
.. note::
Must be ``None`` if ``repack=False``.
attribute_argument_mapping :
Keyword parameters that optionally indicate which function argument
should be interpreted as which attribute on the input. By default
it assumes the function takes a ``data`` argument as first argument,
but if the first argument is called ``input`` one should pass
``support_nddata(..., data='input')`` to the function.
Returns
-------
decorator_factory or decorated_function : callable
If ``_func=None`` this returns a decorator, otherwise it returns the
decorated ``_func``.
Notes
-----
If properties of ``NDData`` are set but have no corresponding function
argument, a Warning is shown.
If a property of the ``NDData`` is set and an explicit argument is also
given, the explicitly given argument is used and a Warning is shown.
The supported properties are:
- ``mask``
- ``unit``
- ``wcs``
- ``meta``
- ``uncertainty``
- ``flags``
Examples
--------
This function takes a Numpy array for the data, and some WCS information
with the ``wcs`` keyword argument::
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
However, you might have an NDData instance that has the ``wcs`` property
set and you would like to be able to call the function with
``downsample(my_nddata)`` and have the WCS information, if present,
automatically be passed to the ``wcs`` keyword argument.
This decorator can be used to make this possible::
@support_nddata
def downsample(data, wcs=None):
# downsample data and optionally WCS here
pass
This function can now either be called as before, specifying the data and
WCS separately, or an NDData instance can be passed to the ``data``
argument.
"""
if (returns is not None or keeps is not None) and not repack:
raise ValueError("returns or keeps should only be set if repack=True.")
elif returns is None and repack:
raise ValueError("returns should be set if repack=True.")
else:
# Use empty lists for returns and keeps so we don't need to check
# if any of those is None later on.
if returns is None:
returns = []
if keeps is None:
keeps = []
# Short version to avoid the long variable name later.
attr_arg_map = attribute_argument_mapping
if any(keep in returns for keep in keeps):
raise ValueError("cannot specify the same attribute in `returns` and `keeps`.")
all_returns = returns + keeps
def support_nddata_decorator(func):
# Find out args and kwargs
func_args, func_kwargs = [], []
sig = signature(func).parameters
for param_name, param in sig.items():
if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):
raise ValueError("func may not have *args or **kwargs.")
try:
if param.default == param.empty:
func_args.append(param_name)
else:
func_kwargs.append(param_name)
# The comparison to param.empty may fail if the default is a
# numpy array or something similar. So if the comparison fails then
# it's quite obvious that there was a default and it should be
# appended to the "func_kwargs".
except ValueError as exc:
if (
"The truth value of an array with more than one element "
"is ambiguous." in str(exc)
):
func_kwargs.append(param_name)
else:
raise
# First argument should be data
if not func_args or func_args[0] != attr_arg_map.get("data", "data"):
raise ValueError(
"Can only wrap functions whose first positional "
"argument is `{}`"
"".format(attr_arg_map.get("data", "data"))
)
@wraps(func)
def wrapper(data, *args, **kwargs):
bound_args = signature(func).bind(data, *args, **kwargs)
unpack = isinstance(data, accepts)
input_data = data
ignored = []
if not unpack and isinstance(data, NDData):
raise TypeError(
f"Only NDData sub-classes that inherit from {accepts.__name__}"
" can be used by this function"
)
# If data is an NDData instance, we can try and find properties
# that can be passed as kwargs.
if unpack:
# We loop over a list of pre-defined properties
for prop in islice(SUPPORTED_PROPERTIES, 1, None):
# We only need to do something if the property exists on
# the NDData object
try:
value = getattr(data, prop)
except AttributeError:
continue
# Skip if the property exists but is None or empty.
if prop == "meta" and not value:
continue
elif value is None:
continue
# Warn if the property is set but not used by the function.
propmatch = attr_arg_map.get(prop, prop)
if propmatch not in func_kwargs:
ignored.append(prop)
continue
# Check if the property was explicitly given and issue a
# Warning if it is.
if propmatch in bound_args.arguments:
# If it's in the func_args it's trivial but if it was
# in the func_kwargs we need to compare it to the
# default.
# Comparison to the default is done by comparing their
# identity, this works because defaults in function
# signatures are only created once and always reference
# the same item.
# FIXME: Python interns some values, for example the
# integers from -5 to 256 (and maybe some other types
# as well). In that case the default is
# indistinguishable from an explicitly passed kwarg
# and it won't notice that and use the attribute of the
# NDData.
if propmatch in func_args or (
propmatch in func_kwargs
and (
bound_args.arguments[propmatch]
is not sig[propmatch].default
)
):
warnings.warn(
"Property {} has been passed explicitly and "
"as an NDData property{}, using explicitly "
"specified value"
"".format(
propmatch, "" if prop == propmatch else " " + prop
),
AstropyUserWarning,
)
continue
# Otherwise use the property as input for the function.
kwargs[propmatch] = value
# Finally, replace data by the data attribute
data = data.data
if ignored:
warnings.warn(
"The following attributes were set on the "
"data object, but will be ignored by the "
"function: " + ", ".join(ignored),
AstropyUserWarning,
)
result = func(data, *args, **kwargs)
if unpack and repack:
# If there are multiple required returned arguments make sure
# the result is a tuple (because we don't want to unpack
# numpy arrays or compare their length, never!) and has the
# same length.
if len(returns) > 1:
if not isinstance(result, tuple) or len(returns) != len(result):
raise ValueError(
"Function did not return the expected number of arguments."
)
# Convert the tuple to a list so the ``keeps`` attributes can be
# appended below (tuples do not support append).
result = list(result)
elif len(returns) == 1:
result = [result]
# ``keeps`` was normalized to a list above, so no None check is needed.
for keep in keeps:
result.append(deepcopy(getattr(input_data, keep)))
resultdata = result[all_returns.index("data")]
resultkwargs = {
ret: res for ret, res in zip(all_returns, result) if ret != "data"
}
return input_data.__class__(resultdata, **resultkwargs)
else:
return result
return wrapper
# If _func is set, this means that the decorator was used without
# parameters so we have to return the result of the
# support_nddata_decorator decorator rather than the decorator itself
if _func is not None:
return support_nddata_decorator(_func)
else:
return support_nddata_decorator |
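# A minimal illustrative sketch of using the decorator above; the function
# `shift_down` is a hypothetical example, not part of astropy. With
# ``repack=True`` the returned values are packed back into the input
# NDData class.
import numpy as np
from astropy.nddata import NDData, support_nddata

@support_nddata(repack=True, returns=["data", "mask"])
def shift_down(data, mask=None):
    # Hypothetical processing: roll the data (and mask, if any) by one pixel.
    return np.roll(data, 1), None if mask is None else np.roll(mask, 1)

ndd = NDData(np.arange(3), mask=np.array([True, False, False]))
shifted = shift_down(ndd)  # NDData instance with rolled data and mask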
Just a simple inverse for use in the InverseVariance. | def _inverse(x):
"""Just a simple inverse for use in the InverseVariance."""
return 1 / x |
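# A tiny illustrative check (assumed usage, not from the original module):
# the mapping is an involution, i.e. applying it twice recovers the input,
# which is what converting between variance and inverse variance relies on.
import numpy as np
var = np.array([4.0, 0.25])
assert np.allclose(_inverse(_inverse(var)), var)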
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array. | def overlap_slices(large_array_shape, small_array_shape, position, mode="partial"):
"""
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut off appropriately.
Integer positions are at the pixel centers.
Parameters
----------
large_array_shape : tuple of int or int
The shape of the large array (for 1D arrays, this can be an
`int`).
small_array_shape : int or tuple thereof
The shape of the small array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers.
For any axis where ``small_array_shape`` is even, the position
is rounded up, e.g. extracting two elements with a center of
``1`` will define the extracted region as ``[0, 1]``.
mode : {'partial', 'trim', 'strict'}, optional
In ``'partial'`` mode, a partial overlap of the small and the
large array is sufficient. The ``'trim'`` mode is similar to
the ``'partial'`` mode, but ``slices_small`` will be adjusted to
return only the overlapping elements. In the ``'strict'`` mode,
the small array has to be fully contained in the large array,
otherwise an `~astropy.nddata.utils.PartialOverlapError` is
raised. In all modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`.
Returns
-------
slices_large : tuple of slice
A tuple of slice objects for each axis of the large array, such
that ``large_array[slices_large]`` extracts the region of the
large array that overlaps with the small array.
slices_small : tuple of slice
A tuple of slice objects for each axis of the small array, such
that ``small_array[slices_small]`` extracts the region that is
inside the large array.
"""
if mode not in ["partial", "trim", "strict"]:
raise ValueError('Mode can be only "partial", "trim", or "strict".')
if np.isscalar(small_array_shape):
small_array_shape = (small_array_shape,)
if np.isscalar(large_array_shape):
large_array_shape = (large_array_shape,)
if np.isscalar(position):
position = (position,)
if any(~np.isfinite(position)):
raise ValueError("Input position contains invalid values (NaNs or infs).")
if len(small_array_shape) != len(large_array_shape):
raise ValueError(
'"large_array_shape" and "small_array_shape" must '
"have the same number of dimensions."
)
if len(small_array_shape) != len(position):
raise ValueError(
'"position" must have the same number of dimensions as "small_array_shape".'
)
# define the min/max pixel indices
indices_min = [
int(np.ceil(pos - (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
indices_max = [
int(np.ceil(pos + (small_shape / 2.0)))
for (pos, small_shape) in zip(position, small_array_shape)
]
for e_max in indices_max:
if e_max < 0:
raise NoOverlapError("Arrays do not overlap.")
for e_min, large_shape in zip(indices_min, large_array_shape):
if e_min >= large_shape:
raise NoOverlapError("Arrays do not overlap.")
if mode == "strict":
for e_min in indices_min:
if e_min < 0:
raise PartialOverlapError("Arrays overlap only partially.")
for e_max, large_shape in zip(indices_max, large_array_shape):
if e_max > large_shape:
raise PartialOverlapError("Arrays overlap only partially.")
# Set up slices
slices_large = tuple(
slice(max(0, idx_min), min(large_shape, idx_max))
for (idx_min, idx_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
if mode == "trim":
slices_small = tuple(slice(0, slc.stop - slc.start) for slc in slices_large)
else:
slices_small = tuple(
slice(
max(0, -idx_min),
min(large_shape - idx_min, idx_max - idx_min),
)
for (idx_min, idx_max, large_shape) in zip(
indices_min, indices_max, large_array_shape
)
)
return slices_large, slices_small |
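# An illustrative sketch of the slice computation above (values follow the
# rounding rules documented in the docstring): a 3-element window centered
# at position 0 of a 5-element array overlaps only partially.
large_slc, small_slc = overlap_slices((5,), (3,), (0,))
# large_slc == (slice(0, 2, None),): the first two pixels of the large array.
# small_slc == (slice(1, 3, None),): the last two elements of the window.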
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]]) | def extract_array(
array_large,
shape,
position,
mode="partial",
fill_value=np.nan,
return_position=False,
):
"""
Extract a smaller array of the given shape and position from a
larger array.
Parameters
----------
array_large : ndarray
The array from which to extract the small array.
shape : int or tuple thereof
The shape of the extracted array (for 1D arrays, this can be an
`int`). See the ``mode`` keyword for additional details.
position : number or tuple thereof
The position of the small array's center with respect to the
large array. The pixel coordinates should be in the same order
as the array shape. Integer positions are at the pixel centers
(for 1D arrays, this can be a number).
mode : {'partial', 'trim', 'strict'}, optional
The mode used for extracting the small array. For the
``'partial'`` and ``'trim'`` modes, a partial overlap of the
small array and the large array is sufficient. For the
``'strict'`` mode, the small array has to be fully contained
within the large array, otherwise an
`~astropy.nddata.utils.PartialOverlapError` is raised. In all
modes, non-overlapping arrays will raise a
`~astropy.nddata.utils.NoOverlapError`. In ``'partial'`` mode,
positions in the small array that do not overlap with the large
array will be filled with ``fill_value``. In ``'trim'`` mode
only the overlapping elements are returned, thus the resulting
small array may be smaller than the requested ``shape``.
fill_value : number, optional
If ``mode='partial'``, the value to fill pixels in the extracted
small array that do not overlap with the input ``array_large``.
``fill_value`` will be changed to have the same ``dtype`` as the
``array_large`` array, with one exception. If ``array_large``
has integer type and ``fill_value`` is ``np.nan``, then a
`ValueError` will be raised.
return_position : bool, optional
If `True`, return the coordinates of ``position`` in the
coordinate system of the returned array.
Returns
-------
array_small : ndarray
The extracted array.
new_position : tuple
If ``return_position`` is true, this tuple will contain the
coordinates of the input ``position`` in the coordinate system
of ``array_small``. Note that for partially overlapping arrays,
``new_position`` might actually be outside of the
``array_small``; ``array_small[new_position]`` might give wrong
results if any element in ``new_position`` is negative.
Examples
--------
We consider a large array with the shape 11x10, from which we extract
a small array of shape 3x5:
>>> import numpy as np
>>> from astropy.nddata.utils import extract_array
>>> large_array = np.arange(110).reshape((11, 10))
>>> extract_array(large_array, (3, 5), (7, 7))
array([[65, 66, 67, 68, 69],
[75, 76, 77, 78, 79],
[85, 86, 87, 88, 89]])
"""
if np.isscalar(shape):
shape = (shape,)
if np.isscalar(position):
position = (position,)
if mode not in ["partial", "trim", "strict"]:
raise ValueError("Valid modes are 'partial', 'trim', and 'strict'.")
large_slices, small_slices = overlap_slices(
array_large.shape, shape, position, mode=mode
)
extracted_array = array_large[large_slices]
if return_position:
new_position = [i - s.start for i, s in zip(position, large_slices)]
# Extracting on the edges is presumably a rare case, so treat it specially here
if (extracted_array.shape != shape) and (mode == "partial"):
extracted_array = np.zeros(shape, dtype=array_large.dtype)
try:
extracted_array[:] = fill_value
except ValueError as exc:
exc.args += (
"fill_value is inconsistent with the data type of "
"the input array (e.g., fill_value cannot be set to "
"np.nan if the input array has integer type). Please "
"change either the input array dtype or the "
"fill_value.",
)
raise exc
extracted_array[small_slices] = array_large[large_slices]
if return_position:
new_position = [i + s.start for i, s in zip(new_position, small_slices)]
if return_position:
return extracted_array, tuple(new_position)
else:
return extracted_array |
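# An illustrative sketch of ``return_position`` (values derived from the
# slice rules above, not from the original source): extracting past the edge
# in 'partial' mode pads with ``fill_value`` and reports the requested
# center in cutout coordinates.
import numpy as np
cutout, new_pos = extract_array(
    np.arange(5.0), (3,), (0,), mode="partial", return_position=True
)
# cutout == array([nan, 0., 1.]) and new_pos == (1,): position 0 of the
# large array maps to index 1 of the padded cutout.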
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]) | def add_array(array_large, array_small, position):
"""
Add a smaller array at a given position in a larger array.
Parameters
----------
array_large : ndarray
Large array.
array_small : ndarray
Small array to add. Can be equal to ``array_large`` in size in a given
dimension, but not larger.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
new_array : ndarray
The new array formed from the sum of ``array_large`` and
``array_small``.
Notes
-----
The addition is done in-place.
Examples
--------
We consider a large array of zeros with the shape 5x5 and a small
array of ones with a shape of 3x3:
>>> import numpy as np
>>> from astropy.nddata.utils import add_array
>>> large_array = np.zeros((5, 5))
>>> small_array = np.ones((3, 3))
>>> add_array(large_array, small_array, (1, 2)) # doctest: +FLOAT_CMP
array([[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]])
"""
# Check if large array is not smaller
if all(
large_shape >= small_shape
for (large_shape, small_shape) in zip(array_large.shape, array_small.shape)
):
large_slices, small_slices = overlap_slices(
array_large.shape, array_small.shape, position
)
array_large[large_slices] += array_small[small_slices]
return array_large
else:
raise ValueError("Can't add array. Small array too large.") |
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
If instead we use a subsampling of 2, we see that for the first two values
(1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0. This is
because the integers 1, 3, and 6 lie at pixel centers, so 1.2 and 3.4 fall
in the right half of their pixels while 5.5 falls in the left half.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.]) | def subpixel_indices(position, subsampling):
"""
Convert decimal points to indices, given a subsampling factor.
This discards the integer part of the position and uses only the decimal
place, and converts this to a subpixel position depending on the
subsampling specified. The center of a pixel corresponds to an integer
position.
Parameters
----------
position : ndarray or array-like
Positions in pixels.
subsampling : int
Subsampling factor per pixel.
Returns
-------
indices : ndarray
The integer subpixel indices corresponding to the input positions.
Examples
--------
If no subsampling is used, then the subpixel indices returned are always 0:
>>> from astropy.nddata.utils import subpixel_indices
>>> subpixel_indices([1.2, 3.4, 5.6], 1) # doctest: +FLOAT_CMP
array([0., 0., 0.])
If instead we use a subsampling of 2, we see that for the first two values
(1.2 and 3.4) the subpixel index is 1, while for 5.5 it is 0. This is
because the integers 1, 3, and 6 lie at pixel centers, so 1.2 and 3.4 fall
in the right half of their pixels while 5.5 falls in the left half.
>>> subpixel_indices([1.2, 3.4, 5.5], 2) # doctest: +FLOAT_CMP
array([1., 1., 0.])
"""
# Get the fractional part of the position within its pixel: shifting by
# 0.5 puts pixel edges on integers before taking the decimal part.
fractions = np.modf(np.asanyarray(position) + 0.5)[0]
return np.floor(fractions * subsampling) |
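# The arithmetic behind the doctest above, spelled out: for position 1.2
# with subsampling 2, modf(1.2 + 0.5) leaves fractional part 0.7 and
# floor(0.7 * 2) == 1; for 5.5, modf(6.0) leaves 0.0 and floor(0.0 * 2) == 0.
assert subpixel_indices([1.2, 5.5], 2).tolist() == [1.0, 0.0]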
Just checks a few attributes to make sure wcs instances seem to be
equal. | def assert_wcs_seem_equal(wcs1, wcs2):
"""Just checks a few attributes to make sure wcs instances seem to be
equal.
"""
if wcs1 is None and wcs2 is None:
return
assert wcs1 is not None
assert wcs2 is not None
if isinstance(wcs1, BaseHighLevelWCS):
wcs1 = wcs1.low_level_wcs
if isinstance(wcs2, BaseHighLevelWCS):
wcs2 = wcs2.low_level_wcs
assert isinstance(wcs1, WCS)
assert isinstance(wcs2, WCS)
if wcs1 is wcs2:
return
assert wcs1.wcs.compare(wcs2.wcs) |
Test that math on objects with a psf warn. | def test_psf_warning():
"""Test that math on objects with a psf warn."""
ndd1 = NDDataArithmetic(np.ones((3, 3)), psf=np.zeros(3))
ndd2 = NDDataArithmetic(np.ones((3, 3)), psf=None)
# no warning if both are None
ndd2.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd2)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd2.add(ndd1)
with pytest.warns(AstropyUserWarning, match="Not setting psf attribute during add"):
ndd1.add(ndd1) |
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables will be temporarily
modified so that '~' resolves to the temp directory. | def home_is_tmpdir(tmp_path, monkeypatch, request):
"""
Pytest fixture to run a test case with tilde-prefixed paths.
In the tilde-path case, environment variables will be temporarily
modified so that '~' resolves to the temp directory.
"""
# For Unix
monkeypatch.setenv("HOME", str(tmp_path))
# For Windows
monkeypatch.setenv("USERPROFILE", str(tmp_path)) |
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU. | def create_ccd_data():
"""
Return a CCDData object of size DEFAULT_DATA_SIZE x DEFAULT_DATA_SIZE
with units of ADU.
"""
data = _random_array.copy()
fake_meta = {"my_key": 42, "your_key": "not 42"}
ccd = CCDData(data, unit=u.adu)
ccd.header = fake_meta
return ccd |
Check that WCS attribute gets added to header, and that if a CCDData
object is created from a FITS file with a header, and the WCS attribute
is modified, then the CCDData object is turned back into an hdu, the
WCS object overwrites the old WCS information in the header. | def test_wcs_attribute(tmp_path):
"""
Check that WCS attribute gets added to header, and that if a CCDData
object is created from a FITS file with a header, and the WCS attribute
is modified, then the CCDData object is turned back into an hdu, the
WCS object overwrites the old WCS information in the header.
"""
ccd_data = create_ccd_data()
tmpfile = str(tmp_path / "temp.fits")
# This wcs example is taken from the astropy.wcs docs.
wcs = WCS(naxis=2)
wcs.wcs.crpix = np.array(ccd_data.shape) / 2
wcs.wcs.cdelt = np.array([-0.066667, 0.066667])
wcs.wcs.crval = [0, -90]
wcs.wcs.ctype = ["RA---AIR", "DEC--AIR"]
wcs.wcs.set_pv([(2, 1, 45.0)])
ccd_data.header = ccd_data.to_hdu()[0].header
ccd_data.header.extend(wcs.to_header(), useblanks=False)
ccd_data.write(tmpfile)
# Get the header length after it has been extended by the WCS keywords
original_header_length = len(ccd_data.header)
ccd_new = CCDData.read(tmpfile)
# WCS attribute should be set for ccd_new
assert ccd_new.wcs is not None
# WCS attribute should be equal to wcs above.
assert ccd_new.wcs.wcs == wcs.wcs
# Converting CCDData object with wcs to an hdu shouldn't
# create duplicate wcs-related entries in the header.
ccd_new_hdu = ccd_new.to_hdu()[0]
assert len(ccd_new_hdu.header) == original_header_length
# Making a CCDData with WCS (but not WCS in the header) should lead to
# WCS information in the header when it is converted to an HDU.
ccd_wcs_not_in_header = CCDData(ccd_data.data, wcs=wcs, unit="adu")
hdu = ccd_wcs_not_in_header.to_hdu()[0]
wcs_header = wcs.to_header()
for k in wcs_header.keys():
# Skip these keywords if they are in the WCS header because they are
# not WCS-specific.
if k in ["", "COMMENT", "HISTORY"]:
continue
# No keyword from the WCS should be in the header.
assert k not in ccd_wcs_not_in_header.header
# Every keyword in the WCS should be in the header of the HDU
assert hdu.header[k] == wcs_header[k]
# Now check that if WCS of a CCDData is modified, then the CCDData is
# converted to an HDU, the WCS keywords in the header are overwritten
# with the appropriate keywords from the header.
#
# ccd_new has a WCS and WCS keywords in the header, so try modifying
# the WCS.
ccd_new.wcs.wcs.cdelt *= 2
ccd_new_hdu_mod_wcs = ccd_new.to_hdu()[0]
assert ccd_new_hdu_mod_wcs.header["CDELT1"] == ccd_new.wcs.wcs.cdelt[0]
assert ccd_new_hdu_mod_wcs.header["CDELT2"] == ccd_new.wcs.wcs.cdelt[1] |
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header. | def test_wcs_keywords_removed_from_header():
"""
Test, for the file included with the nddata tests, that WCS keywords are
properly removed from header.
"""
from astropy.nddata.ccddata import _KEEP_THESE_KEYWORDS_IN_HEADER
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
data_file = get_pkg_data_filename("data/sip-wcs.fits")
ccd = CCDData.read(data_file)
with pytest.warns(
AstropyWarning, match=r"Some non-standard WCS keywords were excluded"
):
wcs_header = ccd.wcs.to_header()
assert not (set(wcs_header) & set(ccd.meta) - keepers)
# Make sure that exceptions are not raised when trying to remove missing
# keywords. o4sp040b0_raw.fits of io.fits is missing keyword 'PC1_1'.
data_file1 = get_pkg_data_filename(
"data/o4sp040b0_raw.fits", package="astropy.io.fits.tests"
)
if PYTEST_LT_8_0:
ctx = nullcontext()
else:
ctx = pytest.warns(FITSFixedWarning, match="'datfix' made the change")
with pytest.warns(FITSFixedWarning, match="'unitfix' made the change"), ctx:
ccd = CCDData.read(data_file1, unit="count") |
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597 | def test_wcs_keyword_removal_for_wcs_test_files():
"""
Test, for the WCS test files, that keyword removal works as
expected. Those cover a much broader range of WCS types than
test_wcs_keywords_removed_from_header.
Includes regression test for #8597
"""
from astropy.nddata.ccddata import (
_KEEP_THESE_KEYWORDS_IN_HEADER,
_CDs,
_generate_wcs_and_update_header,
_PCs,
)
keepers = set(_KEEP_THESE_KEYWORDS_IN_HEADER)
# NOTE: pyinstaller requires relative path here.
wcs_headers = get_pkg_data_filenames("../../wcs/tests/data", pattern="*.hdr")
for hdr in wcs_headers:
# Skip the files that are expected to be bad...
if (
"invalid" in hdr
or "nonstandard" in hdr
or "segfault" in hdr
or "chandra-pixlist-wcs" in hdr
):
continue
header_string = get_pkg_data_contents(hdr)
header = fits.Header.fromstring(header_string)
wcs = WCS(header_string)
header_from_wcs = wcs.to_header(relax=True)
new_header, new_wcs = _generate_wcs_and_update_header(header)
new_wcs_header = new_wcs.to_header(relax=True)
# Make sure all of the WCS-related keywords generated by astropy
# have been removed.
assert not (set(new_header) & set(new_wcs_header) - keepers)
# Check that new_header contains no remaining WCS information.
# Specifically, check that
# 1. The combination of new_header and new_wcs does not contain
# both PCi_j and CDi_j keywords. See #8597.
# Check for 1
final_header = new_header + new_wcs_header
final_header_set = set(final_header)
if _PCs & final_header_set:
assert not (_CDs & final_header_set)
elif _CDs & final_header_set:
assert not (_PCs & final_header_set)
# Check that the new wcs is the same as the old.
for k, v in new_wcs_header.items():
if isinstance(v, str):
assert header_from_wcs[k] == v
else:
np.testing.assert_almost_equal(header_from_wcs[k], v) |
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged. | def test_wcs_sip_handling():
"""
Check whether the ctypes RA---TAN-SIP and DEC--TAN-SIP survive
a roundtrip unchanged.
"""
data_file = get_pkg_data_filename("data/sip-wcs.fits")
def check_wcs_ctypes(header):
expected_wcs_ctypes = {"CTYPE1": "RA---TAN-SIP", "CTYPE2": "DEC--TAN-SIP"}
return [header[k] == v for k, v in expected_wcs_ctypes.items()]
ccd_original = CCDData.read(data_file)
# After initialization the keywords should be in the WCS, not in the
# meta.
with fits.open(data_file) as raw:
good_ctype = check_wcs_ctypes(raw[0].header)
assert all(good_ctype)
ccd_new = ccd_original.to_hdu()
good_ctype = check_wcs_ctypes(ccd_new[0].header)
assert all(good_ctype)
# Try converting to header with wcs_relax=False and
# the header should contain the CTYPE keywords without
# the -SIP
ccd_no_relax = ccd_original.to_hdu(wcs_relax=False)
good_ctype = check_wcs_ctypes(ccd_no_relax[0].header)
assert not any(good_ctype)
assert ccd_no_relax[0].header["CTYPE1"] == "RA---TAN"
assert ccd_no_relax[0].header["CTYPE2"] == "DEC--TAN" |
Test that we can round-trip a CCDData with an attached PSF image. | def test_write_read_psf(tmp_path):
"""Test that we can round-trip a CCDData with an attached PSF image."""
ccd_data = create_ccd_data()
ccd_data.psf = _random_psf
filename = tmp_path / "test_write_read_psf.fits"
ccd_data.write(filename)
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf)
# Try a different name for the PSF HDU.
filename = tmp_path / "test_write_read_psf_hdu.fits"
ccd_data.write(filename, hdu_psf="PSFOTHER")
# psf will be None if we don't supply the new HDU name to the reader.
ccd_disk = CCDData.read(filename)
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
assert ccd_disk.psf is None
# psf will round-trip if we do supply the new HDU name.
ccd_disk = CCDData.read(filename, hdu_psf="PSFOTHER")
np.testing.assert_array_equal(ccd_data.data, ccd_disk.data)
np.testing.assert_array_equal(ccd_data.psf, ccd_disk.psf) |
Overlap from arrays with different numbers of dimensions is undefined. | def test_slices_different_dim():
"""Overlap from arrays with different numbers of dimensions is undefined."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5, 6), (1, 2), (0, 0)) |
Position must have the same number of dimensions as the arrays. | def test_slices_pos_different_dim():
"""Position must have the same number of dimensions as the arrays."""
with pytest.raises(ValueError, match=".*the same number of dimensions.*"):
overlap_slices((4, 5), (1, 2), (0, 0, 3)) |
If there is no overlap between arrays, an error should be raised. | def test_slices_no_overlap(pos):
"""If there is no overlap between arrays, an error should be raised."""
with pytest.raises(NoOverlapError):
overlap_slices((5, 5), (2, 2), pos) |
Compute a slice for partially overlapping arrays. | def test_slices_partial_overlap():
"""Compute a slice for partially overlapping arrays."""
temp = overlap_slices((5,), (3,), (0,))
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
temp = overlap_slices((5,), (3,), (0,), mode="partial")
assert temp == ((slice(0, 2, None),), (slice(1, 3, None),))
for pos in [0, 4]:
with pytest.raises(
PartialOverlapError, match=".*Arrays overlap only partially.*"
):
temp = overlap_slices((5,), (3,), (pos,), mode="strict") |
Test overlap_slices when extracting along edges. | def test_slices_edges():
"""
Test overlap_slices when extracting along edges.
"""
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (1, 1), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 0
assert slc_lg[0].stop == slc_lg[1].stop == 3
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
slc_lg, slc_sm = overlap_slices((10, 10), (3, 3), (8, 8), mode="strict")
assert slc_lg[0].start == slc_lg[1].start == 7
assert slc_lg[0].stop == slc_lg[1].stop == 10
assert slc_sm[0].start == slc_sm[1].start == 0
assert slc_sm[0].stop == slc_sm[1].stop == 3
# test (0, 0) shape
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (0, 0))
assert slc_lg[0].start == slc_lg[0].stop == 0
assert slc_lg[1].start == slc_lg[1].stop == 0
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0
slc_lg, slc_sm = overlap_slices((10, 10), (0, 0), (5, 5))
assert slc_lg[0].start == slc_lg[0].stop == 5
assert slc_lg[1].start == slc_lg[1].stop == 5
assert slc_sm[0].start == slc_sm[0].stop == 0
assert slc_sm[1].start == slc_sm[1].stop == 0 |
Call overlap_slices with non-existing mode. | def test_slices_overlap_wrong_mode():
"""Call overlap_slices with non-existing mode."""
with pytest.raises(ValueError, match="^Mode can be only.*"):
overlap_slices((5,), (3,), (0,), mode="full") |
A ValueError should be raised if position contains a non-finite
value. | def test_slices_nonfinite_position(position):
"""
A ValueError should be raised if position contains a non-finite
value.
"""
with pytest.raises(ValueError):
overlap_slices((7, 7), (3, 3), position) |
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction. | def test_extract_array_even_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
even-shaped extraction.
"""
data = np.arange(10)
shape = (2,)
positions_expected = [
(1.49, (1, 2)),
(1.5, (1, 2)),
(1.501, (1, 2)),
(1.99, (1, 2)),
(2.0, (1, 2)),
(2.01, (2, 3)),
(2.49, (2, 3)),
(2.5, (2, 3)),
(2.501, (2, 3)),
(2.99, (2, 3)),
(3.0, (2, 3)),
(3.01, (3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, 0)
exp2 = (0, 1)
expected = [exp1] * 6 + [exp2]
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp) |
Test overlap_slices (via extract_array) for rounding with an
odd-shaped extraction. | def test_extract_array_odd_shape_rounding():
"""
Test overlap_slices (via extract_array) for rounding with an
odd-shaped extraction.
"""
data = np.arange(10)
shape = (3,)
positions_expected = [
(1.49, (0, 1, 2)),
(1.5, (0, 1, 2)),
(1.501, (1, 2, 3)),
(1.99, (1, 2, 3)),
(2.0, (1, 2, 3)),
(2.01, (1, 2, 3)),
(2.49, (1, 2, 3)),
(2.5, (1, 2, 3)),
(2.501, (2, 3, 4)),
(2.99, (2, 3, 4)),
(3.0, (2, 3, 4)),
(3.01, (2, 3, 4)),
]
for pos, exp in positions_expected:
out = extract_array(data, shape, (pos,), mode="partial")
assert_array_equal(out, exp)
# test negative positions
positions = (-0.99, -0.51, -0.5, -0.49, -0.01, 0)
exp1 = (-99, -99, 0)
exp2 = (-99, 0, 1)
expected = [exp1] * 3 + [exp2] * 4
for pos, exp in zip(positions, expected):
out = extract_array(data, shape, (pos,), mode="partial", fill_value=-99)
assert_array_equal(out, exp) |
Call extract_array with non-existing mode. | def test_extract_array_wrong_mode():
"""Call extract_array with non-existing mode."""
with pytest.raises(
ValueError, match="Valid modes are 'partial', 'trim', and 'strict'."
):
extract_array(np.arange(4), (2,), (0,), mode="full") |
Extract 1D arrays.
All dimensions are treated the same, so we can test in 1 dim. | def test_extract_array_1d_even():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(
extract_array(np.arange(4), (2,), (0,), fill_value=-99) == np.array([-99, 0])
)
for i in [1, 2, 3]:
assert np.all(extract_array(np.arange(4), (2,), (i,)) == np.array([i - 1, i]))
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), fill_value=np.inf)
== np.array([3, np.inf])
) |
Extract 1D arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only. | def test_extract_array_1d_odd():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
The first few lines test the most error-prone part: Extraction of an
array on the boundaries.
Additional tests (e.g. dtype of return array) are done for the last
case only.
"""
assert np.all(
extract_array(np.arange(4), (3,), (-1,), fill_value=-99)
== np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), (3,), (0,), fill_value=-99) == np.array([-99, 0, 1])
)
for i in [1, 2]:
assert np.all(
extract_array(np.arange(4), (3,), (i,)) == np.array([i - 1, i, i + 1])
)
assert np.all(
extract_array(np.arange(4), (3,), (3,), fill_value=-99) == np.array([2, 3, -99])
)
arrayin = np.arange(4.0)
extracted = extract_array(arrayin, (3,), (4,))
assert extracted[0] == 3
assert np.isnan(extracted[1]) # since I cannot use `==` to test for nan
assert extracted.dtype == arrayin.dtype |
In 1D, shape can be an int instead of a tuple. | def test_extract_array_1d():
"""In 1D, shape can be an int instead of a tuple."""
assert np.all(
extract_array(np.arange(4), 3, (-1,), fill_value=-99) == np.array([-99, -99, 0])
)
assert np.all(
extract_array(np.arange(4), 3, -1, fill_value=-99) == np.array([-99, -99, 0])
) |
Integer positions are at bin centers. | def test_extract_Array_float():
"""Integer positions are at bin centers."""
for a in np.arange(2.51, 3.49, 0.1):
assert np.all(extract_array(np.arange(5), 3, a) == np.array([2, 3, 4])) |
Extract 1D arrays.
All dimensions are treated the same, so we can test in 1 dim. | def test_extract_array_1d_trim():
"""Extract 1 d arrays.
All dimensions are treated the same, so we can test in 1 dim.
"""
assert np.all(extract_array(np.arange(4), (2,), (0,), mode="trim") == np.array([0]))
for i in [1, 2, 3]:
assert np.all(
extract_array(np.arange(4), (2,), (i,), mode="trim") == np.array([i - 1, i])
)
assert np.all(
extract_array(np.arange(4.0), (2,), (4,), mode="trim") == np.array([3])
) |
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros. | def test_extract_array_easy(mode):
"""
Test extract_array utility function.
Test by extracting an array of ones out of an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array[3:8, 3:8] = small_test_array
extracted_array = extract_array(large_test_array, (5, 5), (5, 5), mode=mode)
assert np.all(extracted_array == small_test_array) |
Check that the return position is calculated correctly.
The result will differ by mode. All tests here are done in 1D because it's
easier to construct correct test cases. | def test_extract_array_return_pos():
"""Check that the return position is calculated correctly.
The result will differ by mode. All tests here are done in 1D because it's
easier to construct correct test cases.
"""
large_test_array = np.arange(5, dtype=float)
for i in np.arange(-1, 6):
extracted, new_pos = extract_array(
large_test_array, 3, i, mode="partial", return_position=True
)
assert new_pos == (1,)
# Now check an array with an even number
for i, expected in zip([1.49, 1.51, 3], [0.49, 0.51, 1]):
extracted, new_pos = extract_array(
large_test_array, (2,), (i,), mode="strict", return_position=True
)
assert new_pos == (expected,)
# For mode='trim' the answer actually depends on the position
for i, expected in zip(np.arange(-1, 6), (-1, 0, 1, 1, 1, 1, 1)):
extracted, new_pos = extract_array(
large_test_array, (3,), (i,), mode="trim", return_position=True
)
assert new_pos == (expected,) |
Test add_array utility function.
Test by adding an array of ones into an array of zeros. | def test_add_array_odd_shape():
"""
Test add_array utility function.
Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((5, 5))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[3:8, 3:8] += small_test_array
added_array = add_array(large_test_array, small_test_array, (5, 5))
assert np.all(added_array == large_test_array_ref) |
Test add_array utility function with an even-shaped small array.
Test by adding an array of ones into an array of zeros. | def test_add_array_even_shape():
"""
Test add_array utility function with an even-shaped small array.
Test by adding an array of ones into an array of zeros.
"""
large_test_array = np.zeros((11, 11))
small_test_array = np.ones((4, 4))
large_test_array_ref = large_test_array.copy()
large_test_array_ref[0:2, 0:2] += small_test_array[2:4, 2:4]
added_array = add_array(large_test_array, small_test_array, (0, 0))
assert np.all(added_array == large_test_array_ref) |