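# Shared imports assumed by the excerpts below. Each function originally
# lives in its own astropy submodule; module-level helpers referenced but
# not reproduced here (e.g. trig_sum, NORMALIZATIONS, METHODS, INV_METHODS,
# available_methods, _is_regular, _weighted_var, _gamma) are defined
# alongside them upstream. The exact import style is an assumption.
from itertools import chain
from math import factorial

import numpy as np

import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose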
def _bls_slow_one(t, y, ivar, duration, oversample, use_likelihood, period):
    """A private function to compute the brute force periodogram result."""
    best = (-np.inf, None)
    hp = 0.5 * period
    min_t = np.min(t)
    for dur in duration:
        # Compute the phase grid (this is set by the duration and oversample).
        d_phase = dur / oversample
        phase = np.arange(0, period + d_phase, d_phase)

        for t0 in phase:
            # Figure out which data points are in and out of transit.
            m_in = np.abs((t - min_t - t0 + hp) % period - hp) < 0.5 * dur
            m_out = ~m_in

            # Compute the estimates of the in and out-of-transit flux.
            ivar_in = np.sum(ivar[m_in])
            ivar_out = np.sum(ivar[m_out])
            y_in = np.sum(y[m_in] * ivar[m_in]) / ivar_in
            y_out = np.sum(y[m_out] * ivar[m_out]) / ivar_out

            # Use this to compute the best fit depth and uncertainty.
            depth = y_out - y_in
            depth_err = np.sqrt(1.0 / ivar_in + 1.0 / ivar_out)
            snr = depth / depth_err

            # Compute the log likelihood of this model.
            loglike = -0.5 * np.sum((y_in - y[m_in]) ** 2 * ivar[m_in])
            loglike += 0.5 * np.sum((y_out - y[m_in]) ** 2 * ivar[m_in])

            # Choose which objective should be used for the optimization.
            if use_likelihood:
                objective = loglike
            else:
                objective = snr

            # If this model is better than any before, keep it.
            if depth > 0 and objective > best[0]:
                best = (
                    objective,
                    (
                        objective,
                        depth,
                        depth_err,
                        dur,
                        (t0 + min_t) % period,
                        snr,
                        loglike,
                    ),
                )

    return best[1]
def assert_allclose_blsresults(blsresult, other, **kwargs):
    """Assert that another BoxLeastSquaresResults object is consistent.

    This method loops over all attributes and compares the values using
    the :func:`~astropy.tests.helper.assert_quantity_allclose` function.

    Parameters
    ----------
    other : BoxLeastSquaresResults
        The other results object to compare.
    """
    for k, v in blsresult.items():
        if k not in other:
            raise AssertionError(f"missing key '{k}'")

        if k == "objective":
            assert (
                v == other[k]
            ), f"Mismatched objectives. Expected '{v}', got '{other[k]}'"
            continue

        assert_quantity_allclose(v, other[k], **kwargs)
def compute_chi2_ref(y, dy=None, center_data=True, fit_mean=True):
    """Compute the reference chi-square for a particular dataset.

    Note: this is not valid for ``center_data=False`` and ``fit_mean=False``.

    Parameters
    ----------
    y : array-like
        data values
    dy : float, array, or None, optional
        data uncertainties
    center_data : bool
        specify whether data should be pre-centered
    fit_mean : bool
        specify whether model should fit the mean of the data

    Returns
    -------
    chi2_ref : float
        The reference chi-square for the periodogram of this data
    """
    if dy is None:
        dy = 1
    y, dy = np.broadcast_arrays(y, dy)
    w = dy**-2.0
    if center_data or fit_mean:
        mu = np.dot(w, y) / w.sum()
    else:
        mu = 0
    yw = (y - mu) / dy
    return np.dot(yw, yw)
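# Quick sanity check of the formula above (illustrative; would live in tests):
# with dy = 1 and centering, the reference chi-square reduces to the sum of
# squared, mean-centered values, i.e. (N - 1) times the sample variance.
_rng = np.random.default_rng(42)
_y = _rng.normal(size=100)
_mu = _y.mean()
assert np.isclose(compute_chi2_ref(_y), np.dot(_y - _mu, _y - _mu))
assert np.isclose(compute_chi2_ref(_y), (len(_y) - 1) * _y.var(ddof=1))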
def convert_normalization(Z, N, from_normalization, to_normalization, chi2_ref=None):
    """Convert power from one normalization to another.

    This currently only works for standard & floating-mean models.

    Parameters
    ----------
    Z : array-like
        the periodogram output
    N : int
        the number of data points
    from_normalization, to_normalization : str
        the normalization to convert from and to. Options are
        ['standard', 'model', 'log', 'psd']
    chi2_ref : float
        The reference chi-square, required for converting to or from the
        psd normalization.

    Returns
    -------
    Z_out : ndarray
        The periodogram in the new normalization
    """
    Z = np.asarray(Z)
    from_to = (from_normalization, to_normalization)

    for norm in from_to:
        if norm not in NORMALIZATIONS:
            raise ValueError(f"{norm} is not a valid normalization")

    if from_normalization == to_normalization:
        return Z

    if "psd" in from_to and chi2_ref is None:
        raise ValueError(
            "must supply reference chi^2 when converting to or from psd normalization"
        )

    if from_to == ("log", "standard"):
        return 1 - np.exp(-Z)
    elif from_to == ("standard", "log"):
        return -np.log(1 - Z)
    elif from_to == ("log", "model"):
        return np.exp(Z) - 1
    elif from_to == ("model", "log"):
        return np.log(Z + 1)
    elif from_to == ("model", "standard"):
        return Z / (1 + Z)
    elif from_to == ("standard", "model"):
        return Z / (1 - Z)
    elif from_normalization == "psd":
        return convert_normalization(
            2 / chi2_ref * Z,
            N,
            from_normalization="standard",
            to_normalization=to_normalization,
        )
    elif to_normalization == "psd":
        Z_standard = convert_normalization(
            Z, N, from_normalization=from_normalization, to_normalization="standard"
        )
        return 0.5 * chi2_ref * Z_standard
    else:
        raise NotImplementedError(
            f"conversion from '{from_normalization}' to '{to_normalization}'"
        )
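# The closed-form branches above obey simple round-trip identities; a small
# numeric check restating the branch arithmetic (pure numpy, so it doesn't
# need the module's NORMALIZATIONS table):
_Z_std = np.array([0.1, 0.3, 0.6])
_Z_log = -np.log(1 - _Z_std)  # standard -> log
assert np.allclose(1 - np.exp(-_Z_log), _Z_std)  # log -> standard
_Z_model = _Z_std / (1 - _Z_std)  # standard -> model
assert np.allclose(_Z_model / (1 + _Z_model), _Z_std)  # model -> standard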
def pdf_single(z, N, normalization, dH=1, dK=3):
    """Probability density function for Lomb-Scargle periodogram.

    Compute the expected probability density function of the periodogram
    for the null hypothesis - i.e. data consisting of Gaussian noise.

    Parameters
    ----------
    z : array-like
        The periodogram value.
    N : int
        The number of data points from which the periodogram was computed.
    normalization : {'standard', 'model', 'log', 'psd'}
        The periodogram normalization.
    dH, dK : int, optional
        The number of parameters in the null hypothesis and the model.

    Returns
    -------
    pdf : np.ndarray
        The expected probability density function.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.
    All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    z = np.asarray(z)
    if dK - dH != 2:
        raise NotImplementedError("Degrees of freedom != 2")
    Nk = N - dK

    if normalization == "psd":
        return np.exp(-z)
    elif normalization == "standard":
        return 0.5 * Nk * (1 - z) ** (0.5 * Nk - 1)
    elif normalization == "model":
        return 0.5 * Nk * (1 + z) ** (-0.5 * Nk - 1)
    elif normalization == "log":
        return 0.5 * Nk * np.exp(-0.5 * Nk * z)
    else:
        raise ValueError(f"normalization='{normalization}' is not recognized")
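# The 'standard' null pdf above is a Beta(1, Nk/2) density on [0, 1], so it
# should integrate to one; a quick left-Riemann check (illustrative; would
# live in tests):
_z = np.linspace(0, 1, 100001)
_pdf = pdf_single(_z, N=50, normalization="standard")
assert np.isclose(np.sum(_pdf[:-1]) * (_z[1] - _z[0]), 1.0, atol=1e-3)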
def fap_single(z, N, normalization, dH=1, dK=3):
    """Single-frequency false alarm probability for the Lomb-Scargle periodogram.

    This is equal to 1 - cdf, where cdf is the cumulative distribution.
    The single-frequency false alarm probability should not be confused with
    the false alarm probability for the largest peak.

    Parameters
    ----------
    z : array-like
        The periodogram value.
    N : int
        The number of data points from which the periodogram was computed.
    normalization : {'standard', 'model', 'log', 'psd'}
        The periodogram normalization.
    dH, dK : int, optional
        The number of parameters in the null hypothesis and the model.

    Returns
    -------
    false_alarm_probability : np.ndarray
        The single-frequency false alarm probability.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.
    All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    z = np.asarray(z)
    if dK - dH != 2:
        raise NotImplementedError("Degrees of freedom != 2")
    Nk = N - dK

    if normalization == "psd":
        return np.exp(-z)
    elif normalization == "standard":
        return (1 - z) ** (0.5 * Nk)
    elif normalization == "model":
        return (1 + z) ** (-0.5 * Nk)
    elif normalization == "log":
        return np.exp(-0.5 * Nk * z)
    else:
        raise ValueError(f"normalization='{normalization}' is not recognized")
def inv_fap_single(fap, N, normalization, dH=1, dK=3):
    """Single-frequency inverse false alarm probability.

    This function computes the periodogram value associated with the specified
    single-frequency false alarm probability. This should not be confused with
    the false alarm level of the largest peak.

    Parameters
    ----------
    fap : array-like
        The false alarm probability.
    N : int
        The number of data points from which the periodogram was computed.
    normalization : {'standard', 'model', 'log', 'psd'}
        The periodogram normalization.
    dH, dK : int, optional
        The number of parameters in the null hypothesis and the model.

    Returns
    -------
    z : np.ndarray
        The periodogram power corresponding to the single-peak false alarm
        probability.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.
    All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    fap = np.asarray(fap)
    if dK - dH != 2:
        raise NotImplementedError("Degrees of freedom != 2")
    Nk = N - dK

    # No warnings for fap = 0; rather, just let it give the right infinity.
    with np.errstate(divide="ignore"):
        if normalization == "psd":
            return -np.log(fap)
        elif normalization == "standard":
            return 1 - fap ** (2 / Nk)
        elif normalization == "model":
            return -1 + fap ** (-2 / Nk)
        elif normalization == "log":
            return -2 / Nk * np.log(fap)
        else:
            raise ValueError(f"normalization='{normalization}' is not recognized")
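# Round trip: inv_fap_single inverts fap_single. In the 'standard'
# normalization, fap = (1 - z) ** (Nk / 2) gives back z = 1 - fap ** (2 / Nk).
# Spot check (illustrative; would live in tests):
_z = 0.25
_fap = fap_single(_z, 100, "standard")
assert np.isclose(inv_fap_single(_fap, 100, "standard"), _z)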
def cdf_single(z, N, normalization, dH=1, dK=3):
    """Cumulative distribution for the Lomb-Scargle periodogram.

    Compute the expected cumulative distribution of the periodogram
    for the null hypothesis - i.e. data consisting of Gaussian noise.

    Parameters
    ----------
    z : array-like
        The periodogram value.
    N : int
        The number of data points from which the periodogram was computed.
    normalization : {'standard', 'model', 'log', 'psd'}
        The periodogram normalization.
    dH, dK : int, optional
        The number of parameters in the null hypothesis and the model.

    Returns
    -------
    cdf : np.ndarray
        The expected cumulative distribution function.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.
    All expressions used here are adapted from Table 1 of Baluev 2008 [1]_.

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    return 1 - fap_single(z, N, normalization=normalization, dH=dH, dK=dK)
def tau_davies(Z, fmax, t, y, dy, normalization="standard", dH=1, dK=3):
    """tau factor for estimating Davies bound (Baluev 2008, Table 1)."""
    N = len(t)
    NH = N - dH  # DOF for null hypothesis
    NK = N - dK  # DOF for periodic hypothesis
    Dt = _weighted_var(t, dy)
    Teff = np.sqrt(4 * np.pi * Dt)  # Effective baseline
    W = fmax * Teff
    Z = np.asarray(Z)
    if normalization == "psd":
        # 'psd' normalization is same as Baluev's z
        return W * np.exp(-Z) * np.sqrt(Z)
    elif normalization == "standard":
        # 'standard' normalization is Z = 2/NH * z_1
        return _gamma(NH) * W * (1 - Z) ** (0.5 * (NK - 1)) * np.sqrt(0.5 * NH * Z)
    elif normalization == "model":
        # 'model' normalization is Z = 2/NK * z_2
        return _gamma(NK) * W * (1 + Z) ** (-0.5 * NK) * np.sqrt(0.5 * NK * Z)
    elif normalization == "log":
        # 'log' normalization is Z = 2/NK * z_3
        return (
            _gamma(NK)
            * W
            * np.exp(-0.5 * Z * (NK - 0.5))
            * np.sqrt(NK * np.sinh(0.5 * Z))
        )
    else:
        raise NotImplementedError(f"normalization={normalization}")
def fap_naive(Z, fmax, t, y, dy, normalization="standard"):
    """False Alarm Probability based on estimated number of indep frequencies."""
    N = len(t)
    T = max(t) - min(t)
    N_eff = fmax * T
    fap_s = fap_single(Z, N, normalization=normalization)
    # result is 1 - (1 - fap_s) ** N_eff
    # this is much more precise for small Z / large N
    # Ignore divide by zero in np.log1p - fine to let it return -inf.
    with np.errstate(divide="ignore"):
        return -np.expm1(N_eff * np.log1p(-fap_s))
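# Why the expm1/log1p form matters: for tiny single-frequency FAPs the
# textbook expression 1 - (1 - fap_s) ** N_eff cancels to exactly zero in
# float64, while the stable form keeps full precision (illustrative check):
_fap_s, _N_eff = 1e-18, 1000.0
_naive = 1 - (1 - _fap_s) ** _N_eff  # rounds to exactly 0.0
_stable = -np.expm1(_N_eff * np.log1p(-_fap_s))  # ~1e-15
assert _naive == 0.0 and np.isclose(_stable, _N_eff * _fap_s)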
def inv_fap_naive(fap, fmax, t, y, dy, normalization="standard"):
    """Inverse FAP based on estimated number of indep frequencies."""
    fap = np.asarray(fap)
    N = len(t)
    T = max(t) - min(t)
    N_eff = fmax * T
    # fap_s = 1 - (1 - fap) ** (1 / N_eff)
    # Ignore divide by zero in np.log - fine to let it return -inf.
    with np.errstate(divide="ignore"):
        fap_s = -np.expm1(np.log(1 - fap) / N_eff)
    return inv_fap_single(fap_s, N, normalization)
def fap_davies(Z, fmax, t, y, dy, normalization="standard"):
    """Davies upper-bound to the false alarm probability.

    (Eqn 5 of Baluev 2008)
    """
    N = len(t)
    fap_s = fap_single(Z, N, normalization=normalization)
    tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
    return fap_s + tau
def inv_fap_davies(p, fmax, t, y, dy, normalization="standard"):
    """Inverse of the Davies upper-bound."""
    from scipy import optimize

    args = (fmax, t, y, dy, normalization)
    z0 = inv_fap_naive(p, *args)
    func = lambda z, *args: fap_davies(z, *args) - p
    res = optimize.root(func, z0, args=args, method="lm")
    if not res.success:
        raise ValueError(f"inv_fap_davies did not converge for p={p}")
    return res.x
def fap_baluev(Z, fmax, t, y, dy, normalization="standard"):
    """Alias-free approximation to false alarm probability.

    (Eqn 6 of Baluev 2008)
    """
    fap_s = fap_single(Z, len(t), normalization)
    tau = tau_davies(Z, fmax, t, y, dy, normalization=normalization)
    # result is 1 - (1 - fap_s) * np.exp(-tau)
    # this is much more precise for small numbers
    return -np.expm1(-tau) + fap_s * np.exp(-tau)
def inv_fap_baluev(p, fmax, t, y, dy, normalization="standard"):
    """Inverse of the Baluev alias-free approximation."""
    from scipy import optimize

    args = (fmax, t, y, dy, normalization)
    z0 = inv_fap_naive(p, *args)
    func = lambda z, *args: fap_baluev(z, *args) - p
    res = optimize.root(func, z0, args=args, method="lm")
    if not res.success:
        raise ValueError(f"inv_fap_baluev did not converge for p={p}")
    return res.x
def _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstrap=1000):
    """Generate a sequence of bootstrap estimates of the max."""
    from .core import LombScargle

    rng = np.random.default_rng(random_seed)
    power_max = []
    for _ in range(n_bootstrap):
        s = rng.integers(0, len(y), len(y))  # sample with replacement
        ls_boot = LombScargle(
            t, y[s], dy if dy is None else dy[s], normalization=normalization
        )
        freq, power = ls_boot.autopower(maximum_frequency=fmax)
        power_max.append(power.max())

    power_max = u.Quantity(power_max)
    power_max.sort()
    return power_max
def fap_bootstrap(
    Z, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None
):
    """Bootstrap estimate of the false alarm probability."""
    pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps)
    return 1 - np.searchsorted(pmax, Z) / len(pmax)
def inv_fap_bootstrap(
    fap, fmax, t, y, dy, normalization="standard", n_bootstraps=1000, random_seed=None
):
    """Bootstrap estimate of the inverse false alarm probability."""
    fap = np.asarray(fap)
    pmax = _bootstrap_max(t, y, dy, fmax, normalization, random_seed, n_bootstraps)
    idx = np.clip(np.floor((1 - fap) * len(pmax)).astype(int), 0, len(pmax) - 1)
    return pmax[idx]
def false_alarm_probability(
    Z, fmax, t, y, dy, normalization="standard", method="baluev", method_kwds=None
):
    """Compute the approximate false alarm probability for periodogram peaks Z.

    This gives an estimate of the false alarm probability for the largest
    value in a periodogram, based on the null hypothesis of non-varying data
    with Gaussian noise. The true probability cannot be computed analytically,
    so each method available here is an approximation to the true value.

    Parameters
    ----------
    Z : array-like
        The periodogram value.
    fmax : float
        The maximum frequency of the periodogram.
    t, y, dy : array-like
        The data times, values, and errors.
    normalization : {'standard', 'model', 'log', 'psd'}, optional
        The periodogram normalization.
    method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
        The approximation method to use.
    method_kwds : dict, optional
        Additional method-specific keywords.

    Returns
    -------
    false_alarm_probability : np.ndarray
        The false alarm probability.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.

    See Also
    --------
    false_alarm_level : compute the periodogram level for a particular fap

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    if method == "single":
        return fap_single(Z, len(t), normalization)
    elif method not in METHODS:
        raise ValueError(f"Unrecognized method: {method}")

    method = METHODS[method]
    method_kwds = method_kwds or {}

    return method(Z, fmax, t, y, dy, normalization, **method_kwds)
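# Usage sketch: these helpers back the public astropy interface (names per
# astropy's documented API). The deferred import mirrors _bootstrap_max above
# to avoid a circular import; the function itself is illustrative only.
def _fap_usage_example():
    from astropy.timeseries import LombScargle

    rng = np.random.default_rng(0)
    t = np.sort(100 * rng.random(200))
    y = np.sin(2 * np.pi * 0.3 * t) + 0.5 * rng.normal(size=t.size)

    ls = LombScargle(t, y)
    frequency, power = ls.autopower()
    # FAP of the highest peak via the Baluev approximation (the default).
    return ls.false_alarm_probability(power.max(), method="baluev")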
def false_alarm_level(
    p, fmax, t, y, dy, normalization, method="baluev", method_kwds=None
):
    """Compute the approximate periodogram level given a false alarm probability.

    This gives an estimate of the periodogram level corresponding to a
    specified false alarm probability for the largest peak, assuming a null
    hypothesis of non-varying data with Gaussian noise. The true level cannot
    be computed analytically, so each method available here is an
    approximation to the true value.

    Parameters
    ----------
    p : array-like
        The false alarm probability (0 < p < 1).
    fmax : float
        The maximum frequency of the periodogram.
    t, y, dy : arrays
        The data times, values, and errors.
    normalization : {'standard', 'model', 'log', 'psd'}, optional
        The periodogram normalization.
    method : {'baluev', 'davies', 'naive', 'bootstrap'}, optional
        The approximation method to use.
    method_kwds : dict, optional
        Additional method-specific keywords.

    Returns
    -------
    z : np.ndarray
        The periodogram level.

    Notes
    -----
    For normalization='psd', the distribution can only be computed for
    periodograms constructed with errors specified.

    See Also
    --------
    false_alarm_probability : compute the fap for a given periodogram level

    References
    ----------
    .. [1] Baluev, R.V. MNRAS 385, 1279 (2008)
    """
    if method == "single":
        return inv_fap_single(p, len(t), normalization)
    elif method not in INV_METHODS:
        raise ValueError(f"Unrecognized method: {method}")

    method = INV_METHODS[method]
    method_kwds = method_kwds or {}

    return method(p, fmax, t, y, dy, normalization, **method_kwds)
def lombscargle_chi2(
    t,
    y,
    dy,
    frequency,
    normalization="standard",
    fit_mean=True,
    center_data=True,
    nterms=1,
):
    """Lomb-Scargle Periodogram.

    This implements a chi-squared-based periodogram, which is relatively slow
    but useful for validating the faster algorithms in the package.

    Parameters
    ----------
    t, y, dy : array-like
        times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be a
        `~astropy.units.Quantity`.
    frequency : array-like
        frequencies (not angular frequencies) at which to calculate periodogram
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    fit_mean : bool, optional
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
    center_data : bool, optional
        if True, pre-center the data by subtracting the weighted mean
        of the input data. This is especially important if ``fit_mean = False``
    nterms : int, optional
        Number of Fourier terms in the fit

    Returns
    -------
    power : array-like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. 1982, ApJ 263:835-853
    """
    if dy is None:
        dy = 1

    t, y, dy = np.broadcast_arrays(t, y, dy)
    frequency = np.asarray(frequency)

    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")
    if frequency.ndim != 1:
        raise ValueError("frequency should be one-dimensional")

    w = dy**-2.0
    w /= w.sum()

    # if fit_mean is true, centering the data now simplifies the math below.
    if center_data or fit_mean:
        yw = (y - np.dot(w, y)) / dy
    else:
        yw = y / dy
    chi2_ref = np.dot(yw, yw)

    # compute the unnormalized model chi2 at each frequency
    def compute_power(f):
        X = design_matrix(t, f, dy=dy, bias=fit_mean, nterms=nterms)
        XTX = np.dot(X.T, X)
        XTy = np.dot(X.T, yw)
        return np.dot(XTy.T, np.linalg.solve(XTX, XTy))

    p = np.array([compute_power(f) for f in frequency])

    if normalization == "psd":
        p *= 0.5
    elif normalization == "model":
        p /= chi2_ref - p
    elif normalization == "log":
        p = -np.log(1 - p / chi2_ref)
    elif normalization == "standard":
        p /= chi2_ref
    else:
        raise ValueError(f"normalization='{normalization}' not recognized")
    return p
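# Peak-recovery sketch for lombscargle_chi2: an injected signal at 0.7 cycles
# should dominate the periodogram at its true frequency. (Assumes
# lombscargle_chi2 and design_matrix are in scope; illustrative only.)
def _chi2_peak_example():
    rng = np.random.default_rng(1)
    t = np.sort(30 * rng.random(150))
    y = 2.0 + np.sin(2 * np.pi * 0.7 * t) + 0.3 * rng.normal(size=t.size)
    dy = 0.3 * np.ones_like(t)

    freq = np.linspace(0.1, 2.0, 500)
    power = lombscargle_chi2(t, y, dy, freq)
    return freq[np.argmax(power)]  # ~0.7, the injected frequency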
def lombscargle_fastchi2(
    t,
    y,
    dy,
    f0,
    df,
    Nf,
    normalization="standard",
    fit_mean=True,
    center_data=True,
    nterms=1,
    use_fft=True,
    trig_sum_kwds=None,
):
    """Lomb-Scargle Periodogram.

    This implements a fast chi-squared periodogram using the algorithm
    outlined in [4]_. The result is identical to the standard Lomb-Scargle
    periodogram. The advantage of this algorithm is the ability to compute
    multiterm periodograms relatively quickly.

    Parameters
    ----------
    t, y, dy : array-like
        times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be a
        `~astropy.units.Quantity`.
    f0, df, Nf : (float, float, int)
        parameters describing the frequency grid, f = f0 + df * arange(Nf).
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    fit_mean : bool, optional
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
    center_data : bool, optional
        if True, pre-center the data by subtracting the weighted mean
        of the input data. This is especially important if ``fit_mean = False``
    nterms : int, optional
        Number of Fourier terms in the fit

    Returns
    -------
    power : array-like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. ApJ 263:835-853 (1982)
    .. [4] Palmer, J. ApJ 695:496-502 (2009)
    """
    if nterms == 0 and not fit_mean:
        raise ValueError("Cannot have nterms = 0 without fitting bias")

    if dy is None:
        dy = 1

    # Validate and setup input data
    t, y, dy = np.broadcast_arrays(t, y, dy)
    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")

    # Validate and setup frequency grid
    if f0 < 0:
        raise ValueError("Frequencies must be positive")
    if df <= 0:
        raise ValueError("Frequency steps must be positive")
    if Nf <= 0:
        raise ValueError("Number of frequencies must be positive")

    w = dy**-2.0
    ws = np.sum(w)

    # if fit_mean is true, centering the data now simplifies the math below.
    if center_data or fit_mean:
        y = y - np.dot(w, y) / ws

    yw = y / dy
    chi2_ref = np.dot(yw, yw)

    kwargs = dict.copy(trig_sum_kwds or {})
    kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)

    # Here we build-up the matrices XTX and XTy using pre-computed sums.
    # The relevant identities are
    # 2 sin(mx) sin(nx) = cos(m-n)x - cos(m+n)x
    # 2 cos(mx) cos(nx) = cos(m-n)x + cos(m+n)x
    # 2 sin(mx) cos(nx) = sin(m-n)x + sin(m+n)x

    yws = np.sum(y * w)

    SCw = [(np.zeros(Nf), ws * np.ones(Nf))]
    SCw.extend(
        [trig_sum(t, w, freq_factor=i, **kwargs) for i in range(1, 2 * nterms + 1)]
    )
    Sw, Cw = zip(*SCw)

    SCyw = [(np.zeros(Nf), yws * np.ones(Nf))]
    SCyw.extend(
        [trig_sum(t, w * y, freq_factor=i, **kwargs) for i in range(1, nterms + 1)]
    )
    Syw, Cyw = zip(*SCyw)

    # Now create an indexing scheme so we can quickly
    # build-up matrices at each frequency
    order = [("C", 0)] if fit_mean else []
    order.extend(chain(*([("S", i), ("C", i)] for i in range(1, nterms + 1))))

    funcs = dict(
        S=lambda m, i: Syw[m][i],
        C=lambda m, i: Cyw[m][i],
        SS=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] - Cw[m + n][i]),
        CC=lambda m, n, i: 0.5 * (Cw[abs(m - n)][i] + Cw[m + n][i]),
        SC=lambda m, n, i: 0.5 * (np.sign(m - n) * Sw[abs(m - n)][i] + Sw[m + n][i]),
        CS=lambda m, n, i: 0.5 * (np.sign(n - m) * Sw[abs(n - m)][i] + Sw[n + m][i]),
    )

    def compute_power(i):
        XTX = np.array(
            [[funcs[A[0] + B[0]](A[1], B[1], i) for A in order] for B in order]
        )
        XTy = np.array([funcs[A[0]](A[1], i) for A in order])
        return np.dot(XTy.T, np.linalg.solve(XTX, XTy))

    p = np.array([compute_power(i) for i in range(Nf)])

    if normalization == "psd":
        p *= 0.5
    elif normalization == "standard":
        p /= chi2_ref
    elif normalization == "log":
        p = -np.log(1 - p / chi2_ref)
    elif normalization == "model":
        p /= chi2_ref - p
    else:
        raise ValueError(f"normalization='{normalization}' not recognized")
    return p
def lombscargle_fast(
    t,
    y,
    dy,
    f0,
    df,
    Nf,
    center_data=True,
    fit_mean=True,
    normalization="standard",
    use_fft=True,
    trig_sum_kwds=None,
):
    """Fast Lomb-Scargle Periodogram.

    This implements the Press & Rybicki method [1]_ for fast O[N log(N)]
    Lomb-Scargle periodograms.

    Parameters
    ----------
    t, y, dy : array-like
        times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be a
        `~astropy.units.Quantity`.
    f0, df, Nf : (float, float, int)
        parameters describing the frequency grid, f = f0 + df * arange(Nf).
    center_data : bool (default=True)
        Specify whether to subtract the mean of the data before the fit
    fit_mean : bool (default=True)
        If True, then compute the floating-mean periodogram; i.e. let the mean
        vary with the fit.
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    use_fft : bool (default=True)
        If True, then use the Press & Rybicki O[NlogN] algorithm to compute
        the result. Otherwise, use a slower O[N^2] algorithm
    trig_sum_kwds : dict or None, optional
        extra keyword arguments to pass to the ``trig_sum`` utility.
        Options are ``oversampling`` and ``Mfft``. See documentation
        of ``trig_sum`` for details.

    Returns
    -------
    power : ndarray
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    Notes
    -----
    Note that the ``use_fft=True`` algorithm is an approximation to the true
    Lomb-Scargle periodogram, and as the number of points grows this
    approximation improves. On the other hand, for very small datasets
    (<~50 points or so) this approximation may not be useful.

    References
    ----------
    .. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
        of unevenly sampled data". ApJ 1:338, p277, 1989
    .. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [3] W. Press et al, Numerical Recipes in C (2002)
    """
    if dy is None:
        dy = 1

    # Validate and setup input data
    t, y, dy = np.broadcast_arrays(t, y, dy)
    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")

    # Validate and setup frequency grid
    if f0 < 0:
        raise ValueError("Frequencies must be positive")
    if df <= 0:
        raise ValueError("Frequency steps must be positive")
    if Nf <= 0:
        raise ValueError("Number of frequencies must be positive")

    w = dy**-2.0
    w /= w.sum()

    # Center the data. Even if we're fitting the offset,
    # this step makes the expressions below more succinct
    if center_data or fit_mean:
        y = y - np.dot(w, y)

    # set up arguments to trig_sum
    kwargs = dict.copy(trig_sum_kwds or {})
    kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)

    # ----------------------------------------------------------------------
    # 1. compute functions of the time-shift tau at each frequency
    Sh, Ch = trig_sum(t, w * y, **kwargs)
    S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)

    if fit_mean:
        S, C = trig_sum(t, w, **kwargs)
        tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
    else:
        tan_2omega_tau = S2 / C2

    # This is what we're computing below; the straightforward way is slower
    # and less stable, so we use trig identities instead
    #
    # omega_tau = 0.5 * np.arctan(tan_2omega_tau)
    # S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
    # Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)

    S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
    Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
    Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)

    # ----------------------------------------------------------------------
    # 2. Compute the periodogram, following Zechmeister & Kurster
    #    and using tricks from Press & Rybicki.
    YY = np.dot(w, y**2)
    YC = Ch * Cw + Sh * Sw
    YS = Sh * Cw - Ch * Sw
    CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
    SS = 0.5 * (1 - C2 * C2w - S2 * S2w)

    if fit_mean:
        CC -= (C * Cw + S * Sw) ** 2
        SS -= (S * Cw - C * Sw) ** 2

    power = YC * YC / CC + YS * YS / SS

    if normalization == "standard":
        power /= YY
    elif normalization == "model":
        power /= YY - power
    elif normalization == "log":
        power = -np.log(1 - power / YY)
    elif normalization == "psd":
        power *= 0.5 * (dy**-2.0).sum()
    else:
        raise ValueError(f"normalization='{normalization}' not recognized")

    return power
def _get_frequency_grid(frequency, assume_regular_frequency=False):
    """Utility to get grid parameters from a frequency array.

    Parameters
    ----------
    frequency : array-like or `~astropy.units.Quantity` ['frequency']
        input frequency grid
    assume_regular_frequency : bool (default = False)
        if True, then do not check whether frequency is a regular grid

    Returns
    -------
    f0, df, N : scalar
        Parameters such that all(frequency == f0 + df * np.arange(N))
    """
    frequency = np.asarray(frequency)
    if frequency.ndim != 1:
        raise ValueError("frequency grid must be 1 dimensional")
    elif len(frequency) == 1:
        return frequency[0], frequency[0], 1
    elif not (assume_regular_frequency or _is_regular(frequency)):
        raise ValueError("frequency must be a regular grid")

    return frequency[0], frequency[1] - frequency[0], len(frequency)
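# What the returned grid parameters mean: a regular grid round-trips through
# its (f0, df, N) description. (assume_regular_frequency=True skips the
# upstream _is_regular helper, which is not reproduced here.)
_freq = np.linspace(0.1, 1.0, 10)
_f0, _df, _N = _get_frequency_grid(_freq, assume_regular_frequency=True)
assert np.allclose(_freq, _f0 + _df * np.arange(_N))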
def validate_method(method, dy, fit_mean, nterms, frequency, assume_regular_frequency):
    """Validate the method argument, and if method='auto'
    choose the appropriate method.
    """
    methods = available_methods()
    prefer_fast = len(frequency) > 200 and (
        assume_regular_frequency or _is_regular(frequency)
    )
    prefer_scipy = "scipy" in methods and dy is None and not fit_mean

    # automatically choose the appropriate method
    if method == "auto":
        if nterms != 1:
            if prefer_fast:
                method = "fastchi2"
            else:
                method = "chi2"
        elif prefer_fast:
            method = "fast"
        elif prefer_scipy:
            method = "scipy"
        else:
            method = "cython"

    if method not in METHODS:
        raise ValueError(f"invalid method: {method}")

    return method
def lombscargle(
    t,
    y,
    dy=None,
    frequency=None,
    method="auto",
    assume_regular_frequency=False,
    normalization="standard",
    fit_mean=True,
    center_data=True,
    method_kwds=None,
    nterms=1,
):
    """
    Compute the Lomb-Scargle Periodogram with a given method.

    Parameters
    ----------
    t : array-like
        sequence of observation times
    y : array-like
        sequence of observations associated with times t
    dy : float or array-like, optional
        error or sequence of observational errors associated with times t
    frequency : array-like
        frequencies (not angular frequencies) at which to evaluate the
        periodogram. If not specified, optimal frequencies will be chosen
        using a heuristic which will attempt to provide sufficient frequency
        range and sampling so that peaks will not be missed. Note that in
        order to use method='fast', frequencies must be regularly spaced.
    method : str, optional
        specify the Lomb-Scargle implementation to use. Options are:

        - 'auto': choose the best method based on the input
        - 'fast': use the O[N log N] fast method. Note that this requires
          evenly-spaced frequencies: by default this will be checked unless
          ``assume_regular_frequency`` is set to True.
        - 'slow': use the O[N^2] pure-python implementation
        - 'chi2': use the O[N^2] chi2/linear-fitting implementation
        - 'fastchi2': use the O[N log N] chi2 implementation. Note that this
          requires evenly-spaced frequencies: by default this will be checked
          unless ``assume_regular_frequency`` is set to True.
        - 'scipy': use ``scipy.signal.lombscargle``, which is an O[N^2]
          implementation written in C. Note that this does not support
          heteroskedastic errors.

    assume_regular_frequency : bool, optional
        if True, assume that the input frequency is of the form
        freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
        or 'fast'.
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard' or 'psd'.
    fit_mean : bool, optional
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in
        the case of incomplete phase coverage.
    center_data : bool, optional
        if True, pre-center the data by subtracting the weighted mean of
        the input data. This is especially important if ``fit_mean = False``
    method_kwds : dict, optional
        additional keywords to pass to the Lomb-Scargle method
    nterms : int, optional
        number of Fourier terms to use in the periodogram.
        Not supported with every method.

    Returns
    -------
    PLS : array-like
        Lomb-Scargle power associated with each frequency omega
    """
    # frequencies should be one-dimensional arrays
    output_shape = frequency.shape
    frequency = frequency.ravel()

    # we'll need to adjust args and kwds for each method
    args = (t, y, dy)
    kwds = dict(
        frequency=frequency,
        center_data=center_data,
        fit_mean=fit_mean,
        normalization=normalization,
        nterms=nterms,
        **(method_kwds or {}),
    )

    method = validate_method(
        method,
        dy=dy,
        fit_mean=fit_mean,
        nterms=nterms,
        frequency=frequency,
        assume_regular_frequency=assume_regular_frequency,
    )

    # scipy doesn't support dy or fit_mean=True
    if method == "scipy":
        if kwds.pop("fit_mean"):
            raise ValueError("scipy method does not support fit_mean=True")
        if dy is not None:
            dy = np.ravel(np.asarray(dy))
            if not np.allclose(dy[0], dy):
                raise ValueError("scipy method only supports uniform uncertainties dy")
        args = (t, y)

    # fast methods require frequency expressed as a grid
    if method.startswith("fast"):
        f0, df, Nf = _get_frequency_grid(
            kwds.pop("frequency"), assume_regular_frequency
        )
        kwds.update(f0=f0, df=df, Nf=Nf)

    # only chi2 methods support nterms
    if not method.endswith("chi2"):
        if kwds.pop("nterms") != 1:
            raise ValueError(
                "nterms != 1 only supported with 'chi2' or 'fastchi2' methods"
            )

    PLS = METHODS[method](*args, **kwds)
    return PLS.reshape(output_shape)
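# Dispatcher usage sketch, via the public astropy class whose power() method
# routes through this function (names per astropy's documented API; the
# function below is illustrative only).
def _lombscargle_method_example():
    from astropy.timeseries import LombScargle

    rng = np.random.default_rng(3)
    t = np.sort(50 * rng.random(300))
    y = 1.5 * np.sin(2 * np.pi * 0.42 * t) + rng.normal(size=t.size)

    freq = np.linspace(0.05, 1.0, 2000)  # regular grid, so 'fast' is eligible
    power = LombScargle(t, y).power(freq, method="fast")
    return freq[np.argmax(power)]  # ~0.42, the injected frequency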
def design_matrix(t, frequency, dy=None, bias=True, nterms=1):
    r"""Compute the Lomb-Scargle design matrix at the given frequency.

    This is the matrix X such that the periodic model at the given frequency
    can be expressed :math:`\hat{y} = X \theta`.

    Parameters
    ----------
    t : array-like, shape=(n_times,)
        times at which to compute the design matrix
    frequency : float
        frequency for the design matrix
    dy : float or array-like, optional
        data uncertainties: should be broadcastable with `t`
    bias : bool (default=True)
        If true, include a bias column in the matrix
    nterms : int (default=1)
        Number of Fourier terms to include in the model

    Returns
    -------
    X : ndarray, shape=(n_times, n_parameters)
        The design matrix, where n_parameters = bool(bias) + 2 * nterms
    """
    t = np.asarray(t)
    frequency = np.asarray(frequency)

    if t.ndim != 1:
        raise ValueError("t should be one dimensional")
    if frequency.ndim != 0:
        raise ValueError("frequency must be a scalar")

    if nterms == 0 and not bias:
        raise ValueError("cannot have nterms=0 and no bias")

    if bias:
        cols = [np.ones_like(t)]
    else:
        cols = []

    for i in range(1, nterms + 1):
        cols.append(np.sin(2 * np.pi * i * frequency * t))
        cols.append(np.cos(2 * np.pi * i * frequency * t))

    XT = np.vstack(cols)

    if dy is not None:
        XT /= dy

    return np.transpose(XT)
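# Shape check: one bias column plus a (sin, cos) pair per Fourier term
# (illustrative; would live in tests):
_t = np.linspace(0, 10, 8)
_X = design_matrix(_t, 0.5, bias=True, nterms=2)
assert _X.shape == (8, 5)  # 1 + 2 * nterms columns
assert np.allclose(_X[:, 0], 1.0)  # the bias column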
def periodic_fit(t, y, dy, frequency, t_fit, center_data=True, fit_mean=True, nterms=1):
    """Compute the Lomb-Scargle model fit at a given frequency.

    Parameters
    ----------
    t, y, dy : float or array-like
        The times, observations, and uncertainties to fit
    frequency : float
        The frequency at which to compute the model
    t_fit : float or array-like
        The times at which the fit should be computed
    center_data : bool (default=True)
        If True, center the input data before applying the fit
    fit_mean : bool (default=True)
        If True, include the bias as part of the model
    nterms : int (default=1)
        The number of Fourier terms to include in the fit

    Returns
    -------
    y_fit : ndarray
        The model fit evaluated at each value of t_fit
    """
    t, y, frequency = map(np.asarray, (t, y, frequency))

    if dy is None:
        dy = np.ones_like(y)
    else:
        dy = np.asarray(dy)

    t_fit = np.asarray(t_fit)

    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")
    if t_fit.ndim != 1:
        raise ValueError("t_fit should be one dimensional")
    if frequency.ndim != 0:
        raise ValueError("frequency should be a scalar")

    if center_data:
        w = dy**-2.0
        y_mean = np.dot(y, w) / w.sum()
        y = y - y_mean
    else:
        y_mean = 0

    X = design_matrix(t, frequency, dy=dy, bias=fit_mean, nterms=nterms)
    theta_MLE = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y / dy))

    X_fit = design_matrix(t_fit, frequency, bias=fit_mean, nterms=nterms)

    return y_mean + np.dot(X_fit, theta_MLE)
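# Exactness check for periodic_fit: a noiseless sinusoid evaluated at its
# true frequency lies in the model's column space, so the fit is exact
# (illustrative; would live in tests):
_t = np.linspace(0, 10, 50)
_y = 3.0 + np.sin(2 * np.pi * 0.5 * _t)
_t_fit = np.linspace(0, 10, 200)
_y_fit = periodic_fit(_t, _y, dy=None, frequency=0.5, t_fit=_t_fit)
assert np.allclose(_y_fit, 3.0 + np.sin(2 * np.pi * 0.5 * _t_fit))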
def lombscargle_scipy(t, y, frequency, normalization="standard", center_data=True):
    """Lomb-Scargle Periodogram.

    This is a wrapper of ``scipy.signal.lombscargle`` for computation of the
    Lomb-Scargle periodogram. This is a relatively fast version of the naive
    O[N^2] algorithm, but cannot handle heteroskedastic errors.

    Parameters
    ----------
    t, y : array-like
        times and values of the data points. These should be broadcastable
        to the same shape. None of these should be a
        `~astropy.units.Quantity`.
    frequency : array-like
        frequencies (not angular frequencies) at which to calculate periodogram
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    center_data : bool, optional
        if True, pre-center the data by subtracting the weighted mean
        of the input data.

    Returns
    -------
    power : array-like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [2] W. Press et al, Numerical Recipes in C (2002)
    .. [3] Scargle, J.D. 1982, ApJ 263:835-853
    """
    try:
        from scipy import signal
    except ImportError:
        raise ImportError("scipy must be installed to use lombscargle_scipy")

    t, y = np.broadcast_arrays(t, y)

    # Scipy requires floating-point input
    t = np.asarray(t, dtype=float)
    y = np.asarray(y, dtype=float)
    frequency = np.asarray(frequency, dtype=float)

    if t.ndim != 1:
        raise ValueError("t, y should be one dimensional")
    if frequency.ndim != 1:
        raise ValueError("frequency should be one-dimensional")

    if center_data:
        y = y - y.mean()

    # Note: scipy input accepts angular frequencies
    p = signal.lombscargle(t, y, 2 * np.pi * frequency)

    if normalization == "psd":
        pass
    elif normalization == "standard":
        p *= 2 / (t.size * np.mean(y**2))
    elif normalization == "log":
        p = -np.log(1 - 2 * p / (t.size * np.mean(y**2)))
    elif normalization == "model":
        p /= 0.5 * t.size * np.mean(y**2) - p
    else:
        raise ValueError(f"normalization='{normalization}' not recognized")
    return p
def lombscargle_slow(
    t, y, dy, frequency, normalization="standard", fit_mean=True, center_data=True
):
    """Lomb-Scargle Periodogram.

    This is a pure-python implementation of the original Lomb-Scargle
    formalism (e.g. [1]_, [2]_), with the addition of the floating mean
    (e.g. [3]_).

    Parameters
    ----------
    t, y, dy : array-like
        times, values, and errors of the data points. These should be
        broadcastable to the same shape. None of these should be a
        `~astropy.units.Quantity`.
    frequency : array-like
        frequencies (not angular frequencies) at which to calculate periodogram
    normalization : str, optional
        Normalization to use for the periodogram.
        Options are 'standard', 'model', 'log', or 'psd'.
    fit_mean : bool, optional
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
    center_data : bool, optional
        if True, pre-center the data by subtracting the weighted mean
        of the input data. This is especially important if ``fit_mean = False``

    Returns
    -------
    power : array-like
        Lomb-Scargle power associated with each frequency.
        Units of the result depend on the normalization.

    References
    ----------
    .. [1] W. Press et al, Numerical Recipes in C (2002)
    .. [2] Scargle, J.D. 1982, ApJ 263:835-853
    .. [3] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    """
    if dy is None:
        dy = 1

    t, y, dy = np.broadcast_arrays(t, y, dy)
    frequency = np.asarray(frequency)

    if t.ndim != 1:
        raise ValueError("t, y, dy should be one dimensional")
    if frequency.ndim != 1:
        raise ValueError("frequency should be one-dimensional")

    w = dy**-2.0
    w /= w.sum()

    # if fit_mean is true, centering the data now simplifies the math below.
    if fit_mean or center_data:
        y = y - np.dot(w, y)

    omega = 2 * np.pi * frequency
    omega = omega.ravel()[np.newaxis, :]

    # make following arrays into column vectors
    t, y, dy, w = (x[:, np.newaxis] for x in (t, y, dy, w))

    sin_omega_t = np.sin(omega * t)
    cos_omega_t = np.cos(omega * t)

    # compute time-shift tau
    # S2 = np.dot(w.T, np.sin(2 * omega * t))
    S2 = 2 * np.dot(w.T, sin_omega_t * cos_omega_t)
    # C2 = np.dot(w.T, np.cos(2 * omega * t))
    C2 = 2 * np.dot(w.T, 0.5 - sin_omega_t**2)

    if fit_mean:
        S = np.dot(w.T, sin_omega_t)
        C = np.dot(w.T, cos_omega_t)
        S2 -= 2 * S * C
        C2 -= C * C - S * S

    # compute components needed for the fit
    omega_t_tau = omega * t - 0.5 * np.arctan2(S2, C2)

    sin_omega_t_tau = np.sin(omega_t_tau)
    cos_omega_t_tau = np.cos(omega_t_tau)

    Y = np.dot(w.T, y)
    wy = w * y

    YCtau = np.dot(wy.T, cos_omega_t_tau)
    YStau = np.dot(wy.T, sin_omega_t_tau)
    CCtau = np.dot(w.T, cos_omega_t_tau * cos_omega_t_tau)
    SStau = np.dot(w.T, sin_omega_t_tau * sin_omega_t_tau)

    if fit_mean:
        Ctau = np.dot(w.T, cos_omega_t_tau)
        Stau = np.dot(w.T, sin_omega_t_tau)
        YCtau -= Y * Ctau
        YStau -= Y * Stau
        CCtau -= Ctau * Ctau
        SStau -= Stau * Stau

    p = YCtau * YCtau / CCtau + YStau * YStau / SStau
    YY = np.dot(w.T, y * y)

    if normalization == "standard":
        p /= YY
    elif normalization == "model":
        p /= YY - p
    elif normalization == "log":
        p = -np.log(1 - p / YY)
    elif normalization == "psd":
        p *= 0.5 * (dy**-2.0).sum()
    else:
        raise ValueError(f"normalization='{normalization}' not recognized")
    return p.ravel()
Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N))).
def bitceil(N): """ Find the bit (i.e. power of 2) immediately greater than or equal to N Note: this works for numbers up to 2 ** 64. Roughly equivalent to int(2 ** np.ceil(np.log2(N))). """ return 1 << int(N - 1).bit_length()
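A few spot checks of the behavior at, below, and above powers of two (expected values worked out by hand from the bit-shift definition):

for n in (1, 2, 3, 1000, 1024, 1025):
    print(n, bitceil(n))
# 1 -> 1, 2 -> 2, 3 -> 4, 1000 -> 1024, 1024 -> 1024, 1025 -> 2048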
Extirpolate the values (x, y) onto an integer grid range(N), using lagrange polynomial weights on the M nearest points. Parameters ---------- x : array-like array of abscissas y : array-like array of ordinates N : int number of integer bins to use. For best performance, N should be larger than the maximum of x M : int number of adjoining points on which to extirpolate. Returns ------- yN : ndarray N extirpolated values associated with range(N) Examples -------- >>> rng = np.random.default_rng(0) >>> x = 100 * rng.random(20) >>> y = np.sin(x) >>> y_hat = extirpolate(x, y) >>> x_hat = np.arange(len(y_hat)) >>> f = lambda x: np.sin(x / 10) >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat))) True Notes ----- This code is based on the C implementation of spread() presented in Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
def extirpolate(x, y, N=None, M=4):
    """
    Extirpolate the values (x, y) onto an integer grid range(N), using
    lagrange polynomial weights on the M nearest points.

    Parameters
    ----------
    x : array-like
        array of abscissas
    y : array-like
        array of ordinates
    N : int
        number of integer bins to use. For best performance, N should be larger
        than the maximum of x
    M : int
        number of adjoining points on which to extirpolate.

    Returns
    -------
    yN : ndarray
        N extirpolated values associated with range(N)

    Examples
    --------
    >>> rng = np.random.default_rng(0)
    >>> x = 100 * rng.random(20)
    >>> y = np.sin(x)
    >>> y_hat = extirpolate(x, y)
    >>> x_hat = np.arange(len(y_hat))
    >>> f = lambda x: np.sin(x / 10)
    >>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
    True

    Notes
    -----
    This code is based on the C implementation of spread() presented in
    Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
    """
    x, y = map(np.ravel, np.broadcast_arrays(x, y))

    if N is None:
        N = int(np.max(x) + 0.5 * M + 1)

    # Now use Lagrange polynomial weights to populate the results array;
    # This is an efficient recursive implementation (See Press et al. 1989)
    result = np.zeros(N, dtype=y.dtype)

    # first take care of the easy cases where x is an integer
    integers = x % 1 == 0
    np.add.at(result, x[integers].astype(int), y[integers])
    x, y = x[~integers], y[~integers]

    # For each remaining x, find the index describing the extirpolation range.
    # i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
    # adjusted so that the limits are within the range 0...N
    ilo = np.clip((x - M // 2).astype(int), 0, N - M)
    numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
    denominator = factorial(M - 1)

    for j in range(M):
        if j > 0:
            denominator *= j / (j - M)
        ind = ilo + (M - 1 - j)
        np.add.at(result, ind, numerator / (denominator * (x - ind)))
    return result
Compute (approximate) trigonometric sums for a number of frequencies.

This routine computes weighted sine and cosine sums::

    S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
    C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }

Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.

The sums can be computed either by a brute force O[N^2] method, or by an FFT-based O[Nlog(N)] method.

Parameters
----------
t : array-like
    array of input times
h : array-like
    array of weights for the sum
df : float
    frequency spacing
N : int
    number of frequency bins to return
f0 : float, optional
    The low frequency to use
freq_factor : float, optional
    Factor which multiplies the frequency
use_fft : bool
    if True, use the approximate FFT algorithm to compute the result. This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
    oversampling factor for the approximation; roughly the number of time samples across the highest-frequency sinusoid. This parameter contains the trade-off between accuracy and speed. Not referenced if use_fft is False.
Mfft : int
    The number of adjacent points to use in the FFT approximation. Not referenced if use_fft is False.

Returns
-------
S, C : ndarray
    summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
def trig_sum(t, h, df, N, f0=0, freq_factor=1, oversampling=5, use_fft=True, Mfft=4):
    """Compute (approximate) trigonometric sums for a number of frequencies.

    This routine computes weighted sine and cosine sums::

        S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
        C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }

    Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.

    The sums can be computed either by a brute force O[N^2] method, or
    by an FFT-based O[Nlog(N)] method.

    Parameters
    ----------
    t : array-like
        array of input times
    h : array-like
        array of weights for the sum
    df : float
        frequency spacing
    N : int
        number of frequency bins to return
    f0 : float, optional
        The low frequency to use
    freq_factor : float, optional
        Factor which multiplies the frequency
    use_fft : bool
        if True, use the approximate FFT algorithm to compute the result.
        This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
    oversampling : int (default = 5)
        oversampling factor for the approximation; roughly the number of
        time samples across the highest-frequency sinusoid. This parameter
        contains the trade-off between accuracy and speed. Not referenced
        if use_fft is False.
    Mfft : int
        The number of adjacent points to use in the FFT approximation.
        Not referenced if use_fft is False.

    Returns
    -------
    S, C : ndarray
        summation arrays for frequencies f = freq_factor * (f0 + df * np.arange(N))
    """
    df *= freq_factor
    f0 *= freq_factor

    if df <= 0:
        raise ValueError("df must be positive")
    t, h = map(np.ravel, np.broadcast_arrays(t, h))

    if use_fft:
        Mfft = int(Mfft)
        if Mfft <= 0:
            raise ValueError("Mfft must be positive")

        # required size of fft is the power of 2 above the oversampling rate
        Nfft = bitceil(N * oversampling)
        t0 = t.min()

        if f0 > 0:
            h = h * np.exp(2j * np.pi * f0 * (t - t0))

        tnorm = ((t - t0) * Nfft * df) % Nfft
        grid = extirpolate(tnorm, h, Nfft, Mfft)

        fftgrid = np.fft.ifft(grid)[:N]
        if t0 != 0:
            f = f0 + df * np.arange(N)
            fftgrid *= np.exp(2j * np.pi * t0 * f)

        C = Nfft * fftgrid.real
        S = Nfft * fftgrid.imag
    else:
        f = f0 + df * np.arange(N)
        C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
        S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))

    return S, C
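The two code paths above should agree closely. A self-contained cross-check, with the tolerance left to inspection since the extirpolation error depends on ``oversampling`` and ``Mfft``:

import numpy as np

rng = np.random.default_rng(0)
t = 30 * rng.random(50)
h = np.sin(t)

# Push the same frequency grid through both branches.
S_fft, C_fft = trig_sum(t, h, df=0.01, N=100, use_fft=True)
S_direct, C_direct = trig_sum(t, h, df=0.01, N=100, use_fft=False)

# Differences should be tiny; they shrink as oversampling/Mfft grow.
print(np.max(np.abs(S_fft - S_direct)), np.max(np.abs(C_fft - C_direct)))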
Generate some data for testing
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0): """Generate some data for testing""" rng = np.random.default_rng(rseed) t = 20 * period * rng.random(N) omega = 2 * np.pi / period y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t) dy = dy * (0.5 + rng.random(N)) y += dy * rng.standard_normal(N) return t, y, dy
Generate some data for testing
def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0, units=False): """Generate some data for testing""" rng = np.random.default_rng(rseed) t = 5 * period * rng.random(N) omega = 2 * np.pi / period y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t) dy = dy * (0.5 + rng.random(N)) y += dy * rng.standard_normal(N) fmax = 5 if units: return t * u.day, y * u.mag, dy * u.mag, fmax / u.day else: return t, y, dy, fmax
Generate null hypothesis data
def null_data(N=1000, dy=1, rseed=0, units=False): """Generate null hypothesis data""" rng = np.random.default_rng(rseed) t = 100 * rng.random(N) dy = 0.5 * dy * (1 + rng.random(N)) y = dy * rng.standard_normal(N) fmax = 40 if units: return t * u.day, y * u.mag, dy * u.mag, fmax / u.day else: return t, y, dy, fmax
Generate some data for testing
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0): """Generate some data for testing""" rng = np.random.default_rng(rseed) t = 5 * period * rng.random(N) omega = 2 * np.pi / period y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t) dy = dy * (0.5 + rng.random(N)) y += dy * rng.standard_normal(N) return t, y, dy
Compute the multiband Lomb-Scargle model fit at a given frequency.

Parameters
----------
t, y, dy : float or array-like
    The times, observations, and uncertainties to fit
bands : str, or array-like
    The bands of each observation
frequency : float
    The frequency at which to compute the model
t_fit : float or array-like
    The times at which the fit should be computed
bands_fit : array-like
    The bands for which the fit should be evaluated; must be a subset of ``bands``
center_data : bool (default=True)
    If True, center the input data before applying the fit
nterms_base : int (default=1)
    The number of Fourier terms in the base (shared) model
nterms_band : int (default=1)
    The number of Fourier terms in each per-band model
reg_base, reg_band : float or None
    Regularization strengths applied to the base and per-band coefficients
regularize_by_trace : bool (default=True)
    If True, scale the regularization penalty by the trace of the normal matrix

Returns
-------
y_fit : ndarray
    The model fit evaluated at each value of t_fit
def periodic_fit( t, y, dy, bands, frequency, t_fit, bands_fit, center_data=True, nterms_base=1, nterms_band=1, reg_base=None, reg_band=1e-6, regularize_by_trace=True, ): """Compute the Lomb-Scargle model fit at a given frequency Parameters ---------- t, y, dy : float or array-like The times, observations, and uncertainties to fit bands : str, or array-like The bands of each observation frequency : float The frequency at which to compute the model t_fit : float or array-like The times at which the fit should be computed center_data : bool (default=True) If True, center the input data before applying the fit nterms : int (default=1) The number of Fourier terms to include in the fit Returns ------- y_fit : ndarray The model fit evaluated at each value of t_fit """ t, y, bands, frequency = map(np.asarray, (t, y, bands, frequency)) bands_fit = bands_fit[:, np.newaxis] unique_bands = np.unique(bands) unique_bands_fit = np.unique(bands_fit) if not set(unique_bands_fit).issubset(set(unique_bands)): raise ValueError( "bands_fit does not match training data: " f"input: {set(unique_bands_fit)} output: {set(unique_bands)}" ) t_fit, bands_fit = np.broadcast_arrays(t_fit, bands_fit) if dy is None: dy = np.ones_like(y) else: dy = np.asarray(dy) if t.ndim != 1 or y.ndim != 1 or dy.ndim != 1: raise ValueError("t, y, dy should be one dimensional") if frequency.ndim != 0: raise ValueError("frequency should be a scalar") # need to make sure all unique filters are represented u, i = np.unique( np.concatenate([bands_fit.ravel(), unique_bands]), return_inverse=True ) # Calculate ymeans ymeans = np.zeros( y.shape ) # An array of shape y, with each index given a filter specific mean if center_data: for band in unique_bands: mask = bands == band ymeans[mask] = np.average(y[mask], weights=1 / dy[mask] ** 2) y = y - ymeans ymeans_fit = ymeans[i[: -len(unique_bands)]] # Theta -- Construct X and M from t and bands, using weighting X = design_matrix( t, bands, frequency, dy=dy, nterms_base=nterms_base, nterms_band=nterms_band ) M = np.dot(X.T, X) regularization = construct_regularization( bands, nterms_base=nterms_base, nterms_band=nterms_band, reg_base=reg_base, reg_band=reg_band, ) if regularization is not None: diag = M.ravel(order="K")[ :: M.shape[0] + 1 ] # M is being affected by operations on diag if regularize_by_trace: diag += diag.sum() * np.asarray(regularization) else: diag += np.asarray(regularization) theta_MLE = np.linalg.solve(M, np.dot(X.T, y / dy)) # Fit to t_fit and bands_fit X_fit = design_matrix( t_fit.ravel(), bands_fit.ravel(), frequency, dy=None, nterms_base=nterms_base, nterms_band=nterms_band, ) if center_data: y_fit = ymeans_fit + np.dot(X_fit, theta_MLE) else: y_fit = np.dot(X_fit, theta_MLE) return y_fit.reshape(np.shape(t_fit))
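The heart of the fit above is a weighted least-squares solve with an optional ridge-style penalty added, in place, to the diagonal of the normal matrix through a strided view. A self-contained numpy sketch of just that step, with a hypothetical random design matrix standing in for the ``design_matrix`` helper used above:

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((50, 5))    # hypothetical unweighted design matrix
y = rng.standard_normal(50)
dy = np.full(50, 0.1)

Xw = X / dy[:, np.newaxis]          # weight rows by 1/sigma
M = Xw.T @ Xw                       # normal matrix
reg = 1e-6 * np.ones(5)             # per-parameter regularization strengths

# A strided view of M's diagonal, as in periodic_fit; adding to `diag`
# mutates M itself. Scaling by the trace makes the penalty relative to
# the overall scale of the problem (regularize_by_trace=True).
diag = M.ravel(order="K")[:: M.shape[0] + 1]
diag += diag.sum() * reg

theta = np.linalg.solve(M, Xw.T @ (y / dy))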
Generate some data for testing
def data(N=100, period=1, theta=[10, 2, 3], nbands=3, dy=1, rseed=0): """Generate some data for testing""" t_arr = [] y_arr = [] band_arr = [] dy_arr = [] for band in range(nbands): rng = np.random.default_rng(rseed + band) t_band = 20 * period * rng.random(N) omega = 2 * np.pi / period y_band = ( theta[0] + theta[1] * np.sin(omega * t_band) + theta[2] * np.cos(omega * t_band) ) dy_band = dy * (0.5 + rng.random(N)) y_band += dy_band * rng.standard_normal(N) t_arr += list(t_band) y_arr += list(y_band) dy_arr += list(dy_band) band_arr += ["a" * (band + 1)] * N # labels bands as "a","aa","aaa",.... t_arr = np.array(t_arr) y_arr = np.array(y_arr) band_arr = np.array(band_arr) dy_arr = np.array(dy_arr) return t_arr, y_arr, band_arr, dy_arr
Generate an astropy.timeseries.TimeSeries table
def timeseries_data(): """Generate an astropy.timeseries.TimeSeries table""" rng = np.random.default_rng(1) deltas = 240 * rng.random(180) ts1 = TimeSeries(time_start="2011-01-01T00:00:00", time_delta=deltas * u.minute) # g band fluxes g_flux = [0] * 180 * u.mJy g_err = [0] * 180 * u.mJy y_g = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[0:60]), 3) dy_g = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties g_flux.value[0:60] = y_g g_err.value[0:60] = dy_g ts1["g_flux"] = MaskedColumn(g_flux, mask=[False] * 60 + [True] * 120) ts1["g_err"] = MaskedColumn(g_err, mask=[False] * 60 + [True] * 120) # r band fluxes r_flux = [0] * 180 * u.mJy r_err = [0] * 180 * u.mJy y_r = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[60:120]), 3) dy_r = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties r_flux.value[60:120] = y_r r_err.value[60:120] = dy_r ts1["r_flux"] = MaskedColumn(r_flux, mask=[True] * 60 + [False] * 60 + [True] * 60) ts1["r_err"] = MaskedColumn(r_err, mask=[True] * 60 + [False] * 60 + [True] * 60) # i band fluxes i_flux = [0] * 180 * u.mJy i_err = [0] * 180 * u.mJy y_i = np.round(3 + 2 * np.sin(10 * np.pi * ts1["time"].mjd[120:]), 3) dy_i = np.round(0.01 * (0.5 + rng.random(60)), 3) # uncertainties i_flux.value[120:] = y_i i_err.value[120:] = dy_i ts1["i_flux"] = MaskedColumn(i_flux, mask=[True] * 120 + [False] * 60) ts1["i_err"] = MaskedColumn(i_err, mask=[True] * 120 + [False] * 60) return ts1
Regression test for #12527: allow downsampling even if all bins fall before or beyond the time span of the data.
def test_downsample_edge_cases(time, time_bin_start, time_bin_end): """Regression test for #12527: allow downsampling even if all bins fall before or beyond the time span of the data.""" ts = TimeSeries(time=time, data=[np.ones(len(time))], names=["a"]) down = aggregate_downsample( ts, time_bin_start=time_bin_start, time_bin_end=time_bin_end ) assert len(down) == len(time_bin_start) assert all(down["time_bin_size"] >= 0) # bin lengths shall never be negative if ts.time.min() < time_bin_start[0] or time_bin_end is not None: assert down[ "a" ].mask.all() # all bins placed *beyond* the time span of the data elif ts.time.min() < time_bin_start[1]: assert ( down["a"][0] == ts["a"][0] )
A test of the time precision limit supported by downsample(). It is related to an implementation detail: time comparison (and, indirectly, sorting) is done with relative times for computational efficiency. The converted relative time suffers a slight loss of precision, which worsens as the gap between a time and the base time increases, e.g., when downsampling a timeseries that combines current observations with archival data from years back. This test documents the acceptable precision limit. see also: https://github.com/astropy/astropy/pull/13069#issuecomment-1093069184
def test_time_precision_limit(diff_from_base):
    """
    A test of the time precision limit supported by downsample().

    It is related to an implementation detail: time comparison (and, indirectly,
    sorting) is done with relative times for computational efficiency.
    The converted relative time suffers a slight loss of precision, which
    worsens as the gap between a time and the base time increases, e.g., when
    downsampling a timeseries that combines current observations with archival
    data from years back. This test documents the acceptable precision limit.

    see also: https://github.com/astropy/astropy/pull/13069#issuecomment-1093069184
    """
    precision_limit = 500 * u.ns

    from astropy.timeseries.downsample import _to_relative_longdouble

    t_base = Time("1980-01-01T12:30:31.000", format="isot", scale="tdb")
    t2 = t_base + diff_from_base
    t3 = t2 + precision_limit
    r_t2 = _to_relative_longdouble(t2, t_base)
    r_t3 = _to_relative_longdouble(t3, t_base)

    # ensure in the converted relative time,
    # t2 and t3 can still be correctly compared
    assert r_t3 > r_t2
Create a Gaussian/normal distribution. Parameters ---------- center : `~astropy.units.Quantity` The center of this distribution std : `~astropy.units.Quantity` or None The standard deviation/σ of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``var`` or ``ivar`` are set). var : `~astropy.units.Quantity` or None The variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set). ivar : `~astropy.units.Quantity` or None The inverse variance of this distribution. Shape must match and unit must be compatible with ``center``, or be `None` (if ``std`` or ``var`` are set). n_samples : int The number of Monte Carlo samples to use with this distribution cls : class The class to use to create this distribution. Typically a `Distribution` subclass. Remaining keywords are passed into the constructor of the ``cls`` Returns ------- distr : `~astropy.uncertainty.Distribution` or object The sampled Gaussian distribution. The type will be the same as the parameter ``cls``.
def normal(
    center, *, std=None, var=None, ivar=None, n_samples, cls=Distribution, **kwargs
):
    """
    Create a Gaussian/normal distribution.

    Parameters
    ----------
    center : `~astropy.units.Quantity`
        The center of this distribution
    std : `~astropy.units.Quantity` or None
        The standard deviation/σ of this distribution. Shape must match and unit
        must be compatible with ``center``, or be `None` (if ``var`` or ``ivar``
        are set).
    var : `~astropy.units.Quantity` or None
        The variance of this distribution. Shape must match and unit must be
        compatible with ``center``, or be `None` (if ``std`` or ``ivar`` are set).
    ivar : `~astropy.units.Quantity` or None
        The inverse variance of this distribution. Shape must match and unit
        must be compatible with ``center``, or be `None` (if ``std`` or ``var``
        are set).
    n_samples : int
        The number of Monte Carlo samples to use with this distribution
    cls : class
        The class to use to create this distribution. Typically a
        `Distribution` subclass.

    Remaining keywords are passed into the constructor of the ``cls``

    Returns
    -------
    distr : `~astropy.uncertainty.Distribution` or object
        The sampled Gaussian distribution.
        The type will be the same as the parameter ``cls``.
    """
    center = np.asanyarray(center)
    if var is not None:
        if std is None:
            std = np.asanyarray(var) ** 0.5
        else:
            raise ValueError("normal cannot take both std and var")
    if ivar is not None:
        if std is None:
            std = np.asanyarray(ivar) ** -0.5
        else:
            raise ValueError("normal cannot take both ivar and std or var")
    if std is None:
        raise ValueError("normal requires one of std, var, or ivar")
    else:
        std = np.asanyarray(std)

    randshape = np.broadcast(std, center).shape + (n_samples,)
    samples = (
        center[..., np.newaxis] + np.random.randn(*randshape) * std[..., np.newaxis]
    )
    return cls(samples, **kwargs)
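A short usage sketch against the public ``astropy.uncertainty`` API; the values are illustrative.

import astropy.units as u
from astropy.uncertainty import normal

# 1000-sample distribution centered on 10 km/s with sigma = 2 km/s.
v = normal(10 * u.km / u.s, std=2 * u.km / u.s, n_samples=1000)
print(v.pdf_mean(), v.pdf_std())   # close to 10 km/s and 2 km/s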
Create a Poisson distribution. Parameters ---------- center : `~astropy.units.Quantity` The center value of this distribution (i.e., λ). n_samples : int The number of Monte Carlo samples to use with this distribution cls : class The class to use to create this distribution. Typically a `Distribution` subclass. Remaining keywords are passed into the constructor of the ``cls`` Returns ------- distr : `~astropy.uncertainty.Distribution` or object The sampled Poisson distribution. The type will be the same as the parameter ``cls``.
def poisson(center, n_samples, cls=Distribution, **kwargs):
    """
    Create a Poisson distribution.

    Parameters
    ----------
    center : `~astropy.units.Quantity`
        The center value of this distribution (i.e., λ).
    n_samples : int
        The number of Monte Carlo samples to use with this distribution
    cls : class
        The class to use to create this distribution. Typically a
        `Distribution` subclass.

    Remaining keywords are passed into the constructor of the ``cls``

    Returns
    -------
    distr : `~astropy.uncertainty.Distribution` or object
        The sampled Poisson distribution.
        The type will be the same as the parameter ``cls``.
    """
    # we convert to arrays because np.random.poisson has trouble with quantities
    has_unit = False
    if hasattr(center, "unit"):
        has_unit = True
        poissonarr = np.asanyarray(center.value)
    else:
        poissonarr = np.asanyarray(center)
    randshape = poissonarr.shape + (n_samples,)

    samples = np.random.poisson(poissonarr[..., np.newaxis], randshape)
    if has_unit:
        if center.unit == u.adu:
            warn(
                "ADUs were provided to poisson. ADUs are not strictly count "
                "units because they need the gain to be applied. It is "
                "recommended you apply the gain to convert to e.g. electrons."
            )
        elif center.unit not in COUNT_UNITS:
            warn(
                f"Unit {center.unit} was provided to poisson, which is not one of"
                f' {COUNT_UNITS}, and therefore suspect as a "counting" unit. Ensure'
                " you mean to use Poisson statistics."
            )
        # re-attach the unit
        samples = samples * center.unit

    return cls(samples, **kwargs)
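Usage sketch; counts are a recognized counting unit here, so no warning is emitted.

import astropy.units as u
from astropy.uncertainty import poisson

counts = poisson(100 * u.ct, n_samples=1000)
print(counts.pdf_mean(), counts.pdf_std())   # roughly 100 ct and 10 ct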
Create a Uniform distribution from the lower and upper bounds.

Note that this function requires keywords to be explicit, and requires either ``lower``/``upper`` or ``center``/``width``.

Parameters
----------
lower : array-like
    The lower edge of this distribution. If a `~astropy.units.Quantity`, the distribution will have the same units as ``lower``.
upper : `~astropy.units.Quantity`
    The upper edge of this distribution. Must match shape and if a `~astropy.units.Quantity` must have compatible units with ``lower``.
center : array-like
    The center value of the distribution. Cannot be provided at the same time as ``lower``/``upper``.
width : array-like
    The width of the distribution. Must have the same shape and compatible units with ``center`` (if any).
n_samples : int
    The number of Monte Carlo samples to use with this distribution
cls : class
    The class to use to create this distribution. Typically a `Distribution` subclass.

Remaining keywords are passed into the constructor of the ``cls``

Returns
-------
distr : `~astropy.uncertainty.Distribution` or object
    The sampled uniform distribution.
    The type will be the same as the parameter ``cls``.
def uniform(
    *,
    lower=None,
    upper=None,
    center=None,
    width=None,
    n_samples,
    cls=Distribution,
    **kwargs,
):
    """
    Create a Uniform distribution from the lower and upper bounds.

    Note that this function requires keywords to be explicit, and requires
    either ``lower``/``upper`` or ``center``/``width``.

    Parameters
    ----------
    lower : array-like
        The lower edge of this distribution. If a `~astropy.units.Quantity`, the
        distribution will have the same units as ``lower``.
    upper : `~astropy.units.Quantity`
        The upper edge of this distribution. Must match shape and if a
        `~astropy.units.Quantity` must have compatible units with ``lower``.
    center : array-like
        The center value of the distribution. Cannot be provided at the same
        time as ``lower``/``upper``.
    width : array-like
        The width of the distribution. Must have the same shape and compatible
        units with ``center`` (if any).
    n_samples : int
        The number of Monte Carlo samples to use with this distribution
    cls : class
        The class to use to create this distribution. Typically a
        `Distribution` subclass.

    Remaining keywords are passed into the constructor of the ``cls``

    Returns
    -------
    distr : `~astropy.uncertainty.Distribution` or object
        The sampled uniform distribution.
        The type will be the same as the parameter ``cls``.
    """
    if center is None and width is None:
        lower = np.asanyarray(lower)
        upper = np.asanyarray(upper)
        if lower.shape != upper.shape:
            raise ValueError("lower and upper must have consistent shapes")
    elif upper is None and lower is None:
        center = np.asanyarray(center)
        width = np.asanyarray(width)
        lower = center - width / 2
        upper = center + width / 2
    else:
        raise ValueError(
            "either upper/lower or center/width must be given "
            "to uniform - other combinations are not valid"
        )

    newshape = lower.shape + (n_samples,)
    if lower.shape == tuple() and upper.shape == tuple():
        width = upper - lower  # scalar
    else:
        width = (upper - lower)[:, np.newaxis]
        lower = lower[:, np.newaxis]
    samples = lower + width * np.random.uniform(size=newshape)

    return cls(samples, **kwargs)
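The two calling conventions build the same distribution; a quick sketch:

from astropy.uncertainty import uniform

d1 = uniform(lower=2.0, upper=4.0, n_samples=1000)
d2 = uniform(center=3.0, width=2.0, n_samples=1000)
print(d1.pdf_mean(), d2.pdf_mean())   # both near 3.0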
Get n_samples from the first Distribution among the arrays. The logic of getting ``n_samples`` from the first |Distribution| is that the code will raise an appropriate exception later if distributions do not have the same ``n_samples``.
def get_n_samples(*arrays):
    """Get n_samples from the first Distribution among the arrays.

    The logic of getting ``n_samples`` from the first |Distribution| is that
    the code will raise an appropriate exception later if distributions do
    not have the same ``n_samples``.
    """
    # TODO: add verification if another function needs it.
    for array in arrays:
        if is_distribution(array):
            return array.n_samples

    raise RuntimeError("no Distribution found! Please raise an issue.")
Broadcast arrays to a common shape. Like `numpy.broadcast_arrays`, applied to both distributions and other data. Note that ``subok`` is taken to mean whether or not subclasses of the distribution are allowed, i.e., for ``subok=False``, `~astropy.uncertainty.NdarrayDistribution` instances will be returned.
def broadcast_arrays(*args, subok=False): """Broadcast arrays to a common shape. Like `numpy.broadcast_arrays`, applied to both distributions and other data. Note that ``subok`` is taken to mean whether or not subclasses of the distribution are allowed, i.e., for ``subok=False``, `~astropy.uncertainty.NdarrayDistribution` instances will be returned. """ if not subok: args = tuple( arg.view(np.ndarray) if isinstance(arg, np.ndarray) else np.array(arg) for arg in args ) return args, {"subok": True}, True
Concatenate arrays. Like `numpy.concatenate`, but any array that is not already a |Distribution| is turned into one with identical samples.
def concatenate(arrays, axis=0, out=None, dtype=None, casting="same_kind"): """Concatenate arrays. Like `numpy.concatenate`, but any array that is not already a |Distribution| is turned into one with identical samples. """ n_samples = get_n_samples(*arrays, out) converted = tuple( array.distribution if is_distribution(array) else ( np.broadcast_to( array[..., np.newaxis], array.shape + (n_samples,), subok=True ) if getattr(array, "shape", False) else array ) for array in arrays ) if axis < 0: axis = axis - 1 # not in-place, just in case. kwargs = dict(axis=axis, dtype=dtype, casting=casting) if out is not None: if is_distribution(out): kwargs["out"] = out.distribution else: raise NotImplementedError return (converted,), kwargs, out
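A sketch of how this helper is reached in practice, assuming the ``__array_function__`` overrides in this module are active so that ``np.concatenate`` dispatches here for |Distribution| inputs:

import numpy as np
from astropy import uncertainty as unc

a = unc.normal(np.zeros(3), std=np.ones(3), n_samples=100)
b = unc.normal(np.full(2, 5.0), std=np.ones(2), n_samples=100)

# Sample arrays of shape (3, 100) and (2, 100) are joined along axis 0.
both = np.concatenate([a, b])
print(both.shape, both.n_samples)   # (5,) 100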
Initialize CDS units module.
def _initialize_module(): """Initialize CDS units module.""" # Local imports to avoid polluting top-level namespace import numpy as np from astropy import units as u from astropy.constants import si as _si from . import core # The CDS format also supports power-of-2 prefixes as defined here: # http://physics.nist.gov/cuu/Units/binary.html prefixes = core.si_prefixes + core.binary_prefixes # CDS only uses the short prefixes prefixes = [(short, short, factor) for (short, long, factor) in prefixes] # The following units are defined in alphabetical order, directly from # here: https://vizier.unistra.fr/viz-bin/Unit mapping = [ (["A"], u.A, "Ampere"), (["a"], u.a, "year", ["P"]), (["a0"], _si.a0, "Bohr radius"), (["al"], u.lyr, "Light year", ["c", "d"]), (["lyr"], u.lyr, "Light year"), (["alpha"], _si.alpha, "Fine structure constant"), ((["AA", "Å"], ["Angstrom", "Angstroem"]), u.AA, "Angstrom"), (["arcmin", "arcm"], u.arcminute, "minute of arc"), (["arcsec", "arcs"], u.arcsecond, "second of arc"), (["atm"], _si.atm, "atmosphere"), (["AU", "au"], u.au, "astronomical unit"), (["bar"], u.bar, "bar"), (["barn"], u.barn, "barn"), (["bit"], u.bit, "bit"), (["byte"], u.byte, "byte"), (["C"], u.C, "Coulomb"), (["c"], _si.c, "speed of light", ["p"]), (["cal"], 4.1854 * u.J, "calorie"), (["cd"], u.cd, "candela"), (["ct"], u.ct, "count"), (["D"], u.D, "Debye (dipole)"), (["d"], u.d, "Julian day", ["c"]), ((["deg", "°"], ["degree"]), u.degree, "degree"), (["dyn"], u.dyn, "dyne"), (["e"], _si.e, "electron charge", ["m"]), (["eps0"], _si.eps0, "electric constant"), (["erg"], u.erg, "erg"), (["eV"], u.eV, "electron volt"), (["F"], u.F, "Farad"), (["G"], _si.G, "Gravitation constant"), (["g"], u.g, "gram"), (["gauss"], u.G, "Gauss"), (["geoMass", "Mgeo"], u.M_earth, "Earth mass"), (["H"], u.H, "Henry"), (["h"], u.h, "hour", ["p"]), (["hr"], u.h, "hour"), (["\\h"], _si.h, "Planck constant"), (["Hz"], u.Hz, "Hertz"), (["inch"], 0.0254 * u.m, "inch"), (["J"], u.J, "Joule"), (["JD"], u.d, "Julian day", ["M"]), (["jovMass", "Mjup"], u.M_jup, "Jupiter mass"), (["Jy"], u.Jy, "Jansky"), (["K"], u.K, "Kelvin"), (["k"], _si.k_B, "Boltzmann"), (["l"], u.l, "litre", ["a"]), (["lm"], u.lm, "lumen"), (["Lsun", "solLum"], u.solLum, "solar luminosity"), (["lx"], u.lx, "lux"), (["m"], u.m, "meter"), (["mag"], u.mag, "magnitude"), (["me"], _si.m_e, "electron mass"), (["min"], u.minute, "minute"), (["MJD"], u.d, "Julian day"), (["mmHg"], 133.322387415 * u.Pa, "millimeter of mercury"), (["mol"], u.mol, "mole"), (["mp"], _si.m_p, "proton mass"), (["Msun", "solMass"], u.solMass, "solar mass"), ((["mu0", "µ0"], []), _si.mu0, "magnetic constant"), (["muB"], _si.muB, "Bohr magneton"), (["N"], u.N, "Newton"), (["Ohm"], u.Ohm, "Ohm"), (["Pa"], u.Pa, "Pascal"), (["pc"], u.pc, "parsec"), (["ph"], u.ph, "photon"), (["pi"], u.Unit(np.pi), "π"), (["pix"], u.pix, "pixel"), (["ppm"], u.Unit(1e-6), "parts per million"), (["R"], _si.R, "gas constant"), (["rad"], u.radian, "radian"), (["Rgeo"], _si.R_earth, "Earth equatorial radius"), (["Rjup"], _si.R_jup, "Jupiter equatorial radius"), (["Rsun", "solRad"], u.solRad, "solar radius"), (["Ry"], u.Ry, "Rydberg"), (["S"], u.S, "Siemens"), (["s", "sec"], u.s, "second"), (["sr"], u.sr, "steradian"), (["Sun"], u.Sun, "solar unit"), (["T"], u.T, "Tesla"), (["t"], 1e3 * u.kg, "metric tonne", ["c"]), (["u"], _si.u, "atomic mass", ["da", "a"]), (["V"], u.V, "Volt"), (["W"], u.W, "Watt"), (["Wb"], u.Wb, "Weber"), (["yr"], u.a, "year"), ] for entry in mapping: if len(entry) == 3: names, unit, doc = entry 
excludes = [] else: names, unit, doc, excludes = entry core.def_unit( names, unit, prefixes=prefixes, namespace=_ns, doc=doc, exclude_prefixes=excludes, ) core.def_unit(["µas"], u.microarcsecond, doc="microsecond of arc", namespace=_ns) core.def_unit(["mas"], u.milliarcsecond, doc="millisecond of arc", namespace=_ns) core.def_unit( ["---", "-"], u.dimensionless_unscaled, doc="dimensionless and unscaled", namespace=_ns, ) core.def_unit(["%"], u.percent, doc="percent", namespace=_ns) # The Vizier "standard" defines this in units of "kg s-3", but # that may not make a whole lot of sense, so here we just define # it as its own new disconnected unit. core.def_unit(["Crab"], prefixes=prefixes, namespace=_ns, doc="Crab (X-ray) flux")
Enable CDS units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This will disable all of the "default" `astropy.units` units, since there are some namespace clashes between the two. This may be used with the ``with`` statement to enable CDS units only temporarily.
def enable(): """ Enable CDS units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This will disable all of the "default" `astropy.units` units, since there are some namespace clashes between the two. This may be used with the ``with`` statement to enable CDS units only temporarily. """ # Local imports to avoid cyclical import and polluting namespace import inspect from .core import set_enabled_units return set_enabled_units(inspect.getmodule(enable))
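Usage sketch. Parsing CDS unit strings never requires enabling; ``enable()`` matters for name resolution and searches such as ``find_equivalent_units``:

from astropy import units as u
from astropy.units import cds

print(u.Unit("0.1nm", format="cds"))   # CDS string parsing works regardless

# Temporarily swap in the CDS namespace; the default astropy units are
# disabled inside the block because of name clashes between the two sets.
with cds.enable():
    matches = u.m.find_equivalent_units()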
Given a list of sequences, modules or dictionaries of units, or single units, return a flat set of all the units found.
def _flatten_units_collection(items): """ Given a list of sequences, modules or dictionaries of units, or single units, return a flat set of all the units found. """ if not isinstance(items, list): items = [items] result = set() for item in items: if isinstance(item, UnitBase): result.add(item) else: if isinstance(item, dict): units = item.values() elif inspect.ismodule(item): units = vars(item).values() elif isiterable(item): units = item else: continue for unit in units: if isinstance(unit, UnitBase): result.add(unit) return result
Normalizes equivalencies ensuring each is a 4-tuple. The resulting tuple is of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs Raises ------ ValueError if an equivalency cannot be interpreted
def _normalize_equivalencies(equivalencies): """Normalizes equivalencies ensuring each is a 4-tuple. The resulting tuple is of the form:: (from_unit, to_unit, forward_func, backward_func) Parameters ---------- equivalencies : list of equivalency pairs Raises ------ ValueError if an equivalency cannot be interpreted """ if equivalencies is None: return [] normalized = [] for i, equiv in enumerate(equivalencies): if len(equiv) == 2: funit, tunit = equiv a = b = lambda x: x elif len(equiv) == 3: funit, tunit, a = equiv b = a elif len(equiv) == 4: funit, tunit, a, b = equiv else: raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}") if not ( funit is Unit(funit) and (tunit is None or tunit is Unit(tunit)) and callable(a) and callable(b) ): raise ValueError(f"Invalid equivalence entry {i}: {equiv!r}") normalized.append((funit, tunit, a, b)) return normalized
Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> with u.set_enabled_units([u.pc]): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ pc | 3.08568e+16 m | parsec , ] >>> u.m.find_equivalent_units() Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , micron | 1e-06 m | , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , ]
def set_enabled_units(units): """ Sets the units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be "enabled" for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> with u.set_enabled_units([u.pc]): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ pc | 3.08568e+16 m | parsec , ] >>> u.m.find_equivalent_units() Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , micron | 1e-06 m | , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , ] """ # get a context with a new registry, using equivalencies of the current one context = _UnitContext(equivalencies=get_current_unit_registry().equivalencies) # in this new current registry, enable the units requested get_current_unit_registry().set_enabled_units(units) return context
Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> from astropy.units import imperial >>> with u.add_enabled_units(imperial): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , ft | 0.3048 m | foot , fur | 201.168 m | furlong , inch | 0.0254 m | , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , mi | 1609.34 m | mile , micron | 1e-06 m | , mil | 2.54e-05 m | thou , nmi | 1852 m | nauticalmile, NM , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , yd | 0.9144 m | yard , ]
def add_enabled_units(units): """ Adds to the set of units enabled in the unit registry. These units are searched when using `UnitBase.find_equivalent_units`, for example. This may be used either permanently, or as a context manager using the ``with`` statement (see example below). Parameters ---------- units : list of sequence, dict, or module This is a list of things in which units may be found (sequences, dicts or modules), or units themselves. The entire set will be added to the "enabled" set for searching through by methods like `UnitBase.find_equivalent_units` and `UnitBase.compose`. Examples -------- >>> from astropy import units as u >>> from astropy.units import imperial >>> with u.add_enabled_units(imperial): ... u.m.find_equivalent_units() ... Primary name | Unit definition | Aliases [ AU | 1.49598e+11 m | au, astronomical_unit , Angstrom | 1e-10 m | AA, angstrom , cm | 0.01 m | centimeter , earthRad | 6.3781e+06 m | R_earth, Rearth , ft | 0.3048 m | foot , fur | 201.168 m | furlong , inch | 0.0254 m | , jupiterRad | 7.1492e+07 m | R_jup, Rjup, R_jupiter, Rjupiter , lsec | 2.99792e+08 m | lightsecond , lyr | 9.46073e+15 m | lightyear , m | irreducible | meter , mi | 1609.34 m | mile , micron | 1e-06 m | , mil | 2.54e-05 m | thou , nmi | 1852 m | nauticalmile, NM , pc | 3.08568e+16 m | parsec , solRad | 6.957e+08 m | R_sun, Rsun , yd | 0.9144 m | yard , ] """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further units requested get_current_unit_registry().add_enabled_units(units) return context
Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. Examples -------- Exponentiation normally requires dimensionless quantities. To avoid problems with complex phases:: >>> from astropy import units as u >>> with u.set_enabled_equivalencies(u.dimensionless_angles()): ... phase = 0.5 * u.cycle ... np.exp(1j*phase) # doctest: +FLOAT_CMP <Quantity -1.+1.2246468e-16j>
def set_enabled_equivalencies(equivalencies): """ Sets the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Use with care. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. Examples -------- Exponentiation normally requires dimensionless quantities. To avoid problems with complex phases:: >>> from astropy import units as u >>> with u.set_enabled_equivalencies(u.dimensionless_angles()): ... phase = 0.5 * u.cycle ... np.exp(1j*phase) # doctest: +FLOAT_CMP <Quantity -1.+1.2246468e-16j> """ # get a context with a new registry, using all units of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the equivalencies requested get_current_unit_registry().set_enabled_equivalencies(equivalencies) return context
Adds to the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Since no equivalencies are enabled by default, generally it is recommended to use `set_enabled_equivalencies`. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`.
def add_enabled_equivalencies(equivalencies): """ Adds to the equivalencies enabled in the unit registry. These equivalencies are used if no explicit equivalencies are given, both in unit conversion and in finding equivalent units. This is meant in particular for allowing angles to be dimensionless. Since no equivalencies are enabled by default, generally it is recommended to use `set_enabled_equivalencies`. Parameters ---------- equivalencies : list of tuple list of equivalent pairs, e.g., as returned by `~astropy.units.equivalencies.dimensionless_angles`. """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_equivalencies(equivalencies) return context
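Usage sketch, enabling the parallax equivalency (defined later in this document) so that conversions inside the block need no explicit ``equivalencies=`` argument:

from astropy import units as u

with u.add_enabled_equivalencies(u.parallax()):
    print((10 * u.mas).to(u.pc))   # 100 pc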
Set aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True
def set_enabled_aliases(aliases): """ Set aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Parameters ---------- aliases : dict of str, Unit The aliases to set. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.set_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().set_enabled_aliases(aliases) return context
Add aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Since no aliases are enabled by default, generally it is recommended to use `set_enabled_aliases`. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True
def add_enabled_aliases(aliases): """ Add aliases for units. This is useful for handling alternate spellings for units, or misspelled units in files one is trying to read. Since no aliases are enabled by default, generally it is recommended to use `set_enabled_aliases`. Parameters ---------- aliases : dict of str, Unit The aliases to add. The keys must be the string aliases, and values must be the `astropy.units.Unit` that the alias will be mapped to. Raises ------ ValueError If the alias already defines a different unit. Examples -------- To temporarily allow for a misspelled 'Angstroem' unit:: >>> from astropy import units as u >>> with u.add_enabled_aliases({'Angstroem': u.Angstrom}): ... print(u.Unit("Angstroem", parse_strict="raise") == u.Angstrom) True """ # get a context with a new registry, which is a copy of the current one context = _UnitContext(get_current_unit_registry()) # in this new current registry, enable the further equivalencies requested get_current_unit_registry().add_enabled_aliases(aliases) return context
This is used to reconstruct units when passed around by multiprocessing.
def _recreate_irreducible_unit(cls, names, registered): """ This is used to reconstruct units when passed around by multiprocessing. """ registry = get_current_unit_registry().registry if names[0] in registry: # If in local registry return that object. return registry[names[0]] else: # otherwise, recreate the unit. unit = cls(names) if registered: # If not in local registry but registered in origin registry, # enable unit in local registry. get_current_unit_registry().add_enabled_units([unit]) return unit
Set up all of the standard metric prefixes for a unit. This function should not be used directly, but instead use the `prefixes` kwarg on `def_unit`.

Parameters
----------
excludes : list of str, optional
    Any prefixes to exclude from creation to avoid namespace collisions.
namespace : dict, optional
    When provided, inject the unit (and all of its aliases) into the given namespace dictionary.
prefixes : list, optional
    When provided, it is a list of prefix definitions of the form: (short_names, long_names, factor)
def _add_prefixes(u, excludes=[], namespace=None, prefixes=False):
    """
    Set up all of the standard metric prefixes for a unit. This
    function should not be used directly, but instead use the
    `prefixes` kwarg on `def_unit`.

    Parameters
    ----------
    excludes : list of str, optional
        Any prefixes to exclude from creation to avoid namespace
        collisions.

    namespace : dict, optional
        When provided, inject the unit (and all of its aliases) into
        the given namespace dictionary.

    prefixes : list, optional
        When provided, it is a list of prefix definitions of the form:
            (short_names, long_names, factor)
    """
    if prefixes is True:
        prefixes = si_prefixes
    elif prefixes is False:
        prefixes = []

    for short, full, factor in prefixes:
        names = []
        format = {}
        for prefix in short:
            if prefix in excludes:
                continue

            for alias in u.short_names:
                names.append(prefix + alias)

                # This is a hack to use Greek mu as a prefix
                # for some formatters.
                if prefix == "u":
                    format["latex"] = r"\mu " + u.get_format_name("latex")
                    format["unicode"] = "\N{MICRO SIGN}" + u.get_format_name("unicode")

                for key, val in u._format.items():
                    format.setdefault(key, prefix + val)

        for prefix in full:
            if prefix in excludes:
                continue

            for alias in u.long_names:
                names.append(prefix + alias)

        if len(names):
            PrefixUnit(
                names,
                CompositeUnit(factor, [u], [1], _error_check=False),
                namespace=namespace,
                format=format,
            )
Factory function for defining new units. Parameters ---------- s : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance, optional The unit that this named unit represents. If not provided, a new `IrreducibleUnit` is created. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\Omega'} prefixes : bool or list, optional When `True`, generate all of the SI prefixed versions of the unit as well. For example, for a given unit ``m``, will generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of prefix definitions of the form: (short_names, long_tables, factor) Default is `False`. This function always returns the base unit object, even if multiple scaled versions of the unit were created. exclude_prefixes : list of str, optional If any of the SI prefixes need to be excluded, they may be listed here. For example, ``Pa`` can be interpreted either as "petaannum" or "Pascal". Therefore, when defining the prefixes for ``a``, ``exclude_prefixes`` should be set to ``["P"]``. namespace : dict, optional When provided, inject the unit (and all of its aliases and prefixes), into the given namespace dictionary. Returns ------- unit : `~astropy.units.UnitBase` The newly-defined unit, or a matching unit that was already defined.
def def_unit( s, represents=None, doc=None, format=None, prefixes=False, exclude_prefixes=[], namespace=None, ): """ Factory function for defining new units. Parameters ---------- s : str or list of str The name of the unit. If a list, the first element is the canonical (short) name, and the rest of the elements are aliases. represents : UnitBase instance, optional The unit that this named unit represents. If not provided, a new `IrreducibleUnit` is created. doc : str, optional A docstring describing the unit. format : dict, optional A mapping to format-specific representations of this unit. For example, for the ``Ohm`` unit, it might be nice to have it displayed as ``\\Omega`` by the ``latex`` formatter. In that case, `format` argument should be set to:: {'latex': r'\\Omega'} prefixes : bool or list, optional When `True`, generate all of the SI prefixed versions of the unit as well. For example, for a given unit ``m``, will generate ``mm``, ``cm``, ``km``, etc. When a list, it is a list of prefix definitions of the form: (short_names, long_tables, factor) Default is `False`. This function always returns the base unit object, even if multiple scaled versions of the unit were created. exclude_prefixes : list of str, optional If any of the SI prefixes need to be excluded, they may be listed here. For example, ``Pa`` can be interpreted either as "petaannum" or "Pascal". Therefore, when defining the prefixes for ``a``, ``exclude_prefixes`` should be set to ``["P"]``. namespace : dict, optional When provided, inject the unit (and all of its aliases and prefixes), into the given namespace dictionary. Returns ------- unit : `~astropy.units.UnitBase` The newly-defined unit, or a matching unit that was already defined. """ if represents is not None: result = Unit(s, represents, namespace=namespace, doc=doc, format=format) else: result = IrreducibleUnit(s, namespace=namespace, doc=doc, format=format) if prefixes: _add_prefixes( result, excludes=exclude_prefixes, namespace=namespace, prefixes=prefixes ) return result
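Two small sketches of the factory, adapted from typical usage; the unit names here are made up for illustration:

from astropy import units as u

# A named unit defined in terms of existing ones.
bakers_fortnight = u.def_unit("bakers_fortnight", 13 * u.day)
print((1 * bakers_fortnight).to(u.h))   # 312.0 h

# Without `represents`, a brand-new IrreducibleUnit is created.
titter = u.def_unit("titter")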
Validate value is acceptable for conversion purposes. Will convert into an array if not a scalar, and can be converted into an array Parameters ---------- value : int or float value, or sequence of such values Returns ------- Scalar value or numpy array Raises ------ ValueError If value is not as expected
def _condition_arg(value): """ Validate value is acceptable for conversion purposes. Will convert into an array if not a scalar, and can be converted into an array Parameters ---------- value : int or float value, or sequence of such values Returns ------- Scalar value or numpy array Raises ------ ValueError If value is not as expected """ if isinstance(value, (np.ndarray, float, int, complex, np.void)): return value avalue = np.array(value) if avalue.dtype.kind not in ["i", "f", "c"]: raise ValueError( "Value not scalar compatible or convertible to " "an int, float, or complex array" ) return avalue
Function that just multiplies the value by unity. This is a separate function so it can be recognized and discarded in unit conversion.
def unit_scale_converter(val): """Function that just multiplies the value by unity. This is a separate function so it can be recognized and discarded in unit conversion. """ return 1.0 * _condition_arg(val)
From a list of target units (either as strings or unit objects) and physical types, return a list of Unit objects.
def _get_allowed_units(targets): """ From a list of target units (either as strings or unit objects) and physical types, return a list of Unit objects. """ allowed_units = [] for target in targets: try: unit = Unit(target) except (TypeError, ValueError): try: unit = get_physical_type(target)._unit except (TypeError, ValueError, KeyError): # KeyError for Enum raise ValueError(f"Invalid unit or physical type {target!r}.") from None allowed_units.append(unit) return allowed_units
Validates the object passed in to the wrapped function, ``arg``, with target unit or physical type, ``target``.
def _validate_arg_value( param_name, func_name, arg, targets, equivalencies, strict_dimensionless=False ): """ Validates the object passed in to the wrapped function, ``arg``, with target unit or physical type, ``target``. """ if len(targets) == 0: return allowed_units = _get_allowed_units(targets) # If dimensionless is an allowed unit and the argument is unit-less, # allow numbers or numpy arrays with numeric dtypes if ( dimensionless_unscaled in allowed_units and not strict_dimensionless and not hasattr(arg, "unit") ): if isinstance(arg, Number): return elif isinstance(arg, np.ndarray) and np.issubdtype(arg.dtype, np.number): return for allowed_unit in allowed_units: try: if arg.unit.is_equivalent(allowed_unit, equivalencies=equivalencies): break except AttributeError: # Either there is no .unit or no .is_equivalent if hasattr(arg, "unit"): error_msg = "a 'unit' attribute without an 'is_equivalent' method" else: error_msg = "no 'unit' attribute" raise TypeError( f"Argument '{param_name}' to function '{func_name}'" f" has {error_msg}. You should pass in an astropy " "Quantity instead." ) else: error_msg = ( f"Argument '{param_name}' to function '{func_name}' must " "be in units convertible to" ) if len(targets) > 1: targ_names = ", ".join([f"'{targ}'" for targ in targets]) raise UnitsError(f"{error_msg} one of: {targ_names}.") else: raise UnitsError(f"{error_msg} '{targets[0]}'.")
Enable deprecated units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable deprecated units only temporarily.
def enable(): """ Enable deprecated units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable deprecated units only temporarily. """ import inspect # Local import to avoid cyclical import # Local import to avoid polluting namespace from .core import add_enabled_units return add_enabled_units(inspect.getmodule(enable))
Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1). It is special compared to other equivalency pairs in that it allows this independent of the power to which the angle is raised, and independent of whether it is part of a more complicated unit.
def dimensionless_angles(): """Allow angles to be equivalent to dimensionless (with 1 rad = 1 m/m = 1). It is special compared to other equivalency pairs in that it allows this independent of the power to which the angle is raised, and independent of whether it is part of a more complicated unit. """ return Equivalency([(si.radian, None)], "dimensionless_angles")
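For example, with this equivalency radians simply drop out of compound units:

from astropy import units as u

# rad is treated as 1, so angular frequency converts directly to 1/s.
# Note that no 2*pi factor is applied by this equivalency.
print((1.0 * u.rad / u.s).to(u.Hz, equivalencies=u.dimensionless_angles()))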
Allow logarithmic units to be converted to dimensionless fractions.
def logarithmic(): """Allow logarithmic units to be converted to dimensionless fractions.""" return Equivalency( [(dimensionless_unscaled, function_units.dex, np.log10, lambda x: 10.0**x)], "logarithmic", )
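For example (the forward direction is log10, the backward direction is 10**x):

from astropy import units as u

print((100.0 * u.one).to(u.dex, equivalencies=u.logarithmic()))   # 2.0 dex
print((3.0 * u.dex).to(u.one, equivalencies=u.logarithmic()))     # 1000.0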
Returns a list of equivalence pairs that handle the conversion between parallax angle and distance.
def parallax(): """ Returns a list of equivalence pairs that handle the conversion between parallax angle and distance. """ def parallax_converter(x): x = np.asanyarray(x) d = 1 / x if isiterable(d): d[d < 0] = np.nan return d else: if d < 0: return np.array(np.nan) else: return d return Equivalency( [(si.arcsecond, astrophys.parsec, parallax_converter)], "parallax" )
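A short usage sketch (values follow directly from the 1/x relation):

import astropy.units as u

(10.0 * u.mas).to(u.pc, equivalencies=u.parallax())    # ~100 pc
(1.0 * u.pc).to(u.arcsec, equivalencies=u.parallax())  # 1 arcsec
# Negative parallaxes map to NaN rather than to negative distances.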
Returns a list of equivalence pairs that handle spectral wavelength, wave number, frequency, and energy equivalencies. Allows conversions between wavelength units, wave number units, frequency units, and energy units as they relate to light. There are two types of wave number: * spectroscopic - :math:`1 / \lambda` (per meter) * angular - :math:`2 \pi / \lambda` (radian per meter)
def spectral(): """ Returns a list of equivalence pairs that handle spectral wavelength, wave number, frequency, and energy equivalencies. Allows conversions between wavelength units, wave number units, frequency units, and energy units as they relate to light. There are two types of wave number: * spectroscopic - :math:`1 / \\lambda` (per meter) * angular - :math:`2 \\pi / \\lambda` (radian per meter) """ c = _si.c.value h = _si.h.value hc = h * c two_pi = 2.0 * np.pi inv_m_spec = si.m**-1 inv_m_ang = si.radian / si.m return Equivalency( [ (si.m, si.Hz, lambda x: c / x), (si.m, si.J, lambda x: hc / x), (si.Hz, si.J, lambda x: h * x, lambda x: x / h), (si.m, inv_m_spec, lambda x: 1.0 / x), (si.Hz, inv_m_spec, lambda x: x / c, lambda x: c * x), (si.J, inv_m_spec, lambda x: x / hc, lambda x: hc * x), (inv_m_spec, inv_m_ang, lambda x: x * two_pi, lambda x: x / two_pi), (si.m, inv_m_ang, lambda x: two_pi / x), (si.Hz, inv_m_ang, lambda x: two_pi * x / c, lambda x: c * x / two_pi), (si.J, inv_m_ang, lambda x: x * two_pi / hc, lambda x: hc * x / two_pi), ], "spectral", )
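An illustrative usage sketch; the values are approximate and quoted only to show the direction of each conversion.

import astropy.units as u

(21.0 * u.cm).to(u.GHz, equivalencies=u.spectral())         # ~1.42 GHz (the HI line)
(1.0 * u.eV).to(u.nm, equivalencies=u.spectral())           # ~1239.8 nm
(1.0 / u.cm).to(u.rad / u.cm, equivalencies=u.spectral())   # ~6.283 rad / cm (spectroscopic -> angular wave number)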
Returns a list of equivalence pairs that handle spectral density with regard to wavelength and frequency. Parameters ---------- wav : `~astropy.units.Quantity` `~astropy.units.Quantity` associated with values being converted (e.g., wavelength or frequency). factor : array_like If ``wav`` is a |Unit| instead of a |Quantity| then ``factor`` is the value ``wav`` will be multiplied with to convert it to a |Quantity|. .. deprecated:: 7.0 ``factor`` is deprecated. Pass in ``wav`` as a |Quantity|, not as a |Unit|.
def spectral_density(wav, factor=None):
    """
    Returns a list of equivalence pairs that handle spectral density
    with regard to wavelength and frequency.

    Parameters
    ----------
    wav : `~astropy.units.Quantity`
        `~astropy.units.Quantity` associated with values being converted
        (e.g., wavelength or frequency).
    factor : array_like
        If ``wav`` is a |Unit| instead of a |Quantity| then ``factor`` is
        the value ``wav`` will be multiplied with to convert it to a
        |Quantity|.

        .. deprecated:: 7.0

            ``factor`` is deprecated. Pass in ``wav`` as a |Quantity|, not
            as a |Unit|.
    """
    from .core import UnitBase

    if isinstance(wav, UnitBase):
        if factor is None:
            raise ValueError("If `wav` is specified as a unit, `factor` should be set")
        wav = factor * wav  # Convert to Quantity

    c_Aps = _si.c.to_value(si.AA / si.s)  # Angstrom/s
    h_cgs = _si.h.cgs.value  # erg * s
    hc = c_Aps * h_cgs

    # flux density
    f_la = cgs.erg / si.angstrom / si.cm**2 / si.s
    f_nu = cgs.erg / si.Hz / si.cm**2 / si.s
    nu_f_nu = cgs.erg / si.cm**2 / si.s
    la_f_la = nu_f_nu
    phot_f_la = astrophys.photon / (si.cm**2 * si.s * si.AA)
    phot_f_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz)
    la_phot_f_la = astrophys.photon / (si.cm**2 * si.s)

    # luminosity density
    L_nu = cgs.erg / si.s / si.Hz
    L_la = cgs.erg / si.s / si.angstrom
    nu_L_nu = cgs.erg / si.s
    la_L_la = nu_L_nu
    phot_L_la = astrophys.photon / (si.s * si.AA)
    phot_L_nu = astrophys.photon / (si.s * si.Hz)

    # surface brightness (flux equiv)
    S_la = cgs.erg / si.angstrom / si.cm**2 / si.s / si.sr
    S_nu = cgs.erg / si.Hz / si.cm**2 / si.s / si.sr
    nu_S_nu = cgs.erg / si.cm**2 / si.s / si.sr
    la_S_la = nu_S_nu
    phot_S_la = astrophys.photon / (si.cm**2 * si.s * si.AA * si.sr)
    phot_S_nu = astrophys.photon / (si.cm**2 * si.s * si.Hz * si.sr)

    # surface brightness (luminosity equiv)
    SL_nu = cgs.erg / si.s / si.Hz / si.sr
    SL_la = cgs.erg / si.s / si.angstrom / si.sr
    nu_SL_nu = cgs.erg / si.s / si.sr
    la_SL_la = nu_SL_nu
    phot_SL_la = astrophys.photon / (si.s * si.AA * si.sr)
    phot_SL_nu = astrophys.photon / (si.s * si.Hz * si.sr)

    def f_la_to_f_nu(x):
        return x * (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)

    def f_la_from_f_nu(x):
        return x / (wav.to_value(si.AA, spectral()) ** 2 / c_Aps)

    def f_nu_to_nu_f_nu(x):
        return x * wav.to_value(si.Hz, spectral())

    def f_nu_from_nu_f_nu(x):
        return x / wav.to_value(si.Hz, spectral())

    def f_la_to_la_f_la(x):
        return x * wav.to_value(si.AA, spectral())

    def f_la_from_la_f_la(x):
        return x / wav.to_value(si.AA, spectral())

    def phot_f_la_to_f_la(x):
        return hc * x / wav.to_value(si.AA, spectral())

    def phot_f_la_from_f_la(x):
        return x * wav.to_value(si.AA, spectral()) / hc

    def phot_f_la_to_f_nu(x):
        return h_cgs * x * wav.to_value(si.AA, spectral())

    def phot_f_la_from_f_nu(x):
        return x / (wav.to_value(si.AA, spectral()) * h_cgs)

    def phot_f_la_to_phot_f_nu(x):
        return x * wav.to_value(si.AA, spectral()) ** 2 / c_Aps

    def phot_f_la_from_phot_f_nu(x):
        return c_Aps * x / wav.to_value(si.AA, spectral()) ** 2

    phot_f_nu_to_f_nu = phot_f_la_to_f_la
    phot_f_nu_from_f_nu = phot_f_la_from_f_la

    def phot_f_nu_to_f_la(x):
        return x * hc * c_Aps / wav.to_value(si.AA, spectral()) ** 3

    def phot_f_nu_from_f_la(x):
        return x * wav.to_value(si.AA, spectral()) ** 3 / (hc * c_Aps)

    # for luminosity density
    L_nu_to_nu_L_nu = f_nu_to_nu_f_nu
    L_nu_from_nu_L_nu = f_nu_from_nu_f_nu
    L_la_to_la_L_la = f_la_to_la_f_la
    L_la_from_la_L_la = f_la_from_la_f_la

    phot_L_la_to_L_la = phot_f_la_to_f_la
    phot_L_la_from_L_la = phot_f_la_from_f_la
    phot_L_la_to_L_nu = phot_f_la_to_f_nu
    phot_L_la_from_L_nu = phot_f_la_from_f_nu
    phot_L_la_to_phot_L_nu = phot_f_la_to_phot_f_nu
    phot_L_la_from_phot_L_nu = phot_f_la_from_phot_f_nu
    phot_L_nu_to_L_nu = phot_f_nu_to_f_nu
    phot_L_nu_from_L_nu = phot_f_nu_from_f_nu
    phot_L_nu_to_L_la = phot_f_nu_to_f_la
    phot_L_nu_from_L_la = phot_f_nu_from_f_la

    return Equivalency(
        [
            # flux
            (f_la, f_nu, f_la_to_f_nu, f_la_from_f_nu),
            (f_nu, nu_f_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
            (f_la, la_f_la, f_la_to_la_f_la, f_la_from_la_f_la),
            (phot_f_la, f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
            (phot_f_la, f_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
            (phot_f_la, phot_f_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
            (phot_f_nu, f_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
            (phot_f_nu, f_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
            # integrated flux
            (la_phot_f_la, la_f_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
            # luminosity
            (L_la, L_nu, f_la_to_f_nu, f_la_from_f_nu),
            (L_nu, nu_L_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
            (L_la, la_L_la, L_la_to_la_L_la, L_la_from_la_L_la),
            (phot_L_la, L_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
            (phot_L_la, L_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
            (phot_L_la, phot_L_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
            (phot_L_nu, L_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
            (phot_L_nu, L_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
            # surface brightness (flux equiv)
            (S_la, S_nu, f_la_to_f_nu, f_la_from_f_nu),
            (S_nu, nu_S_nu, f_nu_to_nu_f_nu, f_nu_from_nu_f_nu),
            (S_la, la_S_la, f_la_to_la_f_la, f_la_from_la_f_la),
            (phot_S_la, S_la, phot_f_la_to_f_la, phot_f_la_from_f_la),
            (phot_S_la, S_nu, phot_f_la_to_f_nu, phot_f_la_from_f_nu),
            (phot_S_la, phot_S_nu, phot_f_la_to_phot_f_nu, phot_f_la_from_phot_f_nu),
            (phot_S_nu, S_nu, phot_f_nu_to_f_nu, phot_f_nu_from_f_nu),
            (phot_S_nu, S_la, phot_f_nu_to_f_la, phot_f_nu_from_f_la),
            # surface brightness (luminosity equiv)
            (SL_la, SL_nu, f_la_to_f_nu, f_la_from_f_nu),
            (SL_nu, nu_SL_nu, L_nu_to_nu_L_nu, L_nu_from_nu_L_nu),
            (SL_la, la_SL_la, L_la_to_la_L_la, L_la_from_la_L_la),
            (phot_SL_la, SL_la, phot_L_la_to_L_la, phot_L_la_from_L_la),
            (phot_SL_la, SL_nu, phot_L_la_to_L_nu, phot_L_la_from_L_nu),
            (phot_SL_la, phot_SL_nu, phot_L_la_to_phot_L_nu, phot_L_la_from_phot_L_nu),
            (phot_SL_nu, SL_nu, phot_L_nu_to_L_nu, phot_L_nu_from_L_nu),
            (phot_SL_nu, SL_la, phot_L_nu_to_L_la, phot_L_nu_from_L_la),
        ],
        "spectral_density",
        {"wav": wav, "factor": factor},
    )
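A usage sketch adapted from common astropy examples (the flux value is illustrative): ``wav`` anchors the wavelength at which the density conversion is evaluated.

import astropy.units as u

f_lam = 3.21e-13 * u.erg / u.cm**2 / u.s / u.AA
f_lam.to(u.erg / u.cm**2 / u.s / u.Hz,
         equivalencies=u.spectral_density(3500 * u.AA))
# -> ~1.31e-24 erg / (cm2 Hz s), i.e. f_nu = f_lambda * lambda**2 / c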
Return the equivalency pairs for the radio convention for velocity. The radio convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> radio_CO_equiv = u.doppler_radio(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv) >>> radio_velocity # doctest: +FLOAT_CMP <Quantity -31.209092088877583 km / s>
def doppler_radio(rest): r""" Return the equivalency pairs for the radio convention for velocity. The radio convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f_0} ; f(V) = f_0 ( 1 - V/c )` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> radio_CO_equiv = u.doppler_radio(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> radio_velocity = measured_freq.to(u.km/u.s, equivalencies=radio_CO_equiv) >>> radio_velocity # doctest: +FLOAT_CMP <Quantity -31.209092088877583 km / s> """ assert_is_spectral_unit(rest) ckms = _si.c.to_value("km/s") def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return (restfreq - x) / (restfreq) * ckms def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x / ckms return restfreq * (1 - voverc) def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return (x - restwav) / (x) * ckms def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return restwav * ckms / (ckms - x) def to_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) return (resten - x) / (resten) * ckms def from_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) voverc = x / ckms return resten * (1 - voverc) return Equivalency( [ (si.Hz, si.km / si.s, to_vel_freq, from_vel_freq), (si.AA, si.km / si.s, to_vel_wav, from_vel_wav), (si.eV, si.km / si.s, to_vel_en, from_vel_en), ], "doppler_radio", {"rest": rest}, )
Return the equivalency pairs for the optical convention for velocity. The optical convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> optical_CO_equiv = u.doppler_optical(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv) >>> optical_velocity # doctest: +FLOAT_CMP <Quantity -31.20584348799674 km / s>
def doppler_optical(rest): r""" Return the equivalency pairs for the optical convention for velocity. The optical convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0 - f}{f } ; f(V) = f_0 ( 1 + V/c )^{-1}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> optical_CO_equiv = u.doppler_optical(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> optical_velocity = measured_freq.to(u.km/u.s, equivalencies=optical_CO_equiv) >>> optical_velocity # doctest: +FLOAT_CMP <Quantity -31.20584348799674 km / s> """ assert_is_spectral_unit(rest) ckms = _si.c.to_value("km/s") def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return ckms * (restfreq - x) / x def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x / ckms return restfreq / (1 + voverc) def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return ckms * (x / restwav - 1) def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) voverc = x / ckms return restwav * (1 + voverc) def to_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) return ckms * (resten - x) / x def from_vel_en(x): resten = rest.to_value(si.eV, equivalencies=spectral()) voverc = x / ckms return resten / (1 + voverc) return Equivalency( [ (si.Hz, si.km / si.s, to_vel_freq, from_vel_freq), (si.AA, si.km / si.s, to_vel_wav, from_vel_wav), (si.eV, si.km / si.s, to_vel_en, from_vel_en), ], "doppler_optical", {"rest": rest}, )
Return the equivalency pairs for the relativistic convention for velocity. The full relativistic convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv) >>> relativistic_velocity # doctest: +FLOAT_CMP <Quantity -31.207467619351537 km / s> >>> measured_velocity = 1250 * u.km/u.s >>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv) >>> relativistic_frequency # doctest: +FLOAT_CMP <Quantity 114.79156866993588 GHz> >>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv) >>> relativistic_wavelength # doctest: +FLOAT_CMP <Quantity 2.6116243681798923 mm>
def doppler_relativistic(rest): r""" Return the equivalency pairs for the relativistic convention for velocity. The full relativistic convention for the relation between velocity and frequency is: :math:`V = c \frac{f_0^2 - f^2}{f_0^2 + f^2} ; f(V) = f_0 \frac{\left(1 - (V/c)^2\right)^{1/2}}{(1+V/c)}` Parameters ---------- rest : `~astropy.units.Quantity` Any quantity supported by the standard spectral equivalencies (wavelength, energy, frequency, wave number). References ---------- `NRAO site defining the conventions <https://www.gb.nrao.edu/~fghigo/gbtdoc/doppler.html>`_ Examples -------- >>> import astropy.units as u >>> CO_restfreq = 115.27120*u.GHz # rest frequency of 12 CO 1-0 in GHz >>> relativistic_CO_equiv = u.doppler_relativistic(CO_restfreq) >>> measured_freq = 115.2832*u.GHz >>> relativistic_velocity = measured_freq.to(u.km/u.s, equivalencies=relativistic_CO_equiv) >>> relativistic_velocity # doctest: +FLOAT_CMP <Quantity -31.207467619351537 km / s> >>> measured_velocity = 1250 * u.km/u.s >>> relativistic_frequency = measured_velocity.to(u.GHz, equivalencies=relativistic_CO_equiv) >>> relativistic_frequency # doctest: +FLOAT_CMP <Quantity 114.79156866993588 GHz> >>> relativistic_wavelength = measured_velocity.to(u.mm, equivalencies=relativistic_CO_equiv) >>> relativistic_wavelength # doctest: +FLOAT_CMP <Quantity 2.6116243681798923 mm> """ assert_is_spectral_unit(rest) ckms = _si.c.to_value("km/s") def to_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) return (restfreq**2 - x**2) / (restfreq**2 + x**2) * ckms def from_vel_freq(x): restfreq = rest.to_value(si.Hz, equivalencies=spectral()) voverc = x / ckms return restfreq * ((1 - voverc) / (1 + (voverc))) ** 0.5 def to_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) return (x**2 - restwav**2) / (restwav**2 + x**2) * ckms def from_vel_wav(x): restwav = rest.to_value(si.AA, spectral()) voverc = x / ckms return restwav * ((1 + voverc) / (1 - voverc)) ** 0.5 def to_vel_en(x): resten = rest.to_value(si.eV, spectral()) return (resten**2 - x**2) / (resten**2 + x**2) * ckms def from_vel_en(x): resten = rest.to_value(si.eV, spectral()) voverc = x / ckms return resten * ((1 - voverc) / (1 + (voverc))) ** 0.5 return Equivalency( [ (si.Hz, si.km / si.s, to_vel_freq, from_vel_freq), (si.AA, si.km / si.s, to_vel_wav, from_vel_wav), (si.eV, si.km / si.s, to_vel_en, from_vel_en), ], "doppler_relativistic", {"rest": rest}, )
Returns the equivalence between Doppler redshift (unitless) and radial velocity. .. note:: This equivalency is not compatible with cosmological redshift in `astropy.cosmology.units`.
def doppler_redshift(): """ Returns the equivalence between Doppler redshift (unitless) and radial velocity. .. note:: This equivalency is not compatible with cosmological redshift in `astropy.cosmology.units`. """ rv_unit = si.km / si.s C_KMS = _si.c.to_value(rv_unit) def convert_z_to_rv(z): zponesq = (1 + z) ** 2 return C_KMS * (zponesq - 1) / (zponesq + 1) def convert_rv_to_z(rv): beta = rv / C_KMS return np.sqrt((1 + beta) / (1 - beta)) - 1 return Equivalency( [(dimensionless_unscaled, rv_unit, convert_z_to_rv, convert_rv_to_z)], "doppler_redshift", )
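A short usage sketch; the velocity is approximate and follows from the relativistic relation in the converter.

import astropy.units as u

z = 0.01 * u.dimensionless_unscaled
z.to(u.km / u.s, equivalencies=u.doppler_redshift())  # ~2983 km / s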
Returns the equivalence between amu and molar mass.
def molar_mass_amu(): """ Returns the equivalence between amu and molar mass. """ return Equivalency([(si.g / si.mol, misc.u)], "molar_mass_amu")
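An illustrative one-liner: the mapping between g/mol and atomic mass units is one-to-one in value.

import astropy.units as u

(12.0 * u.g / u.mol).to(u.u, equivalencies=u.molar_mass_amu())  # 12 u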
Returns a list of equivalence pairs that handle the conversion between mass and energy.
def mass_energy(): """ Returns a list of equivalence pairs that handle the conversion between mass and energy. """ c2 = _si.c.value**2 return Equivalency( [ (si.kg, si.J, lambda x: x * c2, lambda x: x / c2), (si.kg / si.m**2, si.J / si.m**2, lambda x: x * c2, lambda x: x / c2), (si.kg / si.m**3, si.J / si.m**3, lambda x: x * c2, lambda x: x / c2), (si.kg / si.s, si.J / si.s, lambda x: x * c2, lambda x: x / c2), ], "mass_energy", )
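A quick usage sketch of the E = mc**2 conversion (value approximate):

import astropy.units as u

(1.0 * u.kg).to(u.J, equivalencies=u.mass_energy())  # ~8.988e16 J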
Defines the conversion between Jy/sr and "brightness temperature", :math:`T_B`, in Kelvins. The brightness temperature is a unit very commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy" (Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google books <https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__). :math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)` If the input is in Jy/beam or Jy (assuming it came from a single beam), the beam area is essential for this computation: the brightness temperature is inversely proportional to the beam area. Parameters ---------- frequency : `~astropy.units.Quantity` The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g., frequency or wavelength). The variable is named 'frequency' because it is more commonly used in radio astronomy. BACKWARD COMPATIBILITY NOTE: previous versions of the brightness temperature equivalency used the keyword ``disp``, which is no longer supported. beam_area : `~astropy.units.Quantity` ['solid angle'] Beam area in angular units, i.e. steradian equivalent Examples -------- Arecibo C-band beam:: >>> import numpy as np >>> from astropy import units as u >>> beam_sigma = 50*u.arcsec >>> beam_area = 2*np.pi*(beam_sigma)**2 >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 3.526295144567176 K> VLA synthetic beam:: >>> bmaj = 15*u.arcsec >>> bmin = 15*u.arcsec >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5 >>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2) >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 217.2658703625732 K> Any generic surface brightness: >>> surf_brightness = 1e6*u.MJy/u.sr >>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP <Quantity 130.1931904778803 K>
def brightness_temperature(frequency, beam_area=None): r""" Defines the conversion between Jy/sr and "brightness temperature", :math:`T_B`, in Kelvins. The brightness temperature is a unit very commonly used in radio astronomy. See, e.g., "Tools of Radio Astronomy" (Wilson 2009) eqn 8.16 and eqn 8.19 (these pages are available on `google books <https://books.google.com/books?id=9KHw6R8rQEMC&pg=PA179&source=gbs_toc_r&cad=4#v=onepage&q&f=false>`__). :math:`T_B \equiv S_\nu / \left(2 k \nu^2 / c^2 \right)` If the input is in Jy/beam or Jy (assuming it came from a single beam), the beam area is essential for this computation: the brightness temperature is inversely proportional to the beam area. Parameters ---------- frequency : `~astropy.units.Quantity` The observed ``spectral`` equivalent `~astropy.units.Unit` (e.g., frequency or wavelength). The variable is named 'frequency' because it is more commonly used in radio astronomy. BACKWARD COMPATIBILITY NOTE: previous versions of the brightness temperature equivalency used the keyword ``disp``, which is no longer supported. beam_area : `~astropy.units.Quantity` ['solid angle'] Beam area in angular units, i.e. steradian equivalent Examples -------- Arecibo C-band beam:: >>> import numpy as np >>> from astropy import units as u >>> beam_sigma = 50*u.arcsec >>> beam_area = 2*np.pi*(beam_sigma)**2 >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (1*u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 3.526295144567176 K> VLA synthetic beam:: >>> bmaj = 15*u.arcsec >>> bmin = 15*u.arcsec >>> fwhm_to_sigma = 1./(8*np.log(2))**0.5 >>> beam_area = 2.*np.pi*(bmaj*bmin*fwhm_to_sigma**2) >>> freq = 5*u.GHz >>> equiv = u.brightness_temperature(freq) >>> (u.Jy/beam_area).to(u.K, equivalencies=equiv) # doctest: +FLOAT_CMP <Quantity 217.2658703625732 K> Any generic surface brightness: >>> surf_brightness = 1e6*u.MJy/u.sr >>> surf_brightness.to(u.K, equivalencies=u.brightness_temperature(500*u.GHz)) # doctest: +FLOAT_CMP <Quantity 130.1931904778803 K> """ nu = frequency.to(si.GHz, spectral()) factor_Jy = (2 * _si.k_B * si.K * nu**2 / _si.c**2).to(astrophys.Jy).value factor_K = (astrophys.Jy / (2 * _si.k_B * nu**2 / _si.c**2)).to(si.K).value if beam_area is not None: beam = beam_area.to_value(si.sr) def convert_Jy_to_K(x_jybm): return x_jybm / beam / factor_Jy def convert_K_to_Jy(x_K): return x_K * beam / factor_K return Equivalency( [ (astrophys.Jy, si.K, convert_Jy_to_K, convert_K_to_Jy), (astrophys.Jy / astrophys.beam, si.K, convert_Jy_to_K, convert_K_to_Jy), ], "brightness_temperature", {"frequency": frequency, "beam_area": beam_area}, ) else: def convert_JySr_to_K(x_jysr): return x_jysr / factor_Jy def convert_K_to_JySr(x_K): return x_K / factor_K # multiplied by 1x for 1 steradian return Equivalency( [(astrophys.Jy / si.sr, si.K, convert_JySr_to_K, convert_K_to_JySr)], "brightness_temperature", {"frequency": frequency, "beam_area": beam_area}, )
Convert between the ``beam`` unit, which is commonly used to express the area of a radio telescope resolution element, and an area on the sky. This equivalency also supports direct conversion between ``Jy/beam`` and ``Jy/steradian`` units, since that is a common operation. Parameters ---------- beam_area : unit-like The area of the beam in angular area units (e.g., steradians) Must have angular area equivalent units.
def beam_angular_area(beam_area): """ Convert between the ``beam`` unit, which is commonly used to express the area of a radio telescope resolution element, and an area on the sky. This equivalency also supports direct conversion between ``Jy/beam`` and ``Jy/steradian`` units, since that is a common operation. Parameters ---------- beam_area : unit-like The area of the beam in angular area units (e.g., steradians) Must have angular area equivalent units. """ return Equivalency( [ (astrophys.beam, Unit(beam_area)), (astrophys.beam**-1, Unit(beam_area) ** -1), (astrophys.Jy / astrophys.beam, astrophys.Jy / Unit(beam_area)), ], "beam_angular_area", {"beam_area": beam_area}, )
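A hedged usage sketch; the Gaussian beam width below is illustrative, not a real instrument value.

import numpy as np
import astropy.units as u

beam_sigma = 50 * u.arcsec                 # illustrative beam width
beam_area = 2 * np.pi * beam_sigma**2      # effective beam solid angle
(1.0 * u.Jy / u.beam).to(u.Jy / u.sr, equivalencies=u.beam_angular_area(beam_area))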
Defines the conversion between Jy/sr and "thermodynamic temperature", :math:`T_{CMB}`, in Kelvins. The thermodynamic temperature is a unit very commonly used in cosmology. See eqn 8 in [1].

:math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2  f(\nu) \right)`

with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`

where :math:`x = h \nu / k T`

Parameters
----------
frequency : `~astropy.units.Quantity`
    The observed `spectral` equivalent `~astropy.units.Unit` (e.g., frequency or wavelength). Must have spectral units.
T_cmb : `~astropy.units.Quantity` ['temperature'] or None
    The CMB temperature at z=0. If `None`, the default cosmology will be used to get this temperature. Must have units of temperature.

Notes
-----
For broad-band receivers this conversion does not hold, as it depends strongly on the frequency.

References
----------
.. [1] Planck 2013 results. IX. HFI spectral response
   https://arxiv.org/abs/1303.5070

Examples
--------
Planck HFI 143 GHz::

    >>> from astropy import units as u
    >>> from astropy.cosmology import Planck15
    >>> freq = 143 * u.GHz
    >>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
    >>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
    <Quantity 0.37993172 MJy / sr>

def thermodynamic_temperature(frequency, T_cmb=None):
    r"""Defines the conversion between Jy/sr and "thermodynamic temperature",
    :math:`T_{CMB}`, in Kelvins.  The thermodynamic temperature is a unit very
    commonly used in cosmology. See eqn 8 in [1].

    :math:`K_{CMB} \equiv I_\nu / \left(2 k \nu^2 / c^2  f(\nu) \right)`

    with :math:`f(\nu) = \frac{ x^2 e^x}{(e^x - 1 )^2}`

    where :math:`x = h \nu / k T`

    Parameters
    ----------
    frequency : `~astropy.units.Quantity`
        The observed `spectral` equivalent `~astropy.units.Unit` (e.g.,
        frequency or wavelength). Must have spectral units.
    T_cmb : `~astropy.units.Quantity` ['temperature'] or None
        The CMB temperature at z=0. If `None`, the default cosmology will be
        used to get this temperature. Must have units of temperature.

    Notes
    -----
    For broad-band receivers this conversion does not hold, as it depends
    strongly on the frequency.

    References
    ----------
    .. [1] Planck 2013 results. IX. HFI spectral response
       https://arxiv.org/abs/1303.5070

    Examples
    --------
    Planck HFI 143 GHz::

        >>> from astropy import units as u
        >>> from astropy.cosmology import Planck15
        >>> freq = 143 * u.GHz
        >>> equiv = u.thermodynamic_temperature(freq, Planck15.Tcmb0)
        >>> (1. * u.mK).to(u.MJy / u.sr, equivalencies=equiv)  # doctest: +FLOAT_CMP
        <Quantity 0.37993172 MJy / sr>

    """
    nu = frequency.to(si.GHz, spectral())

    if T_cmb is None:
        from astropy.cosmology import default_cosmology

        T_cmb = default_cosmology.get().Tcmb0

    def f(nu, T_cmb=T_cmb):
        x = _si.h * nu / _si.k_B / T_cmb
        return x**2 * np.exp(x) / np.expm1(x) ** 2

    def convert_Jy_to_K(x_jybm):
        factor = (f(nu) * 2 * _si.k_B * si.K * nu**2 / _si.c**2).to_value(astrophys.Jy)
        return x_jybm / factor

    def convert_K_to_Jy(x_K):
        factor = (astrophys.Jy / (f(nu) * 2 * _si.k_B * nu**2 / _si.c**2)).to_value(
            si.K
        )
        return x_K / factor

    return Equivalency(
        [(astrophys.Jy / si.sr, si.K, convert_Jy_to_K, convert_K_to_Jy)],
        "thermodynamic_temperature",
        {"frequency": frequency, "T_cmb": T_cmb},
    )
Convert between Kelvin, Celsius, Rankine and Fahrenheit here because Unit and CompositeUnit cannot do addition or subtraction properly.
def temperature(): """Convert between Kelvin, Celsius, Rankine and Fahrenheit here because Unit and CompositeUnit cannot do addition or subtraction properly. """ from .imperial import deg_F as F from .imperial import deg_R as R K = si.K C = si.deg_C return Equivalency( [ (K, C, lambda x: x - 273.15, lambda x: x + 273.15), (C, F, lambda x: x * 1.8 + 32.0, lambda x: (x - 32.0) / 1.8), (K, F, lambda x: x * 1.8 - 459.67, lambda x: (x + 459.67) / 1.8), (R, F, lambda x: x - 459.67, lambda x: x + 459.67), (R, C, lambda x: (x - 491.67) * (5 / 9), lambda x: x * 1.8 + 491.67), (R, K, lambda x: x * (5 / 9), lambda x: x * 1.8), ], "temperature", )
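A brief usage sketch of the affine temperature conversions:

import astropy.units as u

(0.0 * u.deg_C).to(u.K, equivalencies=u.temperature())                  # 273.15 K
(100.0 * u.deg_C).to(u.imperial.deg_F, equivalencies=u.temperature())  # 212 deg_F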
Convert between temperature in Kelvin and the equivalent energy in eV (or keV).

def temperature_energy():
    """Convert between temperature in Kelvin and the equivalent energy in eV (or keV)."""
    e = _si.e.value
    k_B = _si.k_B.value
    return Equivalency(
        [(si.K, si.eV, lambda x: x / (e / k_B), lambda x: x * (e / k_B))],
        "temperature_energy",
    )
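An illustrative conversion using k_B * T = E (values approximate):

import astropy.units as u

(1.16045e7 * u.K).to(u.keV, equivalencies=u.temperature_energy())  # ~1 keV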
Convert between pixel distances (in units of ``pix``) and other units, given a particular ``pixscale``. Parameters ---------- pixscale : `~astropy.units.Quantity` The pixel scale either in units of <unit>/pixel or pixel/<unit>.
def pixel_scale(pixscale): """ Convert between pixel distances (in units of ``pix``) and other units, given a particular ``pixscale``. Parameters ---------- pixscale : `~astropy.units.Quantity` The pixel scale either in units of <unit>/pixel or pixel/<unit>. """ decomposed = pixscale.unit.decompose() dimensions = dict(zip(decomposed.bases, decomposed.powers)) pix_power = dimensions.get(misc.pix, 0) if pix_power == -1: physical_unit = Unit(pixscale * misc.pix) elif pix_power == 1: physical_unit = Unit(misc.pix / pixscale) else: raise UnitsError( "The pixel scale unit must have pixel dimensionality of 1 or -1." ) return Equivalency( [(misc.pix, physical_unit)], "pixel_scale", {"pixscale": pixscale} )
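A short usage sketch; the detector scale below is illustrative.

import astropy.units as u

scale = 0.05 * u.arcsec / u.pix  # illustrative pixel scale
(512 * u.pix).to(u.arcsec, equivalencies=u.pixel_scale(scale))  # 25.6 arcsec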
Convert between lengths (to be interpreted as lengths in the focal plane) and angular units with a specified ``platescale``.

Parameters
----------
platescale : `~astropy.units.Quantity`
    The plate scale, either in units of angle/distance or distance/angle.

def plate_scale(platescale):
    """
    Convert between lengths (to be interpreted as lengths in the focal plane)
    and angular units with a specified ``platescale``.

    Parameters
    ----------
    platescale : `~astropy.units.Quantity`
        The plate scale, either in units of angle/distance or distance/angle.
    """
    if platescale.unit.is_equivalent(si.arcsec / si.m):
        platescale_val = platescale.to_value(si.radian / si.m)
    elif platescale.unit.is_equivalent(si.m / si.arcsec):
        platescale_val = (1 / platescale).to_value(si.radian / si.m)
    else:
        raise UnitsError("The plate scale must be in angle/distance or distance/angle")

    return Equivalency(
        [(si.m, si.radian, lambda d: d * platescale_val, lambda a: a / platescale_val)],
        "plate_scale",
        {"platescale": platescale},
    )
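A hedged usage sketch; the focal-plane scale is illustrative.

import astropy.units as u

ps = 20 * u.um / u.arcsec  # illustrative plate scale (distance/angle)
(100 * u.um).to(u.arcsec, equivalencies=u.plate_scale(ps))  # 5 arcsec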
Enable Imperial units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable Imperial units only temporarily.
def enable(): """ Enable Imperial units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`. This may be used with the ``with`` statement to enable Imperial units only temporarily. """ # Local import to avoid cyclical import # Local import to avoid polluting namespace import inspect from .core import add_enabled_units return add_enabled_units(inspect.getmodule(enable))
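A minimal sketch of the context-manager usage mentioned in the docstring (assuming the public ``u.imperial`` entry point):

import astropy.units as u

with u.imperial.enable():
    # Imperial units now appear in unit searches and compositions.
    u.m.find_equivalent_units()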
An equivalency for converting linear flux units ("maggys") defined relative to a standard source into a standardized system. Parameters ---------- flux0 : `~astropy.units.Quantity` The flux of a magnitude-0 object in the "maggy" system.
def zero_point_flux(flux0): """ An equivalency for converting linear flux units ("maggys") defined relative to a standard source into a standardized system. Parameters ---------- flux0 : `~astropy.units.Quantity` The flux of a magnitude-0 object in the "maggy" system. """ flux_unit0 = Unit(flux0) return [(maggy, flux_unit0)]
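A hedged sketch, assuming ``u.maggy`` is available in the top-level namespace; 3631 Jy is the conventional AB zero point, used here only for illustration.

import astropy.units as u

(1.0 * u.maggy).to(u.Jy, equivalencies=u.zero_point_flux(3631 * u.Jy))  # 3631 Jy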
Return the `PhysicalType` instance associated with the name of a physical type.
def _physical_type_from_str(name): """ Return the `PhysicalType` instance associated with the name of a physical type. """ if name == "unknown": raise ValueError("cannot uniquely identify an 'unknown' physical type.") elif name in _attrname_physical_mapping: return _attrname_physical_mapping[name] # convert attribute-accessible elif name in _name_physical_mapping: return _name_physical_mapping[name] else: raise ValueError(f"{name!r} is not a known physical type.")
If a unit contains a temperature unit besides kelvin, then replace that unit with kelvin.

Temperatures cannot be converted directly between K, °F, °C, and °Ra, in particular since there would be different conversions for T and ΔT. However, each of these temperature units represents the same physical type. Replacing the different temperature units with kelvin allows the physical type to be treated consistently.

def _replace_temperatures_with_kelvin(unit):
    """
    If a unit contains a temperature unit besides kelvin, then replace that
    unit with kelvin.

    Temperatures cannot be converted directly between K, °F, °C, and °Ra, in
    particular since there would be different conversions for T and ΔT.
    However, each of these temperature units represents the same physical
    type. Replacing the different temperature units with kelvin allows the
    physical type to be treated consistently.
    """
    physical_type_id = unit._get_physical_type_id()

    physical_type_id_components = []
    substitution_was_made = False

    for base, power in physical_type_id:
        if base in ["deg_F", "deg_C", "deg_R"]:
            base = "K"
            substitution_was_made = True
        physical_type_id_components.append((base, power))

    if substitution_was_made:
        return core.Unit._from_physical_type_id(tuple(physical_type_id_components))
    else:
        return unit
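An illustrative sketch of the expected behavior, assuming ``deg_C`` survives as a base in the physical-type id:

import astropy.units as u

_replace_temperatures_with_kelvin(u.deg_C / u.s)  # expected: Unit("K / s")
_replace_temperatures_with_kelvin(u.m / u.s)      # returned unchanged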
Convert a string or `set` of strings into a `set` containing string representations of physical types.

The strings provided in ``physical_type_input`` can each contain multiple physical types that are separated by a forward slash (``/``). Underscores are treated as spaces so that variable names can be identical to physical type names.

def _standardize_physical_type_names(physical_type_input):
    """
    Convert a string or `set` of strings into a `set` containing
    string representations of physical types.

    The strings provided in ``physical_type_input`` can each contain multiple
    physical types that are separated by a forward slash (``/``). Underscores
    are treated as spaces so that variable names can be identical to physical
    type names.
    """
    if isinstance(physical_type_input, str):
        physical_type_input = {physical_type_input}

    standardized_physical_types = set()

    for ptype_input in physical_type_input:
        if not isinstance(ptype_input, str):
            raise ValueError(f"expecting a string, but got {ptype_input}")
        input_set = set(ptype_input.split("/"))
        processed_set = {s.strip().replace("_", " ") for s in input_set}
        standardized_physical_types |= processed_set

    return standardized_physical_types
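A couple of illustrative calls, traced directly from the splitting and substitution logic above:

_standardize_physical_type_names("energy/torque")   # -> {'energy', 'torque'}
_standardize_physical_type_names("column_density")  # -> {'column density'}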
Add a mapping between a unit and the corresponding physical type(s). If a physical type already exists for a unit, add new physical type names so long as those names are not already in use for other physical types. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. name : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Raises ------ ValueError If a physical type name is already in use for another unit, or if attempting to name a unit as ``"unknown"``. Notes ----- For a list of physical types, see `astropy.units.physical`.
def def_physical_type(unit, name): """ Add a mapping between a unit and the corresponding physical type(s). If a physical type already exists for a unit, add new physical type names so long as those names are not already in use for other physical types. Parameters ---------- unit : `~astropy.units.Unit` The unit to be represented by the physical type. name : `str` or `set` of `str` A `str` representing the name of the physical type of the unit, or a `set` containing strings that represent one or more names of physical types. Raises ------ ValueError If a physical type name is already in use for another unit, or if attempting to name a unit as ``"unknown"``. Notes ----- For a list of physical types, see `astropy.units.physical`. """ physical_type_id = unit._get_physical_type_id() physical_type_names = _standardize_physical_type_names(name) if "unknown" in physical_type_names: raise ValueError("cannot uniquely define an unknown physical type") names_for_other_units = set(_unit_physical_mapping.keys()).difference( _physical_unit_mapping.get(physical_type_id, {}) ) names_already_in_use = physical_type_names & names_for_other_units if names_already_in_use: raise ValueError( "the following physical type names are already in use: " f"{names_already_in_use}." ) unit_already_in_use = physical_type_id in _physical_unit_mapping if unit_already_in_use: physical_type = _physical_unit_mapping[physical_type_id] physical_type_names |= set(physical_type) physical_type.__init__(unit, physical_type_names) else: physical_type = PhysicalType(unit, physical_type_names) _physical_unit_mapping[physical_type_id] = physical_type for ptype in physical_type: _unit_physical_mapping[ptype] = physical_type_id for ptype_name in physical_type_names: _name_physical_mapping[ptype_name] = physical_type # attribute-accessible name attr_name = ptype_name.replace(" ", "_").replace("(", "").replace(")", "") _attrname_physical_mapping[attr_name] = physical_type
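A hedged sketch; the unit and name below are deliberately made up, and the call would raise ValueError if the name were already registered for a different unit.

import astropy.units as u

u.def_physical_type(u.J / u.sr**2, "hypothetical bi-angular energy")
u.get_physical_type(u.J / u.sr**2)  # -> PhysicalType('hypothetical bi-angular energy')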
Return the physical type that corresponds to a unit (or another physical type representation). Parameters ---------- obj : quantity-like or `~astropy.units.PhysicalType`-like An object that (implicitly or explicitly) has a corresponding physical type. This object may be a unit, a `~astropy.units.Quantity`, an object that can be converted to a `~astropy.units.Quantity` (such as a number or array), a string that contains a name of a physical type, or a `~astropy.units.PhysicalType` instance. Returns ------- `~astropy.units.PhysicalType` A representation of the physical type(s) of the unit. Notes ----- For a list of physical types, see `astropy.units.physical`. Examples -------- The physical type may be retrieved from a unit or a `~astropy.units.Quantity`. >>> import astropy.units as u >>> u.get_physical_type(u.meter ** -2) PhysicalType('column density') >>> u.get_physical_type(0.62 * u.barn * u.Mpc) PhysicalType('volume') The physical type may also be retrieved by providing a `str` that contains the name of a physical type. >>> u.get_physical_type("energy") PhysicalType({'energy', 'torque', 'work'}) Numbers and arrays of numbers correspond to a dimensionless physical type. >>> u.get_physical_type(1) PhysicalType('dimensionless')
def get_physical_type(obj): """ Return the physical type that corresponds to a unit (or another physical type representation). Parameters ---------- obj : quantity-like or `~astropy.units.PhysicalType`-like An object that (implicitly or explicitly) has a corresponding physical type. This object may be a unit, a `~astropy.units.Quantity`, an object that can be converted to a `~astropy.units.Quantity` (such as a number or array), a string that contains a name of a physical type, or a `~astropy.units.PhysicalType` instance. Returns ------- `~astropy.units.PhysicalType` A representation of the physical type(s) of the unit. Notes ----- For a list of physical types, see `astropy.units.physical`. Examples -------- The physical type may be retrieved from a unit or a `~astropy.units.Quantity`. >>> import astropy.units as u >>> u.get_physical_type(u.meter ** -2) PhysicalType('column density') >>> u.get_physical_type(0.62 * u.barn * u.Mpc) PhysicalType('volume') The physical type may also be retrieved by providing a `str` that contains the name of a physical type. >>> u.get_physical_type("energy") PhysicalType({'energy', 'torque', 'work'}) Numbers and arrays of numbers correspond to a dimensionless physical type. >>> u.get_physical_type(1) PhysicalType('dimensionless') """ if isinstance(obj, PhysicalType): return obj if isinstance(obj, str): return _physical_type_from_str(obj) if isinstance(obj, core.UnitBase): unit = obj else: try: unit = quantity.Quantity(obj, copy=COPY_IF_NEEDED).unit except TypeError as exc: raise TypeError(f"{obj} does not correspond to a physical type.") from exc unit = _replace_temperatures_with_kelvin(unit) physical_type_id = unit._get_physical_type_id() unit_has_known_physical_type = physical_type_id in _physical_unit_mapping if unit_has_known_physical_type: return _physical_unit_mapping[physical_type_id] else: return PhysicalType(unit, "unknown")
Checks for physical types using lazy import. This also allows user-defined physical types to be accessible from the :mod:`astropy.units.physical` module. See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_ Parameters ---------- name : str The name of the attribute in this module. If it is already defined, then this function is not called. Returns ------- ptype : `~astropy.units.physical.PhysicalType` Raises ------ AttributeError If the ``name`` does not correspond to a physical type
def __getattr__(name): """Checks for physical types using lazy import. This also allows user-defined physical types to be accessible from the :mod:`astropy.units.physical` module. See `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_ Parameters ---------- name : str The name of the attribute in this module. If it is already defined, then this function is not called. Returns ------- ptype : `~astropy.units.physical.PhysicalType` Raises ------ AttributeError If the ``name`` does not correspond to a physical type """ if name in _attrname_physical_mapping: return _attrname_physical_mapping[name] raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
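A small sketch of the lazy attribute access this enables:

from astropy.units import physical

physical.length  # -> PhysicalType('length'), resolved through __getattr__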
Return contents directory (__all__ + all physical type names).
def __dir__(): """Return contents directory (__all__ + all physical type names).""" return list(set(__all__) | set(_attrname_physical_mapping.keys()))
Return a boolean array where two arrays are element-wise equal within a tolerance.

Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
    Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
    The relative tolerance for the comparison, which defaults to ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`, then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
    The absolute tolerance for the comparison. The units (or lack thereof) of ``a``, ``b``, and ``atol`` must be consistent with each other. If `None`, ``atol`` defaults to zero in the appropriate units.
equal_nan : `bool`
    Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will be considered equal to NaNs in ``b``.

Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.isclose`. However, this differs from the `numpy` function in that the default for the absolute tolerance here is zero instead of ``atol=1e-8`` in `numpy`, as there is no natural way to set a default *absolute* tolerance given two inputs that may have differently scaled units.

Raises
------
`~astropy.units.UnitsError`
    If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if ``rtol`` is not dimensionless.

See Also
--------
allclose

def isclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False):
    """
    Return a boolean array where two arrays are element-wise equal
    within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison. The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other. If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.isclose`. However, this differs from the `numpy` function in
    that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if
        ``rtol`` is not dimensionless.

    See Also
    --------
    allclose
    """
    return np.isclose(*_unquantify_allclose_arguments(a, b, rtol, atol), equal_nan)

Whether two arrays are element-wise equal within a tolerance.

Parameters
----------
a, b : array-like or `~astropy.units.Quantity`
    Input values or arrays to compare
rtol : array-like or `~astropy.units.Quantity`
    The relative tolerance for the comparison, which defaults to ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`, then it must be dimensionless.
atol : number or `~astropy.units.Quantity`
    The absolute tolerance for the comparison. The units (or lack thereof) of ``a``, ``b``, and ``atol`` must be consistent with each other. If `None`, ``atol`` defaults to zero in the appropriate units.
equal_nan : `bool`
    Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will be considered equal to NaNs in ``b``.

Notes
-----
This is a :class:`~astropy.units.Quantity`-aware version of :func:`numpy.allclose`. However, this differs from the `numpy` function in that the default for the absolute tolerance here is zero instead of ``atol=1e-8`` in `numpy`, as there is no natural way to set a default *absolute* tolerance given two inputs that may have differently scaled units.

Raises
------
`~astropy.units.UnitsError`
    If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if ``rtol`` is not dimensionless.

See Also
--------
isclose

def allclose(a, b, rtol=1.0e-5, atol=None, equal_nan=False) -> bool:
    """
    Whether two arrays are element-wise equal within a tolerance.

    Parameters
    ----------
    a, b : array-like or `~astropy.units.Quantity`
        Input values or arrays to compare
    rtol : array-like or `~astropy.units.Quantity`
        The relative tolerance for the comparison, which defaults to
        ``1e-5``. If ``rtol`` is a :class:`~astropy.units.Quantity`,
        then it must be dimensionless.
    atol : number or `~astropy.units.Quantity`
        The absolute tolerance for the comparison. The units (or lack
        thereof) of ``a``, ``b``, and ``atol`` must be consistent with
        each other. If `None`, ``atol`` defaults to zero in the
        appropriate units.
    equal_nan : `bool`
        Whether to compare NaNs as equal. If `True`, NaNs in ``a`` will
        be considered equal to NaNs in ``b``.

    Notes
    -----
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.allclose`. However, this differs from the `numpy` function in
    that the default for the absolute tolerance here is zero instead of
    ``atol=1e-8`` in `numpy`, as there is no natural way to set a default
    *absolute* tolerance given two inputs that may have differently scaled
    units.

    Raises
    ------
    `~astropy.units.UnitsError`
        If the dimensions of ``a``, ``b``, or ``atol`` are incompatible, or if
        ``rtol`` is not dimensionless.

    See Also
    --------
    isclose
    """
    return np.allclose(*_unquantify_allclose_arguments(a, b, rtol, atol), equal_nan)
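A quick usage sketch covering both helpers:

import astropy.units as u

u.isclose(500 * u.m, 0.5 * u.km)             # True: units are reconciled first
u.allclose([1, 2] * u.m, [100, 200] * u.cm)  # True
# u.allclose(1 * u.m, 1 * u.s)               # would raise UnitsError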
Enable the VOUnit-required extra units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')`` idiom.
def _enable(): """ Enable the VOUnit-required extra units so they appear in results of `~astropy.units.UnitBase.find_equivalent_units` and `~astropy.units.UnitBase.compose`, and are recognized in the ``Unit('...')`` idiom. """ # Local import to avoid cyclical import # Local import to avoid polluting namespace import inspect from .core import add_enabled_units return add_enabled_units(inspect.getmodule(_enable))
Recursively extract field names from a dtype.
def _names_from_dtype(dtype): """Recursively extract field names from a dtype.""" names = [] for name in dtype.names: subdtype = dtype.fields[name][0].base if subdtype.names: names.append([name, _names_from_dtype(subdtype)]) else: names.append(name) return tuple(names)
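An illustrative call, traced from the recursion above:

import numpy as np

dt = np.dtype([("pv", [("p", "f8"), ("v", "f8")]), ("t", "f8")])
_names_from_dtype(dt)  # -> (['pv', ('p', 'v')], 't')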
Recursively normalize, inferring upper level names for unadorned tuples. Generally, we want the field names to be organized like dtypes, as in ``(['pv', ('p', 'v')], 't')``. But we automatically infer upper field names if the list is absent from items like ``(('p', 'v'), 't')``, by concatenating the names inside the tuple.
def _normalize_names(names): """Recursively normalize, inferring upper level names for unadorned tuples. Generally, we want the field names to be organized like dtypes, as in ``(['pv', ('p', 'v')], 't')``. But we automatically infer upper field names if the list is absent from items like ``(('p', 'v'), 't')``, by concatenating the names inside the tuple. """ result = [] for name in names: if isinstance(name, str) and len(name) > 0: result.append(name) elif ( isinstance(name, list) and len(name) == 2 and isinstance(name[0], str) and len(name[0]) > 0 and isinstance(name[1], tuple) and len(name[1]) > 0 ): result.append([name[0], _normalize_names(name[1])]) elif isinstance(name, tuple) and len(name) > 0: new_tuple = _normalize_names(name) name = "".join([(i[0] if isinstance(i, list) else i) for i in new_tuple]) result.append([name, new_tuple]) else: raise ValueError( f"invalid entry {name!r}. Should be a name, " "tuple of names, or 2-element list of the " "form [name, tuple of names]." ) return tuple(result)
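A sketch of the inference described in the docstring:

_normalize_names((("p", "v"), "t"))
# -> (['pv', ('p', 'v')], 't'): the unadorned tuple gains the inferred name 'pv'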
Make a `StructuredUnit` of one unit, with the structure of a `numpy.dtype`. Parameters ---------- unit : UnitBase The unit that will be filled into the structure. dtype : `numpy.dtype` The structure for the StructuredUnit. Returns ------- StructuredUnit
def _structured_unit_like_dtype( unit: UnitBase | StructuredUnit, dtype: np.dtype ) -> StructuredUnit: """Make a `StructuredUnit` of one unit, with the structure of a `numpy.dtype`. Parameters ---------- unit : UnitBase The unit that will be filled into the structure. dtype : `numpy.dtype` The structure for the StructuredUnit. Returns ------- StructuredUnit """ if isinstance(unit, StructuredUnit): # If unit is structured, it should match the dtype. This function is # only used in Quantity, which performs this check, so it's fine to # return as is. return unit # Make a structured unit units = [] for name in dtype.names: subdtype = dtype.fields[name][0] if subdtype.names is not None: units.append(_structured_unit_like_dtype(unit, subdtype)) else: units.append(unit) return StructuredUnit(tuple(units), names=dtype.names)
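A minimal sketch, assuming direct access to this private helper:

import numpy as np
import astropy.units as u

dt = np.dtype([("p", "f8"), ("v", "f8")])
_structured_unit_like_dtype(u.km, dt)  # -> a StructuredUnit placing km in both fields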
Get the first sentence from a string and remove any carriage returns.
def _get_first_sentence(s: str) -> str: """ Get the first sentence from a string and remove any carriage returns. """ x = re.match(r".*?\S\.\s", s) if x is not None: s = x.group(0) return s.replace("\n", " ")
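A short illustrative call: the regex stops after the first sentence-ending period, and the trailing newline is replaced by a space.

_get_first_sentence("Fixes typos.\nAlso sharpens prose.")  # -> 'Fixes typos. '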