Dataset columns:

  body                    stringlengths  26 to 98.2k
  body_hash               int64          -9,222,864,604,528,158,000 to 9,221,803,474B
  docstring               stringlengths  1 to 16.8k
  path                    stringlengths  5 to 230
  name                    stringlengths  1 to 96
  repository_name         stringlengths  7 to 89
  lang                    stringclasses  1 value
  body_without_docstring  stringlengths  20 to 98.2k
def clusterQuality(self, cluster, fet=1):
    """
    returns the L-ratio and Isolation Distance measures
    calculated on the principal components of the energy in a spike matrix
    """
    if self.waveforms is None:
        return None
    nSpikes, nElectrodes, _ = self.waveforms.shape
    wvs = self.waveforms.copy()
    E = np.sqrt(np.nansum(self.waveforms ** 2, axis=2))
    zeroIdx = np.sum(E, 0) == [0, 0, 0, 0]
    E = E[:, ~zeroIdx]
    wvs = wvs[:, ~zeroIdx, :]
    normdWaves = (wvs.T / E.T).T
    PCA_m = self.getParam(normdWaves, 'PCA', fet=fet)
    badIdx = np.sum(PCA_m, axis=0) == 0
    PCA_m = PCA_m[:, ~badIdx]
    idx = self.spk_clusters == cluster
    nClustSpikes = np.count_nonzero(idx)
    try:
        d = self._mahal(PCA_m, PCA_m[idx, :])
        M_noise = d[~idx]
        df = np.prod((fet, nElectrodes))
        from scipy import stats
        L = np.sum(1 - stats.chi2.cdf(M_noise, df))
        L_ratio = L / nClustSpikes
        if nClustSpikes < nSpikes / 2:
            M_noise.sort()
            isolation_dist = M_noise[nClustSpikes]
        else:
            isolation_dist = np.nan
    except Exception:
        isolation_dist = L_ratio = np.nan
    return L_ratio, isolation_dist
-791,362,355,700,851,100
returns the L-ratio and Isolation Distance measures calculated on the principal components of the energy in a spike matrix
ephysiopy/common/spikecalcs.py
clusterQuality
rhayman/ephysiopy
python
def clusterQuality(self, cluster, fet=1):
    if self.waveforms is None:
        return None
    nSpikes, nElectrodes, _ = self.waveforms.shape
    wvs = self.waveforms.copy()
    E = np.sqrt(np.nansum(self.waveforms ** 2, axis=2))
    zeroIdx = np.sum(E, 0) == [0, 0, 0, 0]
    E = E[:, ~zeroIdx]
    wvs = wvs[:, ~zeroIdx, :]
    normdWaves = (wvs.T / E.T).T
    PCA_m = self.getParam(normdWaves, 'PCA', fet=fet)
    badIdx = np.sum(PCA_m, axis=0) == 0
    PCA_m = PCA_m[:, ~badIdx]
    idx = self.spk_clusters == cluster
    nClustSpikes = np.count_nonzero(idx)
    try:
        d = self._mahal(PCA_m, PCA_m[idx, :])
        M_noise = d[~idx]
        df = np.prod((fet, nElectrodes))
        from scipy import stats
        L = np.sum(1 - stats.chi2.cdf(M_noise, df))
        L_ratio = L / nClustSpikes
        if nClustSpikes < nSpikes / 2:
            M_noise.sort()
            isolation_dist = M_noise[nClustSpikes]
        else:
            isolation_dist = np.nan
    except Exception:
        isolation_dist = L_ratio = np.nan
    return L_ratio, isolation_dist
def _mahal(self, u, v):
    """
    gets the mahalanobis distance between two vectors u and v
    a blatant copy of the Mathworks fcn as it doesn't require the
    covariance matrix to be calculated which is a pain if there
    are NaNs in the matrix
    """
    u_sz = u.shape
    v_sz = v.shape
    if u_sz[1] != v_sz[1]:
        warnings.warn('Input size mismatch: matrices must have same num of columns')
    if v_sz[0] < v_sz[1]:
        warnings.warn('Too few rows: v must have more rows than columns')
    if np.any(np.imag(u)) or np.any(np.imag(v)):
        warnings.warn('No complex inputs are allowed')
    m = np.nanmean(v, axis=0)
    M = np.tile(m, reps=(u_sz[0], 1))
    C = v - np.tile(m, reps=(v_sz[0], 1))
    _, R = np.linalg.qr(C)
    ri = np.linalg.solve(R.T, (u - M).T)
    d = np.sum(ri * ri, 0).T * (v_sz[0] - 1)
    return d
-7,699,833,769,099,782,000
gets the mahalanobis distance between two vectors u and v a blatant copy of the Mathworks fcn as it doesn't require the covariance matrix to be calculated which is a pain if there are NaNs in the matrix
ephysiopy/common/spikecalcs.py
_mahal
rhayman/ephysiopy
python
def _mahal(self, u, v):
    u_sz = u.shape
    v_sz = v.shape
    if u_sz[1] != v_sz[1]:
        warnings.warn('Input size mismatch: matrices must have same num of columns')
    if v_sz[0] < v_sz[1]:
        warnings.warn('Too few rows: v must have more rows than columns')
    if np.any(np.imag(u)) or np.any(np.imag(v)):
        warnings.warn('No complex inputs are allowed')
    m = np.nanmean(v, axis=0)
    M = np.tile(m, reps=(u_sz[0], 1))
    C = v - np.tile(m, reps=(v_sz[0], 1))
    _, R = np.linalg.qr(C)
    ri = np.linalg.solve(R.T, (u - M).T)
    d = np.sum(ri * ri, 0).T * (v_sz[0] - 1)
    return d
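The QR trick above avoids forming the covariance matrix explicitly. A minimal standalone sketch (synthetic data; the names ref and pts are illustrative, not from the dataset) showing it agrees with the textbook covariance-based Mahalanobis distance:

import numpy as np

rng = np.random.default_rng(0)
ref = rng.standard_normal((200, 3))   # reference cloud (v)
pts = rng.standard_normal((10, 3))    # query points (u)

# QR route, as in _mahal: centred ref = Q R, so S = R'R / (n - 1) and
# d = (n - 1) * ||R^-T (pts - mean)^T||^2 column-wise
m = ref.mean(axis=0)
_, R = np.linalg.qr(ref - m)
ri = np.linalg.solve(R.T, (pts - m).T)
d_qr = np.sum(ri * ri, axis=0) * (ref.shape[0] - 1)

# Covariance route: d = diff @ inv(S) @ diff' row-wise
S = np.cov(ref, rowvar=False)
diff = pts - m
d_cov = np.einsum('ij,jk,ik->i', diff, np.linalg.inv(S), diff)

assert np.allclose(d_qr, d_cov)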
def thetaModIdx(self, x1):
    """
    Calculates a theta modulation index of a spike train based on the
    cell's autocorrelogram

    Parameters
    ----------
    x1: np.array
        The spike time-series

    Returns
    -------
    thetaMod: float
        The difference of the values at the first peak and trough of the
        autocorrelogram
    """
    y = self.xcorr(x1)
    corr, _ = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    from scipy.signal import periodogram
    freqs, power = periodogram(corr, fs=200, return_onesided=True)
    b = signal.boxcar(3)
    h = signal.filtfilt(b, 3, power)
    sqd_amp = h ** 2
    theta_band_max_idx = np.nonzero(
        sqd_amp == np.max(sqd_amp[np.logical_and(freqs > 6, freqs < 11)]))[0][0]
    mtbp = np.mean(sqd_amp[theta_band_max_idx - 1:theta_band_max_idx + 1])
    other_band_idx = np.logical_and(freqs > 2, freqs < 50)
    mobp = np.mean(sqd_amp[other_band_idx])
    return (mtbp - mobp) / (mtbp + mobp)
8,879,195,594,928,940,000
Calculates a theta modulation index of a spike train based on the
cell's autocorrelogram

Parameters
----------
x1: np.array
    The spike time-series

Returns
-------
thetaMod: float
    The difference of the values at the first peak and trough of the
    autocorrelogram
ephysiopy/common/spikecalcs.py
thetaModIdx
rhayman/ephysiopy
python
def thetaModIdx(self, x1):
    y = self.xcorr(x1)
    corr, _ = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    from scipy.signal import periodogram
    freqs, power = periodogram(corr, fs=200, return_onesided=True)
    b = signal.boxcar(3)
    h = signal.filtfilt(b, 3, power)
    sqd_amp = h ** 2
    theta_band_max_idx = np.nonzero(
        sqd_amp == np.max(sqd_amp[np.logical_and(freqs > 6, freqs < 11)]))[0][0]
    mtbp = np.mean(sqd_amp[theta_band_max_idx - 1:theta_band_max_idx + 1])
    other_band_idx = np.logical_and(freqs > 2, freqs < 50)
    mobp = np.mean(sqd_amp[other_band_idx])
    return (mtbp - mobp) / (mtbp + mobp)
def thetaModIdxV2(self, x1):
    """
    This is a simpler alternative to the thetaModIdx method in that it
    calculates the difference between the normalized temporal
    autocorrelogram at the trough between 50-70ms and the
    peak between 100-140ms over their sum (data is binned into 5ms bins)

    Measure used in Cacucci et al., 2004 and Kropff et al., 2015
    """
    y = self.xcorr(x1)
    corr, bins = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    bins = bins[0:-1]
    corr = corr / float(np.max(corr))
    thetaAntiPhase = np.min(corr[np.logical_and(bins > 50, bins < 70)])
    thetaPhase = np.max(corr[np.logical_and(bins > 100, bins < 140)])
    return (thetaPhase - thetaAntiPhase) / (thetaPhase + thetaAntiPhase)
-8,577,724,921,252,220,000
This is a simpler alternative to the thetaModIdx method in that it
calculates the difference between the normalized temporal autocorrelogram
at the trough between 50-70ms and the peak between 100-140ms over their
sum (data is binned into 5ms bins)

Measure used in Cacucci et al., 2004 and Kropff et al., 2015
ephysiopy/common/spikecalcs.py
thetaModIdxV2
rhayman/ephysiopy
python
def thetaModIdxV2(self, x1):
    y = self.xcorr(x1)
    corr, bins = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    bins = bins[0:-1]
    corr = corr / float(np.max(corr))
    thetaAntiPhase = np.min(corr[np.logical_and(bins > 50, bins < 70)])
    thetaPhase = np.max(corr[np.logical_and(bins > 100, bins < 140)])
    return (thetaPhase - thetaAntiPhase) / (thetaPhase + thetaAntiPhase)
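The V2 index reduces to a simple contrast on the binned autocorrelogram. A toy sketch with invented lag data (nothing here comes from the repository) of the same computation:

import numpy as np

# stand-in for the output of xcorr: spike-time lags in ms
lags_ms = np.random.default_rng(1).uniform(-500, 500, 5000)
corr, bins = np.histogram(lags_ms[lags_ms != 0], bins=201, range=(-500, 500))
bins = bins[:-1]
corr = corr / float(np.max(corr))          # normalise to the peak
trough = np.min(corr[np.logical_and(bins > 50, bins < 70)])
peak = np.max(corr[np.logical_and(bins > 100, bins < 140)])
theta_mod = (peak - trough) / (peak + trough)
print(theta_mod)   # ~0 for this unstructured toy data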
def thetaBandMaxFreq(self, x1):
    """
    Calculates the frequency with the max power in the theta band (6-12Hz)
    of a spike train's autocorrelogram. Partly to look for differences
    in theta frequency in different running directions a la Blair
    See Welday paper - https://doi.org/10.1523/jneurosci.0712-11.2011
    """
    y = self.xcorr(x1)
    corr, _ = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    from scipy.signal import periodogram
    freqs, power = periodogram(corr, fs=200, return_onesided=True)
    power_masked = np.ma.MaskedArray(power, np.logical_or(freqs < 6, freqs > 12))
    return freqs[np.argmax(power_masked)]
5,875,807,191,050,528,000
Calculates the frequency with the max power in the theta band (6-12Hz)
of a spike train's autocorrelogram. Partly to look for differences in
theta frequency in different running directions a la Blair
See Welday paper - https://doi.org/10.1523/jneurosci.0712-11.2011
ephysiopy/common/spikecalcs.py
thetaBandMaxFreq
rhayman/ephysiopy
python
def thetaBandMaxFreq(self, x1):
    y = self.xcorr(x1)
    corr, _ = np.histogram(y[y != 0], bins=201, range=np.array([-500, 500]))
    from scipy.signal import periodogram
    freqs, power = periodogram(corr, fs=200, return_onesided=True)
    power_masked = np.ma.MaskedArray(power, np.logical_or(freqs < 6, freqs > 12))
    return freqs[np.argmax(power_masked)]
def smoothSpikePosCount(self, x1, npos, sigma=3.0, shuffle=None):
    """
    Returns a spike train the same length as num pos samples that has been
    smoothed in time with a gaussian kernel (13 samples wide) with standard
    deviation equal to sigma

    Parameters
    ----------
    x1 : np.array
        The pos indices the spikes occurred at
    npos : int
        The number of position samples captured
    sigma : float
        the standard deviation of the gaussian used to smooth the spike
        train
    shuffle: int
        The number of seconds to shift the spike train by. Default None

    Returns
    -------
    smoothed_spikes : np.array
        The smoothed spike train
    """
    spk_hist = np.bincount(x1, minlength=npos)
    if shuffle is not None:
        spk_hist = np.roll(spk_hist, int(shuffle * 50))
    h = signal.gaussian(13, sigma)
    h = h / float(np.sum(h))
    return signal.filtfilt(h.ravel(), 1, spk_hist)
-6,908,954,358,747,898,000
Returns a spike train the same length as num pos samples that has been
smoothed in time with a gaussian kernel (13 samples wide) with standard
deviation equal to sigma

Parameters
----------
x1 : np.array
    The pos indices the spikes occurred at
npos : int
    The number of position samples captured
sigma : float
    the standard deviation of the gaussian used to smooth the spike train
shuffle: int
    The number of seconds to shift the spike train by. Default None

Returns
-------
smoothed_spikes : np.array
    The smoothed spike train
ephysiopy/common/spikecalcs.py
smoothSpikePosCount
rhayman/ephysiopy
python
def smoothSpikePosCount(self, x1, npos, sigma=3.0, shuffle=None):
    spk_hist = np.bincount(x1, minlength=npos)
    if shuffle is not None:
        spk_hist = np.roll(spk_hist, int(shuffle * 50))
    h = signal.gaussian(13, sigma)
    h = h / float(np.sum(h))
    return signal.filtfilt(h.ravel(), 1, spk_hist)
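The smoothing step is just a normalised Gaussian window run through a zero-phase filter. A self-contained sketch with invented spike indices (assuming a recent SciPy, where the window lives in scipy.signal.windows rather than scipy.signal):

import numpy as np
from scipy import signal

npos = 500                                 # number of position samples
spike_pos_idx = np.random.default_rng(2).integers(0, npos, 150)
spk_hist = np.bincount(spike_pos_idx, minlength=npos)
h = signal.windows.gaussian(13, 3.0)       # 13-sample kernel, sigma = 3
h = h / float(np.sum(h))                   # unit-sum so rates are preserved
smoothed = signal.filtfilt(h, 1, spk_hist)  # zero-phase: no time shift
assert smoothed.shape == (npos,)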
def ifr_sp_corr(self, x1, speed, minSpeed=2.0, maxSpeed=40.0, sigma=3,
                shuffle=False, nShuffles=100, minTime=30, plot=False):
    """
    x1 : np.array
        The indices of pos at which the cluster fired
    speed: np.array (1 x nSamples)
        instantaneous speed
    minSpeed: int
        speeds below this value are ignored - defaults to 2cm/s as with
        Kropff et al., 2015
    """
    speed = speed.ravel()
    posSampRate = 50
    nSamples = len(speed)
    fig = None  # ensure the final isinstance check is safe when plot=False
    spk_hist = np.bincount(x1, minlength=nSamples)
    h = signal.gaussian(13, sigma)
    h = h / float(np.sum(h))
    lowSpeedIdx = speed < minSpeed
    highSpeedIdx = speed > maxSpeed
    speed_filt = speed[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
    spk_hist_filt = spk_hist[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
    spk_sm = signal.filtfilt(h.ravel(), 1, spk_hist_filt)
    sm_spk_rate = spk_sm * posSampRate
    res = stats.pearsonr(sm_spk_rate, speed_filt)
    if plot:
        _, sp_bin_edges = np.histogram(speed_filt, bins=50)
        sp_dig = np.digitize(speed_filt, sp_bin_edges, right=True)
        spks_per_sp_bin = [spk_hist_filt[sp_dig == i]
                           for i in range(len(sp_bin_edges))]
        rate_per_sp_bin = []
        for x in spks_per_sp_bin:
            rate_per_sp_bin.append(np.mean(x) * posSampRate)
        rate_filter = signal.gaussian(5, 1.0)
        rate_filter = rate_filter / np.sum(rate_filter)
        binned_spk_rate = signal.filtfilt(rate_filter, 1, rate_per_sp_bin)
        spk_binning_edges = np.linspace(np.min(sm_spk_rate),
                                        np.max(sm_spk_rate),
                                        len(sp_bin_edges))
        speed_mesh, spk_mesh = np.meshgrid(sp_bin_edges, spk_binning_edges)
        binned_rate, _, _ = np.histogram2d(
            speed_filt, sm_spk_rate, bins=[sp_bin_edges, spk_binning_edges])
        from ephysiopy.common.utils import blurImage
        sm_binned_rate = blurImage(binned_rate, 5)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        from matplotlib.colors import LogNorm
        speed_mesh = speed_mesh[:-1, :-1]
        spk_mesh = spk_mesh[:-1, :-1]
        ax.pcolormesh(speed_mesh, spk_mesh, sm_binned_rate, norm=LogNorm(),
                      alpha=0.5, shading='nearest', edgecolors='None')
        ax.plot(sp_bin_edges, binned_spk_rate, 'r')
        lr = stats.linregress(speed_filt, sm_spk_rate)
        end_point = lr.intercept + ((sp_bin_edges[-1] - sp_bin_edges[0]) * lr.slope)
        ax.plot([np.min(sp_bin_edges), np.max(sp_bin_edges)],
                [lr.intercept, end_point], 'r--')
        ax.set_xlim(np.min(sp_bin_edges), np.max(sp_bin_edges[-2]))
        ax.set_ylim(0, np.nanmax(binned_spk_rate) * 1.1)
        ax.set_ylabel('Firing rate(Hz)')
        ax.set_xlabel('Running speed(cm/s)')
        ax.set_title('Intercept: {0:.3f} Slope: {1:.5f}\nPearson: {2:.5f}'.format(
            lr.intercept, lr.slope, lr.rvalue))
    if shuffle:
        timeSteps = np.random.randint(30 * posSampRate,
                                      nSamples - (30 * posSampRate), nShuffles)
        shuffled_results = []
        for t in timeSteps:
            spk_count = np.roll(spk_hist, t)
            spk_count_filt = spk_count[~lowSpeedIdx]
            spk_count_sm = signal.filtfilt(h.ravel(), 1, spk_count_filt)
            shuffled_results.append(stats.pearsonr(spk_count_sm, speed_filt)[0])
        if plot:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax.hist(np.abs(shuffled_results), 20)
            ylims = ax.get_ylim()
            ax.vlines(res, ylims[0], ylims[1], 'r')
    if isinstance(fig, plt.Figure):
        return fig
-7,873,371,224,588,896,000
x1 : np.array
    The indices of pos at which the cluster fired
speed: np.array (1 x nSamples)
    instantaneous speed
minSpeed: int
    speeds below this value are ignored - defaults to 2cm/s as with
    Kropff et al., 2015
ephysiopy/common/spikecalcs.py
ifr_sp_corr
rhayman/ephysiopy
python
def ifr_sp_corr(self, x1, speed, minSpeed=2.0, maxSpeed=40.0, sigma=3,
                shuffle=False, nShuffles=100, minTime=30, plot=False):
    speed = speed.ravel()
    posSampRate = 50
    nSamples = len(speed)
    fig = None  # ensure the final isinstance check is safe when plot=False
    spk_hist = np.bincount(x1, minlength=nSamples)
    h = signal.gaussian(13, sigma)
    h = h / float(np.sum(h))
    lowSpeedIdx = speed < minSpeed
    highSpeedIdx = speed > maxSpeed
    speed_filt = speed[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
    spk_hist_filt = spk_hist[~np.logical_or(lowSpeedIdx, highSpeedIdx)]
    spk_sm = signal.filtfilt(h.ravel(), 1, spk_hist_filt)
    sm_spk_rate = spk_sm * posSampRate
    res = stats.pearsonr(sm_spk_rate, speed_filt)
    if plot:
        _, sp_bin_edges = np.histogram(speed_filt, bins=50)
        sp_dig = np.digitize(speed_filt, sp_bin_edges, right=True)
        spks_per_sp_bin = [spk_hist_filt[sp_dig == i]
                           for i in range(len(sp_bin_edges))]
        rate_per_sp_bin = []
        for x in spks_per_sp_bin:
            rate_per_sp_bin.append(np.mean(x) * posSampRate)
        rate_filter = signal.gaussian(5, 1.0)
        rate_filter = rate_filter / np.sum(rate_filter)
        binned_spk_rate = signal.filtfilt(rate_filter, 1, rate_per_sp_bin)
        spk_binning_edges = np.linspace(np.min(sm_spk_rate),
                                        np.max(sm_spk_rate),
                                        len(sp_bin_edges))
        speed_mesh, spk_mesh = np.meshgrid(sp_bin_edges, spk_binning_edges)
        binned_rate, _, _ = np.histogram2d(
            speed_filt, sm_spk_rate, bins=[sp_bin_edges, spk_binning_edges])
        from ephysiopy.common.utils import blurImage
        sm_binned_rate = blurImage(binned_rate, 5)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        from matplotlib.colors import LogNorm
        speed_mesh = speed_mesh[:-1, :-1]
        spk_mesh = spk_mesh[:-1, :-1]
        ax.pcolormesh(speed_mesh, spk_mesh, sm_binned_rate, norm=LogNorm(),
                      alpha=0.5, shading='nearest', edgecolors='None')
        ax.plot(sp_bin_edges, binned_spk_rate, 'r')
        lr = stats.linregress(speed_filt, sm_spk_rate)
        end_point = lr.intercept + ((sp_bin_edges[-1] - sp_bin_edges[0]) * lr.slope)
        ax.plot([np.min(sp_bin_edges), np.max(sp_bin_edges)],
                [lr.intercept, end_point], 'r--')
        ax.set_xlim(np.min(sp_bin_edges), np.max(sp_bin_edges[-2]))
        ax.set_ylim(0, np.nanmax(binned_spk_rate) * 1.1)
        ax.set_ylabel('Firing rate(Hz)')
        ax.set_xlabel('Running speed(cm/s)')
        ax.set_title('Intercept: {0:.3f} Slope: {1:.5f}\nPearson: {2:.5f}'.format(
            lr.intercept, lr.slope, lr.rvalue))
    if shuffle:
        timeSteps = np.random.randint(30 * posSampRate,
                                      nSamples - (30 * posSampRate), nShuffles)
        shuffled_results = []
        for t in timeSteps:
            spk_count = np.roll(spk_hist, t)
            spk_count_filt = spk_count[~lowSpeedIdx]
            spk_count_sm = signal.filtfilt(h.ravel(), 1, spk_count_filt)
            shuffled_results.append(stats.pearsonr(spk_count_sm, speed_filt)[0])
        if plot:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax.hist(np.abs(shuffled_results), 20)
            ylims = ax.get_ylim()
            ax.vlines(res, ylims[0], ylims[1], 'r')
    if isinstance(fig, plt.Figure):
        return fig
def half_amp_dur(self, waveforms):
    """
    Half amplitude duration of a spike

    Parameters
    ----------
    waveforms: ndarray
        An nSpikes x nElectrodes x nSamples array

    Returns
    -------
    had: float
        The half-amplitude duration for the channel (electrode) that has
        the strongest (highest amplitude) signal. Units are ms
    """
    from scipy import optimize
    best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
    mn_wvs = np.mean(waveforms, 0)
    wvs = mn_wvs[best_chan, :]
    half_amp = np.max(wvs) / 2
    half_amp = np.zeros_like(wvs) + half_amp
    t = np.linspace(0, 1 / 1000.0, 50)
    from scipy.interpolate import BPoly
    p1 = BPoly.from_derivatives(t, wvs[:, np.newaxis])
    p2 = BPoly.from_derivatives(t, half_amp[:, np.newaxis])
    xs = np.r_[t, t]
    xs.sort()
    x_min = xs.min()
    x_max = xs.max()
    x_mid = xs[:-1] + (np.diff(xs) / 2)
    roots = set()
    for val in x_mid:
        root, infodict, ier, mesg = optimize.fsolve(
            lambda x: p1(x) - p2(x), val, full_output=True)
        if (ier == 1) and (x_min < root < x_max):
            roots.add(root[0])
    roots = list(roots)
    if len(roots) > 1:
        r = np.abs(np.diff(roots[0:2]))[0]
    else:
        r = np.nan
    return r
3,749,305,657,729,343,500
Half amplitude duration of a spike

Parameters
----------
waveforms: ndarray
    An nSpikes x nElectrodes x nSamples array

Returns
-------
had: float
    The half-amplitude duration for the channel (electrode) that has
    the strongest (highest amplitude) signal. Units are ms
ephysiopy/common/spikecalcs.py
half_amp_dur
rhayman/ephysiopy
python
def half_amp_dur(self, waveforms):
    from scipy import optimize
    best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
    mn_wvs = np.mean(waveforms, 0)
    wvs = mn_wvs[best_chan, :]
    half_amp = np.max(wvs) / 2
    half_amp = np.zeros_like(wvs) + half_amp
    t = np.linspace(0, 1 / 1000.0, 50)
    from scipy.interpolate import BPoly
    p1 = BPoly.from_derivatives(t, wvs[:, np.newaxis])
    p2 = BPoly.from_derivatives(t, half_amp[:, np.newaxis])
    xs = np.r_[t, t]
    xs.sort()
    x_min = xs.min()
    x_max = xs.max()
    x_mid = xs[:-1] + (np.diff(xs) / 2)
    roots = set()
    for val in x_mid:
        root, infodict, ier, mesg = optimize.fsolve(
            lambda x: p1(x) - p2(x), val, full_output=True)
        if (ier == 1) and (x_min < root < x_max):
            roots.add(root[0])
    roots = list(roots)
    if len(roots) > 1:
        r = np.abs(np.diff(roots[0:2]))[0]
    else:
        r = np.nan
    return r
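Running fsolve over interval midpoints is one way to find the half-amplitude crossings; a rougher, dependency-light sketch of the same idea on a synthetic mean waveform (all values invented, 50 samples spanning 1 ms as above):

import numpy as np

t = np.linspace(0, 1e-3, 50)                          # seconds
wv = np.exp(-((t - 4e-4) ** 2) / (2 * (5e-5) ** 2))   # toy mean waveform
half = np.max(wv) / 2
above = wv >= half
# sample indices where the waveform crosses the half-amplitude level
crossings = np.flatnonzero(np.diff(above.astype(int)) != 0)
if len(crossings) >= 2:
    had_ms = (t[crossings[-1]] - t[crossings[0]]) * 1000
else:
    had_ms = np.nan
print(had_ms)   # roughly the FWHM of the toy bump, ~0.12 ms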
def p2t_time(self, waveforms):
    """
    The peak to trough time of a spike in ms

    Parameters
    ----------
    waveforms: ndarray
        the waveforms of the cluster to be analysed

    Returns
    -------
    p2t: float
        The mean peak-to-trough time for the channel (electrode) that has
        the strongest (highest amplitude) signal. Units are ms
    """
    best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
    tP = self.getParam(waveforms, param='tP')
    tT = self.getParam(waveforms, param='tT')
    mn_tP = np.mean(tP, 0)
    mn_tT = np.mean(tT, 0)
    p2t = np.abs(mn_tP[best_chan] - mn_tT[best_chan])
    return p2t * 1000
3,933,080,969,911,125,500
The peak to trough time of a spike in ms

Parameters
----------
waveforms: ndarray
    the waveforms of the cluster to be analysed

Returns
-------
p2t: float
    The mean peak-to-trough time for the channel (electrode) that has
    the strongest (highest amplitude) signal. Units are ms
ephysiopy/common/spikecalcs.py
p2t_time
rhayman/ephysiopy
python
def p2t_time(self, waveforms):
    best_chan = np.argmax(np.max(np.mean(waveforms, 0), 1))
    tP = self.getParam(waveforms, param='tP')
    tT = self.getParam(waveforms, param='tT')
    mn_tP = np.mean(tP, 0)
    mn_tT = np.mean(tT, 0)
    p2t = np.abs(mn_tP[best_chan] - mn_tT[best_chan])
    return p2t * 1000
def plotClusterSpace(self, waveforms, param='Amp', clusts=None, bins=256, **kwargs):
    """
    TODO: aspect of plot boxes in ImageGrid not right as scaled by range of
    values now
    """
    from ephysiopy.dacq2py.tintcolours import colours as tcols
    import matplotlib.colors as colors
    from itertools import combinations
    from mpl_toolkits.axes_grid1 import ImageGrid
    self.scaling = np.full(4, 15)
    amps = self.getParam(waveforms, param=param)
    bad_electrodes = np.setdiff1d(np.array(range(4)),
                                  np.array(np.sum(amps, 0).nonzero())[0])
    cmap = np.tile(tcols[0], (bins, 1))
    cmap[0] = (1, 1, 1)
    cmap = colors.ListedColormap(cmap)
    cmap._init()
    alpha_vals = np.ones(cmap.N + 3)
    alpha_vals[0] = 0
    cmap._lut[:, -1] = alpha_vals
    cmb = combinations(range(4), 2)
    if 'fig' in kwargs:
        fig = kwargs['fig']
    else:
        fig = plt.figure(figsize=(8, 6))
    grid = ImageGrid(fig, 111, nrows_ncols=(2, 3), axes_pad=0.1, aspect=False)
    if 'Amp' in param:
        myRange = np.vstack((self.scaling * 0, self.scaling * 2))
    else:
        myRange = None
    clustCMap0 = np.tile(tcols[0], (bins, 1))
    clustCMap0[0] = (1, 1, 1)
    clustCMap0 = colors.ListedColormap(clustCMap0)
    clustCMap0._init()
    clustCMap0._lut[:, -1] = alpha_vals
    for i, c in enumerate(cmb):
        if c not in bad_electrodes:
            # guard against myRange being None for non-'Amp' params
            rng = myRange[:, c].T if myRange is not None else None
            h, ye, xe = np.histogram2d(amps[:, c[0]], amps[:, c[1]],
                                       range=rng, bins=bins)
            x, y = np.meshgrid(xe[0:-1], ye[0:-1])
            grid[i].pcolormesh(x, y, h, cmap=clustCMap0,
                               shading='nearest', edgecolors='face')
            h, ye, xe = np.histogram2d(amps[:, c[0]], amps[:, c[1]],
                                       range=rng, bins=bins)
            clustCMap = np.tile(tcols[1], (bins, 1))
            clustCMap[0] = (1, 1, 1)
            clustCMap = colors.ListedColormap(clustCMap)
            clustCMap._init()
            clustCMap._lut[:, -1] = alpha_vals
            grid[i].pcolormesh(x, y, h, cmap=clustCMap,
                               shading='nearest', edgecolors='face')
            s = str(c[0] + 1) + ' v ' + str(c[1] + 1)
            grid[i].text(0.05, 0.95, s, va='top', ha='left', size='small',
                         color='k', transform=grid[i].transAxes)
            grid[i].set_xlim(xe.min(), xe.max())
            grid[i].set_ylim(ye.min(), ye.max())
    plt.setp([a.get_xticklabels() for a in grid], visible=False)
    plt.setp([a.get_yticklabels() for a in grid], visible=False)
    return fig
2,114,415,613,261,320,700
TODO: aspect of plot boxes in ImageGrid not right as scaled by range of values now
ephysiopy/common/spikecalcs.py
plotClusterSpace
rhayman/ephysiopy
python
def plotClusterSpace(self, waveforms, param='Amp', clusts=None, bins=256, **kwargs):
    from ephysiopy.dacq2py.tintcolours import colours as tcols
    import matplotlib.colors as colors
    from itertools import combinations
    from mpl_toolkits.axes_grid1 import ImageGrid
    self.scaling = np.full(4, 15)
    amps = self.getParam(waveforms, param=param)
    bad_electrodes = np.setdiff1d(np.array(range(4)),
                                  np.array(np.sum(amps, 0).nonzero())[0])
    cmap = np.tile(tcols[0], (bins, 1))
    cmap[0] = (1, 1, 1)
    cmap = colors.ListedColormap(cmap)
    cmap._init()
    alpha_vals = np.ones(cmap.N + 3)
    alpha_vals[0] = 0
    cmap._lut[:, -1] = alpha_vals
    cmb = combinations(range(4), 2)
    if 'fig' in kwargs:
        fig = kwargs['fig']
    else:
        fig = plt.figure(figsize=(8, 6))
    grid = ImageGrid(fig, 111, nrows_ncols=(2, 3), axes_pad=0.1, aspect=False)
    if 'Amp' in param:
        myRange = np.vstack((self.scaling * 0, self.scaling * 2))
    else:
        myRange = None
    clustCMap0 = np.tile(tcols[0], (bins, 1))
    clustCMap0[0] = (1, 1, 1)
    clustCMap0 = colors.ListedColormap(clustCMap0)
    clustCMap0._init()
    clustCMap0._lut[:, -1] = alpha_vals
    for i, c in enumerate(cmb):
        if c not in bad_electrodes:
            # guard against myRange being None for non-'Amp' params
            rng = myRange[:, c].T if myRange is not None else None
            h, ye, xe = np.histogram2d(amps[:, c[0]], amps[:, c[1]],
                                       range=rng, bins=bins)
            x, y = np.meshgrid(xe[0:-1], ye[0:-1])
            grid[i].pcolormesh(x, y, h, cmap=clustCMap0,
                               shading='nearest', edgecolors='face')
            h, ye, xe = np.histogram2d(amps[:, c[0]], amps[:, c[1]],
                                       range=rng, bins=bins)
            clustCMap = np.tile(tcols[1], (bins, 1))
            clustCMap[0] = (1, 1, 1)
            clustCMap = colors.ListedColormap(clustCMap)
            clustCMap._init()
            clustCMap._lut[:, -1] = alpha_vals
            grid[i].pcolormesh(x, y, h, cmap=clustCMap,
                               shading='nearest', edgecolors='face')
            s = str(c[0] + 1) + ' v ' + str(c[1] + 1)
            grid[i].text(0.05, 0.95, s, va='top', ha='left', size='small',
                         color='k', transform=grid[i].transAxes)
            grid[i].set_xlim(xe.min(), xe.max())
            grid[i].set_ylim(ye.min(), ye.max())
    plt.setp([a.get_xticklabels() for a in grid], visible=False)
    plt.setp([a.get_yticklabels() for a in grid], visible=False)
    return fig
def _fix_int_dtypes(self, df: pd.DataFrame) -> None:
    """
    Mutate DataFrame to set dtypes for int columns containing NaN values.
    """
    for col in df:
        if ('float' in df[col].dtype.name) and df[col].hasnans:
            notna_series = df[col].dropna().values
            if np.isclose(notna_series, notna_series.astype(int)).all():
                df[col] = np.where(df[col].isnull(), None, df[col])
                df[col] = df[col].astype(pd.Int64Dtype())
1,217,929,678,337,796,400
Mutate DataFrame to set dtypes for int columns containing NaN values.
airflow/providers/amazon/aws/transfers/mysql_to_s3.py
_fix_int_dtypes
alphasights/airflow
python
def _fix_int_dtypes(self, df: pd.DataFrame) -> None:
    for col in df:
        if ('float' in df[col].dtype.name) and df[col].hasnans:
            notna_series = df[col].dropna().values
            if np.isclose(notna_series, notna_series.astype(int)).all():
                df[col] = np.where(df[col].isnull(), None, df[col])
                df[col] = df[col].astype(pd.Int64Dtype())
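A standalone sketch of what _fix_int_dtypes does, using made-up data: float columns that are really integers-plus-NaN become pandas' nullable Int64, while genuinely fractional columns are left alone.

import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, 2.0, np.nan], 'b': [0.5, 1.5, np.nan]})
for col in df:
    if 'float' in df[col].dtype.name and df[col].hasnans:
        notna = df[col].dropna().values
        if np.isclose(notna, notna.astype(int)).all():
            # NaN -> None so the nullable integer cast keeps the gaps
            df[col] = np.where(df[col].isnull(), None, df[col])
            df[col] = df[col].astype(pd.Int64Dtype())
print(df.dtypes)   # 'a' becomes Int64, 'b' stays float64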
def test_transforms():
    """Test basic transforms"""
    xfm = np.random.randn(4, 4).astype(np.float32)
    new_xfm = xfm.dot(rotate(180, (1, 0, 0)).dot(rotate(-90, (0, 1, 0))))
    new_xfm = new_xfm.dot(rotate(90, (0, 0, 1)).dot(rotate(90, (0, 1, 0))))
    new_xfm = new_xfm.dot(rotate(90, (1, 0, 0)))
    assert_allclose(xfm, new_xfm)
    new_xfm = translate((1, -1, 1)).dot(translate((-1, 1, -1))).dot(xfm)
    assert_allclose(xfm, new_xfm)
    new_xfm = scale((1, 2, 3)).dot(scale((1, 1.0 / 2.0, 1.0 / 3.0))).dot(xfm)
    assert_allclose(xfm, new_xfm)
    xfm = ortho(-1, 1, -1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
    xfm = frustum(-1, 1, -1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
    xfm = perspective(1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
-5,003,356,567,224,349,000
Test basic transforms
vispy/util/tests/test_transforms.py
test_transforms
izaid/vispy
python
def test_transforms():
    xfm = np.random.randn(4, 4).astype(np.float32)
    new_xfm = xfm.dot(rotate(180, (1, 0, 0)).dot(rotate(-90, (0, 1, 0))))
    new_xfm = new_xfm.dot(rotate(90, (0, 0, 1)).dot(rotate(90, (0, 1, 0))))
    new_xfm = new_xfm.dot(rotate(90, (1, 0, 0)))
    assert_allclose(xfm, new_xfm)
    new_xfm = translate((1, -1, 1)).dot(translate((-1, 1, -1))).dot(xfm)
    assert_allclose(xfm, new_xfm)
    new_xfm = scale((1, 2, 3)).dot(scale((1, 1.0 / 2.0, 1.0 / 3.0))).dot(xfm)
    assert_allclose(xfm, new_xfm)
    xfm = ortho(-1, 1, -1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
    xfm = frustum(-1, 1, -1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
    xfm = perspective(1, 1, -1, 1)
    assert_equal(xfm.shape, (4, 4))
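The first assertions rely on composing each transform with its inverse to recover the original matrix. A generic illustration of that property with a hand-rolled translation matrix (plain column-vector convention here; vispy's own helpers may use a different matrix layout):

import numpy as np

def translate_mat(v):
    # 4x4 homogeneous translation, column-vector convention (assumed)
    m = np.eye(4)
    m[:3, 3] = v
    return m

xfm = np.random.randn(4, 4)
# a translation followed by its inverse is the identity
new_xfm = translate_mat((1, -1, 1)) @ translate_mat((-1, 1, -1)) @ xfm
assert np.allclose(xfm, new_xfm)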
@property
def search_path(self):
    """
    Search first the vendor package then as a natural package.
    """
    yield self.vendor_pkg + '.'
    yield ''
-4,364,949,470,265,435,600
Search first the vendor package then as a natural package.
virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py
search_path
MARTIN-OMOLLO/PITCH
python
@property
def search_path(self):
    yield self.vendor_pkg + '.'
    yield ''
def _module_matches_namespace(self, fullname):
    """Figure out if the target module is vendored."""
    root, base, target = fullname.partition(self.root_name + '.')
    return (not root) and any(map(target.startswith, self.vendored_names))
1,613,554,549,028,283,000
Figure out if the target module is vendored.
virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py
_module_matches_namespace
MARTIN-OMOLLO/PITCH
python
def _module_matches_namespace(self, fullname):
    root, base, target = fullname.partition(self.root_name + '.')
    return (not root) and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
    """
    Iterate over the search path to locate and load fullname.
    """
    root, base, target = fullname.partition(self.root_name + '.')
    for prefix in self.search_path:
        try:
            extant = prefix + target
            __import__(extant)
            mod = sys.modules[extant]
            sys.modules[fullname] = mod
            return mod
        except ImportError:
            pass
    else:
        raise ImportError(
            "The '{target}' package is required; "
            'normally this is bundled with this package so if you get '
            'this warning, consult the packager of your '
            'distribution.'.format(**locals()))
-5,310,695,123,598,882,000
Iterate over the search path to locate and load fullname.
virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py
load_module
MARTIN-OMOLLO/PITCH
python
def load_module(self, fullname):
    root, base, target = fullname.partition(self.root_name + '.')
    for prefix in self.search_path:
        try:
            extant = prefix + target
            __import__(extant)
            mod = sys.modules[extant]
            sys.modules[fullname] = mod
            return mod
        except ImportError:
            pass
    else:
        raise ImportError(
            "The '{target}' package is required; "
            'normally this is bundled with this package so if you get '
            'this warning, consult the packager of your '
            'distribution.'.format(**locals()))
def find_spec(self, fullname, path=None, target=None):
    """Return a module spec for vendored names."""
    return (importlib.util.spec_from_loader(fullname, self)
            if self._module_matches_namespace(fullname) else None)
7,781,275,529,721,239,000
Return a module spec for vendored names.
virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py
find_spec
MARTIN-OMOLLO/PITCH
python
def find_spec(self, fullname, path=None, target=None):
    return (importlib.util.spec_from_loader(fullname, self)
            if self._module_matches_namespace(fullname) else None)
def install(self):
    """
    Install this importer into sys.meta_path if not already present.
    """
    if self not in sys.meta_path:
        sys.meta_path.append(self)
7,688,392,835,112,288,000
Install this importer into sys.meta_path if not already present.
virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py
install
MARTIN-OMOLLO/PITCH
python
def install(self):
    if self not in sys.meta_path:
        sys.meta_path.append(self)
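Taken together these methods form setuptools' VendorImporter. A hedged sketch of how it is typically wired up at module level (the exact vendored names, argument order, and vendor path are assumptions, not verified against this snapshot):

# Illustrative wiring only - names and arguments are assumptions.
names = 'packaging', 'pyparsing', 'ordered_set'
VendorImporter(__name__, names, 'setuptools._vendor').install()
# After install(), `import setuptools.extern.packaging` is served from
# the vendored copy first (via search_path), falling back to a top-level
# `packaging` package if the vendored one is missing.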
def passthrough(x):
    """Return x."""
    return x
-3,003,717,923,206,876,700
Return x.
tests/tools_tests.py
passthrough
nasqueron/pywikibot
python
def passthrough(x):
    return x
def test_wrapper(self):
    """Create a test instance and verify the wrapper redirects."""
    obj = self.DummyClass()
    wrapped = tools.ContextManagerWrapper(obj)
    self.assertIs(wrapped.class_var, obj.class_var)
    self.assertIs(wrapped.instance_var, obj.instance_var)
    self.assertIs(wrapped._wrapped, obj)
    self.assertFalse(obj.closed)
    with wrapped as unwrapped:
        self.assertFalse(obj.closed)
        self.assertIs(unwrapped, obj)
        unwrapped.class_var = 47
    self.assertTrue(obj.closed)
    self.assertEqual(wrapped.class_var, 47)
-4,267,953,912,771,476,000
Create a test instance and verify the wrapper redirects.
tests/tools_tests.py
test_wrapper
nasqueron/pywikibot
python
def test_wrapper(self):
    obj = self.DummyClass()
    wrapped = tools.ContextManagerWrapper(obj)
    self.assertIs(wrapped.class_var, obj.class_var)
    self.assertIs(wrapped.instance_var, obj.instance_var)
    self.assertIs(wrapped._wrapped, obj)
    self.assertFalse(obj.closed)
    with wrapped as unwrapped:
        self.assertFalse(obj.closed)
        self.assertIs(unwrapped, obj)
        unwrapped.class_var = 47
    self.assertTrue(obj.closed)
    self.assertEqual(wrapped.class_var, 47)
def test_exec_wrapper(self):
    """Check that the wrapper permits exceptions."""
    wrapper = tools.ContextManagerWrapper(self.DummyClass())
    self.assertFalse(wrapper.closed)
    with self.assertRaisesRegex(
            ZeroDivisionError,
            '(integer division or modulo by zero|division by zero)'):
        with wrapper:
            1 / 0
    self.assertTrue(wrapper.closed)
6,113,006,524,318,023,000
Check that the wrapper permits exceptions.
tests/tools_tests.py
test_exec_wrapper
nasqueron/pywikibot
python
def test_exec_wrapper(self):
    wrapper = tools.ContextManagerWrapper(self.DummyClass())
    self.assertFalse(wrapper.closed)
    with self.assertRaisesRegex(
            ZeroDivisionError,
            '(integer division or modulo by zero|division by zero)'):
        with wrapper:
            1 / 0
    self.assertTrue(wrapper.closed)
@classmethod
def setUpClass(cls):
    """Define base_file and original_content."""
    super(OpenArchiveTestCase, cls).setUpClass()
    cls.base_file = join_xml_data_path('article-pyrus.xml')
    with open(cls.base_file, 'rb') as f:
        cls.original_content = f.read().replace(b'\r\n', b'\n')
-3,331,918,296,333,598,000
Define base_file and original_content.
tests/tools_tests.py
setUpClass
nasqueron/pywikibot
python
@classmethod
def setUpClass(cls):
    super(OpenArchiveTestCase, cls).setUpClass()
    cls.base_file = join_xml_data_path('article-pyrus.xml')
    with open(cls.base_file, 'rb') as f:
        cls.original_content = f.read().replace(b'\r\n', b'\n')
def _get_content(self, *args, **kwargs):
    """Use open_archive and return content using a with-statement."""
    with tools.open_archive(*args, **kwargs) as f:
        return f.read().replace(b'\r\n', b'\n')
9,127,821,848,596,652,000
Use open_archive and return content using a with-statement.
tests/tools_tests.py
_get_content
nasqueron/pywikibot
python
def _get_content(self, *args, **kwargs):
    with tools.open_archive(*args, **kwargs) as f:
        return f.read().replace(b'\r\n', b'\n')
def test_open_archive_normal(self):
    """Test open_archive with no compression in the standard library."""
    self.assertEqual(self._get_content(self.base_file), self.original_content)
3,668,411,129,950,133,000
Test open_archive with no compression in the standard library.
tests/tools_tests.py
test_open_archive_normal
nasqueron/pywikibot
python
def test_open_archive_normal(self):
    self.assertEqual(self._get_content(self.base_file), self.original_content)
def test_open_archive_bz2(self):
    """Test open_archive with bz2 compressor in the standard library."""
    self.assertEqual(self._get_content(self.base_file + '.bz2'),
                     self.original_content)
    self.assertEqual(self._get_content(self.base_file + '.bz2',
                                       use_extension=False),
                     self.original_content)
1,617,084,888,435,620,000
Test open_archive with bz2 compressor in the standard library.
tests/tools_tests.py
test_open_archive_bz2
nasqueron/pywikibot
python
def test_open_archive_bz2(self):
    self.assertEqual(self._get_content(self.base_file + '.bz2'),
                     self.original_content)
    self.assertEqual(self._get_content(self.base_file + '.bz2',
                                       use_extension=False),
                     self.original_content)
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
    """Test open_archive when the bz2file library is used."""
    old_bz2 = tools.bz2
    try:
        tools.bz2 = __import__('bz2file')
        self.assertEqual(self._get_content(self.base_file + '.bz2'),
                         self.original_content)
        self.assertEqual(self._get_content(self.base_file + '.bz2',
                                           use_extension=False),
                         self.original_content)
    finally:
        tools.bz2 = old_bz2
-4,334,069,004,180,986,000
Test open_archive when the bz2file library is used.
tests/tools_tests.py
test_open_archive_with_bz2file
nasqueron/pywikibot
python
@require_modules('bz2file')
def test_open_archive_with_bz2file(self):
    old_bz2 = tools.bz2
    try:
        tools.bz2 = __import__('bz2file')
        self.assertEqual(self._get_content(self.base_file + '.bz2'),
                         self.original_content)
        self.assertEqual(self._get_content(self.base_file + '.bz2',
                                           use_extension=False),
                         self.original_content)
    finally:
        tools.bz2 = old_bz2
def test_open_archive_without_bz2(self):
    """Test open_archive when bz2 and bz2file are not available."""
    old_bz2 = tools.bz2
    BZ2_IMPORT_ERROR = ('This is a fake exception message that is used '
                        'when bz2 and bz2file is not importable')
    try:
        tools.bz2 = ImportError(BZ2_IMPORT_ERROR)
        self.assertRaisesRegex(ImportError, BZ2_IMPORT_ERROR,
                               self._get_content, self.base_file + '.bz2')
    finally:
        tools.bz2 = old_bz2
-2,222,823,590,668,212,000
Test open_archive when bz2 and bz2file are not available.
tests/tools_tests.py
test_open_archive_without_bz2
nasqueron/pywikibot
python
def test_open_archive_without_bz2(self):
    old_bz2 = tools.bz2
    BZ2_IMPORT_ERROR = ('This is a fake exception message that is used '
                        'when bz2 and bz2file is not importable')
    try:
        tools.bz2 = ImportError(BZ2_IMPORT_ERROR)
        self.assertRaisesRegex(ImportError, BZ2_IMPORT_ERROR,
                               self._get_content, self.base_file + '.bz2')
    finally:
        tools.bz2 = old_bz2
def test_open_archive_gz(self):
    """Test open_archive with gz compressor in the standard library."""
    self.assertEqual(self._get_content(self.base_file + '.gz'),
                     self.original_content)
-7,963,129,132,435,812,000
Test open_archive with gz compressor in the standard library.
tests/tools_tests.py
test_open_archive_gz
nasqueron/pywikibot
python
def test_open_archive_gz(self):
    self.assertEqual(self._get_content(self.base_file + '.gz'),
                     self.original_content)
def test_open_archive_7z(self):
    """Test open_archive with 7za if installed."""
    FAILED_TO_OPEN_7ZA = 'Unexpected STDERR output from 7za '
    try:
        subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
    except OSError:
        raise unittest.SkipTest('7za not installed')
    self.assertEqual(self._get_content(self.base_file + '.7z'),
                     self.original_content)
    self.assertRaisesRegex(OSError, FAILED_TO_OPEN_7ZA, self._get_content,
                           self.base_file + '_invalid.7z',
                           use_extension=True)
7,100,321,156,077,318,000
Test open_archive with 7za if installed.
tests/tools_tests.py
test_open_archive_7z
nasqueron/pywikibot
python
def test_open_archive_7z(self):
    FAILED_TO_OPEN_7ZA = 'Unexpected STDERR output from 7za '
    try:
        subprocess.Popen(['7za'], stdout=subprocess.PIPE).stdout.close()
    except OSError:
        raise unittest.SkipTest('7za not installed')
    self.assertEqual(self._get_content(self.base_file + '.7z'),
                     self.original_content)
    self.assertRaisesRegex(OSError, FAILED_TO_OPEN_7ZA, self._get_content,
                           self.base_file + '_invalid.7z',
                           use_extension=True)
def _get_content(self, *args, **kwargs):
    """Use open_compressed and return content using a with-statement."""
    if kwargs.get('use_extension') is False:
        kwargs['use_extension'] = True
    with tools.open_compressed(*args, **kwargs) as f:
        content = f.read().replace(b'\r\n', b'\n')
    self.assertOneDeprecation(self.INSTEAD)
    return content
-4,683,509,650,639,901,000
Use open_compressed and return content using a with-statement.
tests/tools_tests.py
_get_content
nasqueron/pywikibot
python
def _get_content(self, *args, **kwargs):
    if kwargs.get('use_extension') is False:
        kwargs['use_extension'] = True
    with tools.open_compressed(*args, **kwargs) as f:
        content = f.read().replace(b'\r\n', b'\n')
    self.assertOneDeprecation(self.INSTEAD)
    return content
@classmethod
def setUpClass(cls):
    """Define base_file and original_content."""
    super(OpenArchiveWriteTestCase, cls).setUpClass()
    cls.base_file = join_xml_data_path('article-pyrus.xml')
    with open(cls.base_file, 'rb') as f:
        cls.original_content = f.read().replace(b'\r\n', b'\n')
-8,544,897,831,322,012,000
Define base_file and original_content.
tests/tools_tests.py
setUpClass
nasqueron/pywikibot
python
@classmethod
def setUpClass(cls):
    super(OpenArchiveWriteTestCase, cls).setUpClass()
    cls.base_file = join_xml_data_path('article-pyrus.xml')
    with open(cls.base_file, 'rb') as f:
        cls.original_content = f.read().replace(b'\r\n', b'\n')
def test_invalid_modes(self):
    """Test various invalid mode configurations."""
    INVALID_MODE_RA = 'Invalid mode: "ra"'
    INVALID_MODE_RT = 'Invalid mode: "rt"'
    INVALID_MODE_BR = 'Invalid mode: "br"'
    MN_DETECTION_ONLY = 'Magic number detection only when reading'
    self.assertRaisesRegex(ValueError, INVALID_MODE_RA,
                           tools.open_archive, '/dev/null', 'ra')
    self.assertRaisesRegex(ValueError, INVALID_MODE_RT,
                           tools.open_archive, '/dev/null', 'rt')
    self.assertRaisesRegex(ValueError, INVALID_MODE_BR,
                           tools.open_archive, '/dev/null', 'br')
    self.assertRaisesRegex(ValueError, MN_DETECTION_ONLY,
                           tools.open_archive, '/dev/null', 'wb', False)
1,863,774,189,160,259,600
Test various invalid mode configurations.
tests/tools_tests.py
test_invalid_modes
nasqueron/pywikibot
python
def test_invalid_modes(self):
    INVALID_MODE_RA = 'Invalid mode: "ra"'
    INVALID_MODE_RT = 'Invalid mode: "rt"'
    INVALID_MODE_BR = 'Invalid mode: "br"'
    MN_DETECTION_ONLY = 'Magic number detection only when reading'
    self.assertRaisesRegex(ValueError, INVALID_MODE_RA,
                           tools.open_archive, '/dev/null', 'ra')
    self.assertRaisesRegex(ValueError, INVALID_MODE_RT,
                           tools.open_archive, '/dev/null', 'rt')
    self.assertRaisesRegex(ValueError, INVALID_MODE_BR,
                           tools.open_archive, '/dev/null', 'br')
    self.assertRaisesRegex(ValueError, MN_DETECTION_ONLY,
                           tools.open_archive, '/dev/null', 'wb', False)
def test_binary_mode(self):
    """Test that it uses binary mode."""
    with tools.open_archive(self.base_file, 'r') as f:
        self.assertEqual(f.mode, 'rb')
        self.assertIsInstance(f.read(), bytes)
1,719,401,108,466,029,300
Test that it uses binary mode.
tests/tools_tests.py
test_binary_mode
nasqueron/pywikibot
python
def test_binary_mode(self):
    with tools.open_archive(self.base_file, 'r') as f:
        self.assertEqual(f.mode, 'rb')
        self.assertIsInstance(f.read(), bytes)
def test_write_archive_bz2(self):
    """Test writing a bz2 archive."""
    content = self._write_content('.bz2')
    with open(self.base_file + '.bz2', 'rb') as f:
        self.assertEqual(content, f.read())
5,309,385,862,999,258,000
Test writing a bz2 archive.
tests/tools_tests.py
test_write_archive_bz2
nasqueron/pywikibot
python
def test_write_archive_bz2(self):
    content = self._write_content('.bz2')
    with open(self.base_file + '.bz2', 'rb') as f:
        self.assertEqual(content, f.read())
def test_write_archive_gz(self):
    """Test writing a gz archive."""
    content = self._write_content('.gz')
    self.assertEqual(content[:3], b'\x1f\x8b\x08')
-2,919,605,937,170,006,500
Test writing a gz archive.
tests/tools_tests.py
test_write_archive_gz
nasqueron/pywikibot
python
def test_write_archive_gz(self):
    content = self._write_content('.gz')
    self.assertEqual(content[:3], b'\x1f\x8b\x08')
def test_write_archive_7z(self):
    """Test writing an archive as a 7z archive."""
    FAILED_TO_WRITE_7Z = 'It is not possible to write a 7z file.'
    self.assertRaisesRegex(NotImplementedError, FAILED_TO_WRITE_7Z,
                           tools.open_archive, '/dev/null.7z', mode='wb')
559,217,299,130,519,900
Test writing an archive as a 7z archive.
tests/tools_tests.py
test_write_archive_7z
nasqueron/pywikibot
python
def test_write_archive_7z(self):
    FAILED_TO_WRITE_7Z = 'It is not possible to write a 7z file.'
    self.assertRaisesRegex(NotImplementedError, FAILED_TO_WRITE_7Z,
                           tools.open_archive, '/dev/null.7z', mode='wb')
def test_single(self):
    """Test that it returns the dict itself when there is only one."""
    self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
    self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
-1,434,746,132,220,353,800
Test that it returns the dict itself when there is only one.
tests/tools_tests.py
test_single
nasqueron/pywikibot
python
def test_single(self):
    self.assertEqual(tools.merge_unique_dicts(self.dct1), self.dct1)
    self.assertEqual(tools.merge_unique_dicts(**self.dct1), self.dct1)
def test_multiple(self):
    """Test that it actually merges dicts."""
    self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
                     self.dct_both)
    self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
                     self.dct_both)
-3,050,643,579,955,607,000
Test that it actually merges dicts.
tests/tools_tests.py
test_multiple
nasqueron/pywikibot
python
def test_multiple(self):
    self.assertEqual(tools.merge_unique_dicts(self.dct1, self.dct2),
                     self.dct_both)
    self.assertEqual(tools.merge_unique_dicts(self.dct2, **self.dct1),
                     self.dct_both)
def test_different_type(self):
    """Test that the keys can be different types."""
    self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
                     {'1': 'str', 1: 'int'})
5,023,755,342,220,285,000
Test that the keys can be different types.
tests/tools_tests.py
test_different_type
nasqueron/pywikibot
python
def test_different_type(self):
    self.assertEqual(tools.merge_unique_dicts({'1': 'str'}, {1: 'int'}),
                     {'1': 'str', 1: 'int'})
def test_conflict(self):
    """Test that it detects conflicts."""
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, **{'42': 'bad'})
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, self.dct1)
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, **self.dct1)
-6,342,496,012,056,625,000
Test that it detects conflicts.
tests/tools_tests.py
test_conflict
nasqueron/pywikibot
python
def test_conflict(self):
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, **{'42': 'bad'})
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, self.dct1)
    self.assertRaisesRegex(ValueError, '42', tools.merge_unique_dicts,
                           self.dct1, **self.dct1)
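These four tests pin down the contract of merge_unique_dicts. A minimal re-implementation sketch consistent with that contract (pywikibot's actual source may differ in details):

def merge_unique_dicts(*args, **kwargs):
    """Merge dicts, raising ValueError when a key appears more than once."""
    result = {}
    for dct in list(args) + [kwargs]:
        conflicts = set(result) & set(dct)
        if conflicts:
            raise ValueError('Multiple dicts contain the same keys: '
                             + ', '.join(sorted(str(k) for k in conflicts)))
        result.update(dct)
    return result

# mirrors test_different_type above
assert merge_unique_dicts({'1': 'str'}, {1: 'int'}) == {'1': 'str', 1: 'int'}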
def test_show_default_marker(self):
    """Test marker is shown without kwargs."""
    stop = 2
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[:-1], self.it[:stop])
    self.assertEqual(it[-1], '…')
-5,647,136,174,958,706,000
Test marker is shown without kwargs.
tests/tools_tests.py
test_show_default_marker
nasqueron/pywikibot
python
def test_show_default_marker(self):
    stop = 2
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[:-1], self.it[:stop])
    self.assertEqual(it[-1], '…')
def test_show_custom_marker(self):
    """Test correct marker is shown with kwargs."""
    stop = 2
    it = list(tools.islice_with_ellipsis(self.it, stop, marker='new'))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[:-1], self.it[:stop])
    self.assertNotEqual(it[-1], '…')
    self.assertEqual(it[-1], 'new')
-1,190,494,465,889,196,800
Test correct marker is shown with kwargs.
tests/tools_tests.py
test_show_custom_marker
nasqueron/pywikibot
python
def test_show_custom_marker(self):
    stop = 2
    it = list(tools.islice_with_ellipsis(self.it, stop, marker='new'))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[:-1], self.it[:stop])
    self.assertNotEqual(it[-1], '…')
    self.assertEqual(it[-1], 'new')
def test_show_marker_with_start_stop(self):
    """Test marker is shown with start and stop without kwargs."""
    start = 1
    stop = 3
    it = list(tools.islice_with_ellipsis(self.it, start, stop))
    self.assertEqual(len(it), (stop - start) + 1)
    self.assertEqual(it[:-1], self.it[start:stop])
    self.assertEqual(it[-1], '…')
3,370,575,341,154,751,000
Test marker is shown with start and stop without kwargs.
tests/tools_tests.py
test_show_marker_with_start_stop
nasqueron/pywikibot
python
def test_show_marker_with_start_stop(self):
    start = 1
    stop = 3
    it = list(tools.islice_with_ellipsis(self.it, start, stop))
    self.assertEqual(len(it), (stop - start) + 1)
    self.assertEqual(it[:-1], self.it[start:stop])
    self.assertEqual(it[-1], '…')
def test_show_custom_marker_with_start_stop(self):
    """Test marker is shown with start and stop with kwargs."""
    start = 1
    stop = 3
    it = list(tools.islice_with_ellipsis(self.it, start, stop, marker='new'))
    self.assertEqual(len(it), (stop - start) + 1)
    self.assertEqual(it[:-1], self.it[start:stop])
    self.assertNotEqual(it[-1], '…')
    self.assertEqual(it[-1], 'new')
2,889,475,869,026,697,000
Test marker is shown with start and stop with kwargs.
tests/tools_tests.py
test_show_custom_marker_with_start_stop
nasqueron/pywikibot
python
def test_show_custom_marker_with_start_stop(self):
    start = 1
    stop = 3
    it = list(tools.islice_with_ellipsis(self.it, start, stop, marker='new'))
    self.assertEqual(len(it), (stop - start) + 1)
    self.assertEqual(it[:-1], self.it[start:stop])
    self.assertNotEqual(it[-1], '…')
    self.assertEqual(it[-1], 'new')
def test_show_marker_with_stop_zero(self):
    """Test marker is shown with stop zero for a non-empty iterable."""
    stop = 0
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[-1], '…')
8,517,314,535,977,047,000
Test marker is shown with stop zero for a non-empty iterable.
tests/tools_tests.py
test_show_marker_with_stop_zero
nasqueron/pywikibot
python
def test_show_marker_with_stop_zero(self):
    stop = 0
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), stop + 1)
    self.assertEqual(it[-1], '…')
def test_do_not_show_marker_with_stop_zero(self):
    """Test marker is not shown with stop zero for an empty iterable."""
    stop = 0
    it = list(tools.islice_with_ellipsis(self.it_null, stop))
    self.assertEqual(len(it), stop)
-2,326,337,750,535,510,500
Test marker is not shown with stop zero for an empty iterable.
tests/tools_tests.py
test_do_not_show_marker_with_stop_zero
nasqueron/pywikibot
python
def test_do_not_show_marker_with_stop_zero(self):
    stop = 0
    it = list(tools.islice_with_ellipsis(self.it_null, stop))
    self.assertEqual(len(it), stop)
def test_do_not_show_marker(self):
    """Test marker is not shown when no marker is specified."""
    import itertools
    stop = 2
    it_1 = list(tools.islice_with_ellipsis(self.it, stop, marker=None))
    it_2 = list(itertools.islice(self.it, stop))
    self.assertEqual(it_1, it_2)
-4,157,401,792,266,222,000
Test marker is not shown when no marker is specified.
tests/tools_tests.py
test_do_not_show_marker
nasqueron/pywikibot
python
def test_do_not_show_marker(self):
    import itertools
    stop = 2
    it_1 = list(tools.islice_with_ellipsis(self.it, stop, marker=None))
    it_2 = list(itertools.islice(self.it, stop))
    self.assertEqual(it_1, it_2)
def test_do_not_show_marker_when_get_all(self):
    """Test marker is not shown when all elements are retrieved."""
    stop = None
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), len(self.it))
    self.assertEqual(it, self.it)
    self.assertNotEqual(it[-1], '…')
-6,378,311,575,050,977,000
Test marker is not shown when all elements are retrieved.
tests/tools_tests.py
test_do_not_show_marker_when_get_all
nasqueron/pywikibot
python
def test_do_not_show_marker_when_get_all(self):
    stop = None
    it = list(tools.islice_with_ellipsis(self.it, stop))
    self.assertEqual(len(it), len(self.it))
    self.assertEqual(it, self.it)
    self.assertNotEqual(it[-1], '…')
def test_accept_only_keyword_marker(self):
    """Test that the only keyword argument accepted is 'marker'."""
    GENERATOR_NOT_CALLABLE = "'generator' object is not callable"
    self.assertRaisesRegex(TypeError, GENERATOR_NOT_CALLABLE,
                           tools.islice_with_ellipsis(self.it, 1, t=''))
7,600,603,567,777,064,000
Test that the only keyword argument accepted is 'marker'.
tests/tools_tests.py
test_accept_only_keyword_marker
nasqueron/pywikibot
python
def test_accept_only_keyword_marker(self):
    GENERATOR_NOT_CALLABLE = "'generator' object is not callable"
    self.assertRaisesRegex(TypeError, GENERATOR_NOT_CALLABLE,
                           tools.islice_with_ellipsis(self.it, 1, t=''))
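A compact generator sketch consistent with the islice_with_ellipsis behaviour these tests describe (the real pywikibot implementation differs in details):

import itertools

def islice_with_ellipsis(iterable, *args, marker='…'):
    """Slice an iterable, yielding a marker if elements were cut off."""
    it = iter(iterable)
    yield from itertools.islice(it, *args)
    sentinel = object()
    # emit the marker only if something was left behind the slice
    if marker and next(it, sentinel) is not sentinel:
        yield marker

assert list(islice_with_ellipsis([1, 2, 3], 2)) == [1, 2, '…']
assert list(islice_with_ellipsis([], 0)) == []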
def __contains__(self, item):
    """Override to not process some items."""
    if item in self.skip_list:
        return True
    else:
        return super(SkipList, self).__contains__(item)
5,224,048,271,776,758,000
Override to not process some items.
tests/tools_tests.py
__contains__
nasqueron/pywikibot
python
def __contains__(self, item):
    if item in self.skip_list:
        return True
    else:
        return super(SkipList, self).__contains__(item)
def add(self, item):
    """Override to not add some items."""
    if item in self.process_again_list:
        return
    else:
        return super(ProcessAgainList, self).add(item)
-8,827,108,811,324,081,000
Override to not add some items.
tests/tools_tests.py
add
nasqueron/pywikibot
python
def add(self, item):
    if item in self.process_again_list:
        return
    else:
        return super(ProcessAgainList, self).add(item)
def __contains__(self, item):
    """Override to stop on encountering items."""
    if item in self.stop_list:
        raise StopIteration
    else:
        return super(ContainsStopList, self).__contains__(item)
-8,770,174,865,988,611,000
Override to stop on encountering items.
tests/tools_tests.py
__contains__
nasqueron/pywikibot
python
def __contains__(self, item):
    if item in self.stop_list:
        raise StopIteration
    else:
        return super(ContainsStopList, self).__contains__(item)
def add(self, item):
    """Override to not continue on encountering items."""
    if item in self.stop_list:
        raise StopIteration
    else:
        super(AddStopList, self).add(item)
-3,560,789,555,579,234,300
Override to not continue on encountering items.
tests/tools_tests.py
add
nasqueron/pywikibot
python
def add(self, item):
    if item in self.stop_list:
        raise StopIteration
    else:
        super(AddStopList, self).add(item)
def _test_dedup_int(self, deduped, deduper, key=None):
    """Test filter_unique results for int."""
    if not key:
        key = passthrough
    self.assertEqual(len(deduped), 0)
    self.assertEqual(next(deduper), 1)
    self.assertEqual(next(deduper), 3)
    if key in (hash, passthrough):
        if isinstance(deduped, tools.OrderedDict):
            self.assertEqual(list(deduped.keys()), [1, 3])
        elif isinstance(deduped, collections.Mapping):
            self.assertCountEqual(list(deduped.keys()), [1, 3])
        else:
            self.assertEqual(deduped, set([1, 3]))
    self.assertEqual(next(deduper), 2)
    self.assertEqual(next(deduper), 4)
    if key in (hash, passthrough):
        if isinstance(deduped, tools.OrderedDict):
            self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
        elif isinstance(deduped, collections.Mapping):
            self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
        else:
            self.assertEqual(deduped, set([1, 2, 3, 4]))
    self.assertRaises(StopIteration, next, deduper)
6,018,367,884,307,071,000
Test filter_unique results for int.
tests/tools_tests.py
_test_dedup_int
nasqueron/pywikibot
python
def _test_dedup_int(self, deduped, deduper, key=None):
    if not key:
        key = passthrough
    self.assertEqual(len(deduped), 0)
    self.assertEqual(next(deduper), 1)
    self.assertEqual(next(deduper), 3)
    if key in (hash, passthrough):
        if isinstance(deduped, tools.OrderedDict):
            self.assertEqual(list(deduped.keys()), [1, 3])
        elif isinstance(deduped, collections.Mapping):
            self.assertCountEqual(list(deduped.keys()), [1, 3])
        else:
            self.assertEqual(deduped, set([1, 3]))
    self.assertEqual(next(deduper), 2)
    self.assertEqual(next(deduper), 4)
    if key in (hash, passthrough):
        if isinstance(deduped, tools.OrderedDict):
            self.assertEqual(list(deduped.keys()), [1, 3, 2, 4])
        elif isinstance(deduped, collections.Mapping):
            self.assertCountEqual(list(deduped.keys()), [1, 2, 3, 4])
        else:
            self.assertEqual(deduped, set([1, 2, 3, 4]))
    self.assertRaises(StopIteration, next, deduper)
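A rough sketch of a filter_unique generator satisfying the container/key behaviour these helpers check (again, not pywikibot's exact code): items are yielded lazily, while their keys are recorded in a caller-supplied set-like or dict-like container.

def filter_unique(iterable, container=None, key=None):
    """Yield unseen items, tracking seen keys in `container` (assumed API)."""
    if container is None:
        container = set()
    for item in iterable:
        k = key(item) if key else item
        if k not in container:
            if hasattr(container, 'add'):
                container.add(k)          # set-like container
            else:
                container[k] = True       # dict-like container
            yield item

deduped = set()
gen = filter_unique([1, 3, 3, 1, 2, 4], container=deduped)
assert list(gen) == [1, 3, 2, 4] and deduped == {1, 2, 3, 4}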
def _test_dedup_str(self, deduped, deduper, key=None): 'Test filter_unique results for str.' if (not key): key = passthrough self.assertEqual(len(deduped), 0) self.assertEqual(next(deduper), '1') self.assertEqual(next(deduper), '3') if (key in (hash, passthrough)): if isinstance(deduped, collections.Mapping): self.assertEqual(deduped.keys(), [key('1'), key('3')]) else: self.assertEqual(deduped, set([key('1'), key('3')])) self.assertEqual(next(deduper), '2') self.assertEqual(next(deduper), '4') if (key in (hash, passthrough)): if isinstance(deduped, collections.Mapping): self.assertEqual(deduped.keys(), [key(i) for i in self.strs]) else: self.assertEqual(deduped, set((key(i) for i in self.strs))) self.assertRaises(StopIteration, next, deduper)
2,363,046,913,805,858,300
Test filter_unique results for str.
tests/tools_tests.py
_test_dedup_str
nasqueron/pywikibot
python
def _test_dedup_str(self, deduped, deduper, key=None): if (not key): key = passthrough self.assertEqual(len(deduped), 0) self.assertEqual(next(deduper), '1') self.assertEqual(next(deduper), '3') if (key in (hash, passthrough)): if isinstance(deduped, collections.Mapping): self.assertEqual(deduped.keys(), [key('1'), key('3')]) else: self.assertEqual(deduped, set([key('1'), key('3')])) self.assertEqual(next(deduper), '2') self.assertEqual(next(deduper), '4') if (key in (hash, passthrough)): if isinstance(deduped, collections.Mapping): self.assertEqual(deduped.keys(), [key(i) for i in self.strs]) else: self.assertEqual(deduped, set((key(i) for i in self.strs))) self.assertRaises(StopIteration, next, deduper)
def test_set(self): 'Test filter_unique with a set.' deduped = set() deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
-4,737,709,360,573,862,000
Test filter_unique with a set.
tests/tools_tests.py
test_set
nasqueron/pywikibot
python
def test_set(self): deduped = set() deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
def test_dict(self): 'Test filter_unique with a dict.' deduped = {} deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
63,968,957,205,018,136
Test filter_unique with a dict.
tests/tools_tests.py
test_dict
nasqueron/pywikibot
python
def test_dict(self): deduped = {} deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
def test_OrderedDict(self): 'Test filter_unique with an OrderedDict.' deduped = tools.OrderedDict() deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
-548,688,444,290,108,200
Test filter_unique with an OrderedDict.
tests/tools_tests.py
test_OrderedDict
nasqueron/pywikibot
python
def test_OrderedDict(self): deduped = tools.OrderedDict() deduper = tools.filter_unique(self.ints, container=deduped) self._test_dedup_int(deduped, deduper)
def test_int_hash(self): 'Test filter_unique with ints using hash as key.' deduped = set() deduper = tools.filter_unique(self.ints, container=deduped, key=hash) self._test_dedup_int(deduped, deduper, hash)
2,554,941,613,787,697,700
Test filter_unique with ints using hash as key.
tests/tools_tests.py
test_int_hash
nasqueron/pywikibot
python
def test_int_hash(self): deduped = set() deduper = tools.filter_unique(self.ints, container=deduped, key=hash) self._test_dedup_int(deduped, deduper, hash)
def test_int_id(self): 'Test filter_unique with ints using id as key.' deduped = set() deduper = tools.filter_unique(self.ints, container=deduped, key=id) self._test_dedup_int(deduped, deduper, id)
7,581,880,135,276,090,000
Test filter_unique with ints using id as key.
tests/tools_tests.py
test_int_id
nasqueron/pywikibot
python
def test_int_id(self): deduped = set() deduper = tools.filter_unique(self.ints, container=deduped, key=id) self._test_dedup_int(deduped, deduper, id)
def test_obj(self): 'Test filter_unique with objects.' deduped = set() deduper = tools.filter_unique(self.decs, container=deduped) self._test_dedup_int(deduped, deduper)
581,004,810,495,251,000
Test filter_unique with objects.
tests/tools_tests.py
test_obj
nasqueron/pywikibot
python
def test_obj(self): deduped = set() deduper = tools.filter_unique(self.decs, container=deduped) self._test_dedup_int(deduped, deduper)
def test_obj_hash(self): 'Test filter_unique with objects using hash as key.' deduped = set() deduper = tools.filter_unique(self.decs, container=deduped, key=hash) self._test_dedup_int(deduped, deduper, hash)
6,557,149,698,377,117,000
Test filter_unique with objects using hash as key.
tests/tools_tests.py
test_obj_hash
nasqueron/pywikibot
python
def test_obj_hash(self): deduped = set() deduper = tools.filter_unique(self.decs, container=deduped, key=hash) self._test_dedup_int(deduped, deduper, hash)
def test_obj_id(self): 'Test filter_unique with objects using id as key, which fails.' deduped = set() deduper = tools.filter_unique(self.decs, container=deduped, key=id) self.assertEqual(len(deduped), 0) for _ in self.decs: self.assertEqual(id(next(deduper)), deduped.pop()) self.assertRaises(StopIteration, next, deduper) deduper_ids = list(tools.filter_unique(self.decs, key=id)) self.assertNotEqual(len(deduper_ids), len(set(deduper_ids)))
7,523,221,529,014,139,000
Test filter_unique with objects using id as key, which fails.
tests/tools_tests.py
test_obj_id
nasqueron/pywikibot
python
def test_obj_id(self): deduped = set() deduper = tools.filter_unique(self.decs, container=deduped, key=id) self.assertEqual(len(deduped), 0) for _ in self.decs: self.assertEqual(id(next(deduper)), deduped.pop()) self.assertRaises(StopIteration, next, deduper) deduper_ids = list(tools.filter_unique(self.decs, key=id)) self.assertNotEqual(len(deduper_ids), len(set(deduper_ids)))
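test_obj_id passes only because equal-valued objects are still distinct instances, so id() never repeats and nothing gets filtered. A standalone illustration of the same effect (Decimal is chosen because self.decs is presumably built from Decimal values matching self.ints; that setup lies outside this excerpt):

    from decimal import Decimal

    decs = [Decimal(1), Decimal(3), Decimal(2), Decimal(1), Decimal(1), Decimal(4)]
    print(len({id(d) for d in decs}))   # 6 -- every instance has a unique identity
    print(len(set(decs)))               # 4 -- value-based dedup collapses the repeats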
def test_str(self): 'Test filter_unique with str.' deduped = set() deduper = tools.filter_unique(self.strs, container=deduped) self._test_dedup_str(deduped, deduper)
-7,648,199,100,553,032,000
Test filter_unique with str.
tests/tools_tests.py
test_str
nasqueron/pywikibot
python
def test_str(self): deduped = set() deduper = tools.filter_unique(self.strs, container=deduped) self._test_dedup_str(deduped, deduper)
def test_str_hash(self): 'Test filter_unique with str using hash as key.' deduped = set() deduper = tools.filter_unique(self.strs, container=deduped, key=hash) self._test_dedup_str(deduped, deduper, hash)
2,405,488,532,924,476,000
Test filter_unique with str using hash as key.
tests/tools_tests.py
test_str_hash
nasqueron/pywikibot
python
def test_str_hash(self): deduped = set() deduper = tools.filter_unique(self.strs, container=deduped, key=hash) self._test_dedup_str(deduped, deduper, hash)
@unittest.skipIf((not tools.PY2), 'str in Py3 behave like objects and id as key fails') def test_str_id(self): 'Test str using id as key.' deduped = set() deduper = tools.filter_unique(self.strs, container=deduped, key=id) self._test_dedup_str(deduped, deduper, id)
-1,790,066,798,828,475,000
Test str using id as key.
tests/tools_tests.py
test_str_id
nasqueron/pywikibot
python
@unittest.skipIf((not tools.PY2), 'str in Py3 behave like objects and id as key fails') def test_str_id(self): deduped = set() deduper = tools.filter_unique(self.strs, container=deduped, key=id) self._test_dedup_str(deduped, deduper, id)
def test_for_resumable(self): 'Test filter_unique is resumable after a for loop.' gen2 = tools.filter_unique(self.ints) deduped = [] for item in gen2: deduped.append(item) if (len(deduped) == 3): break self.assertEqual(deduped, [1, 3, 2]) last = next(gen2) self.assertEqual(last, 4) self.assertRaises(StopIteration, next, gen2)
-5,043,707,769,899,936,000
Test filter_unique is resumable after a for loop.
tests/tools_tests.py
test_for_resumable
nasqueron/pywikibot
python
def test_for_resumable(self): gen2 = tools.filter_unique(self.ints) deduped = [] for item in gen2: deduped.append(item) if (len(deduped) == 3): break self.assertEqual(deduped, [1, 3, 2]) last = next(gen2) self.assertEqual(last, 4) self.assertRaises(StopIteration, next, gen2)
def test_skip(self): 'Test filter_unique with a container that skips items.' deduped = SkipList() deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([2, 4]))
1,663,633,413,850,212,600
Test filter_unique with a container that skips items.
tests/tools_tests.py
test_skip
nasqueron/pywikibot
python
def test_skip(self): deduped = SkipList() deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([2, 4]))
def test_process_again(self): 'Test filter_unique with an ignoring container.' deduped = ProcessAgainList() deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4]) self.assertEqual(deduped, set([2, 4]))
-1,911,708,214,690,534,700
Test filter_unique with an ignoring container.
tests/tools_tests.py
test_process_again
nasqueron/pywikibot
python
def test_process_again(self): deduped = ProcessAgainList() deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertEqual(deduped_out, [1, 3, 2, 1, 1, 4]) self.assertEqual(deduped, set([2, 4]))
def test_stop(self): 'Test filter_unique with containers that stop iteration.' deduped = ContainsStopList() deduped.stop_list = [2] deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([1, 3])) self.assertRaises(StopIteration, next, deduper) deduped = AddStopList() deduped.stop_list = [4] deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([1, 2, 3])) self.assertRaises(StopIteration, next, deduper)
-6,087,706,758,696,055,000
Test filter_unique with containers that stop iteration.
tests/tools_tests.py
test_stop
nasqueron/pywikibot
python
def test_stop(self): deduped = ContainsStopList() deduped.stop_list = [2] deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([1, 3])) self.assertRaises(StopIteration, next, deduper) deduped = AddStopList() deduped.stop_list = [4] deduper = tools.filter_unique(self.ints, container=deduped) deduped_out = list(deduper) self.assertCountEqual(deduped, deduped_out) self.assertEqual(deduped, set([1, 2, 3])) self.assertRaises(StopIteration, next, deduper)
def __new__(cls, name, bases, dct): 'Create a new test case class.' def create_test(method): def test_method(self): 'Test getargspec.' expected = method(1, 2) returned = self.getargspec(method) self.assertEqual(returned, expected) self.assertIsInstance(returned, self.expected_class) self.assertNoDeprecation() return test_method for (attr, tested_method) in list(dct.items()): if attr.startswith('_method_test_'): suffix = attr[len('_method_test_'):] cls.add_method(dct, ('test_method_' + suffix), create_test(tested_method), doc_suffix='on {0}'.format(suffix)) dct['net'] = False return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
-8,840,490,780,989,338,000
Create a new test case class.
tests/tools_tests.py
__new__
nasqueron/pywikibot
python
def __new__(cls, name, bases, dct): def create_test(method): def test_method(self): 'Test getargspec.' expected = method(1, 2) returned = self.getargspec(method) self.assertEqual(returned, expected) self.assertIsInstance(returned, self.expected_class) self.assertNoDeprecation() return test_method for (attr, tested_method) in list(dct.items()): if attr.startswith('_method_test_'): suffix = attr[len('_method_test_'):] cls.add_method(dct, ('test_method_' + suffix), create_test(tested_method), doc_suffix='on {0}'.format(suffix)) dct['net'] = False return super(MetaTestArgSpec, cls).__new__(cls, name, bases, dct)
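MetaTestArgSpec generates one test per _method_test_* attribute at class-creation time; cls.add_method presumably comes from a pywikibot meta-test base class and is not shown in this excerpt. The underlying pattern in plain Python, stripped of that dependency:

    class MetaGen(type):
        def __new__(mcs, name, bases, dct):
            for attr, func in list(dct.items()):
                if attr.startswith('_method_test_'):
                    suffix = attr[len('_method_test_'):]
                    def test(self, _func=func):   # default arg freezes func per loop
                        self.assertEqual(self.getargspec(_func), _func(1, 2))
                    test.__doc__ = 'Test getargspec on {0}.'.format(suffix)
                    dct['test_method_' + suffix] = test
            return super(MetaGen, mcs).__new__(mcs, name, bases, dct)

The create_test closure in the original achieves the same per-iteration binding that the _func default argument provides here.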
def _method_test_args(self, param): 'Test method with two positional arguments.' return (['self', 'param'], None, None, None)
6,077,924,824,687,960,000
Test method with two positional arguments.
tests/tools_tests.py
_method_test_args
nasqueron/pywikibot
python
def _method_test_args(self, param): return (['self', 'param'], None, None, None)
def _method_test_kwargs(self, param=42): 'Test method with one positional and one keyword argument.' return (['self', 'param'], None, None, (42,))
2,637,302,384,798,835,700
Test method with one positional and one keyword argument.
tests/tools_tests.py
_method_test_kwargs
nasqueron/pywikibot
python
def _method_test_kwargs(self, param=42): return (['self', 'param'], None, None, (42,))
def _method_test_varargs(self, param, *var): 'Test method with two positional arguments and var args.' return (['self', 'param'], 'var', None, None)
-8,122,617,885,005,337,000
Test method with two positional arguments and var args.
tests/tools_tests.py
_method_test_varargs
nasqueron/pywikibot
python
def _method_test_varargs(self, param, *var): return (['self', 'param'], 'var', None, None)
def _method_test_varkwargs(self, param, **var): 'Test method with two positional arguments and var kwargs.' return (['self', 'param'], None, 'var', None)
6,348,691,979,645,983,000
Test method with two positional arguments and var kwargs.
tests/tools_tests.py
_method_test_varkwargs
nasqueron/pywikibot
python
def _method_test_varkwargs(self, param, **var): return (['self', 'param'], None, 'var', None)
def _method_test_vars(self, param, *args, **kwargs): 'Test method with two positional arguments and both var args.' return (['self', 'param'], 'args', 'kwargs', None)
-7,342,400,291,848,552,000
Test method with two positional arguments and both var args.
tests/tools_tests.py
_method_test_vars
nasqueron/pywikibot
python
def _method_test_vars(self, param, *args, **kwargs): return (['self', 'param'], 'args', 'kwargs', None)
def getargspec(self, method): 'Call tested getargspec function.' return tools.getargspec(method)
-3,962,492,708,510,394,000
Call tested getargspec function.
tests/tools_tests.py
getargspec
nasqueron/pywikibot
python
def getargspec(self, method): return tools.getargspec(method)
def getargspec(self, method): "Call inspect's getargspec function." with warnings.catch_warnings(): if (tools.PYTHON_VERSION >= (3, 5)): warnings.simplefilter('ignore', DeprecationWarning) return inspect.getargspec(method)
-6,408,349,756,444,476,000
Call inspect's getargspec function.
tests/tools_tests.py
getargspec
nasqueron/pywikibot
python
def getargspec(self, method): with warnings.catch_warnings(): if (tools.PYTHON_VERSION >= (3, 5)): warnings.simplefilter('ignore', DeprecationWarning) return inspect.getargspec(method)
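For context on the warning filter in the second getargspec: inspect.getargspec was deprecated from Python 3.0/3.5 onward and removed in Python 3.11; inspect.getfullargspec (or inspect.signature) is the modern replacement and also reports keyword-only arguments:

    import inspect

    def f(a, b=1, *args, **kwargs):
        pass

    print(inspect.getfullargspec(f))
    # FullArgSpec(args=['a', 'b'], varargs='args', varkw='kwargs', defaults=(1,),
    #             kwonlyargs=[], kwonlydefaults=None, annotations={})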
def patch(self, name): 'Patch up <name> in self.setUp.' patcher = mock.patch(name) self.addCleanup(patcher.stop) return patcher.start()
-8,015,875,073,429,200,000
Patch up <name> in self.setUp.
tests/tools_tests.py
patch
nasqueron/pywikibot
python
def patch(self, name): patcher = mock.patch(name) self.addCleanup(patcher.stop) return patcher.start()
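The patch helper above is the standard mock.patch + addCleanup idiom: cleanups registered during setUp run even when setUp itself fails partway through, which a tearDown-based stop cannot guarantee. A self-contained version of the same pattern (the patched target is illustrative):

    import os
    import unittest
    from unittest import mock   # Python 2-era pywikibot used the external mock package

    class PatchDemo(unittest.TestCase):
        def setUp(self):
            patcher = mock.patch('os.stat')
            self.addCleanup(patcher.stop)   # undone automatically after each test
            self.stat = patcher.start()

        def test_stat_is_mocked(self):
            os.stat('/tmp')
            self.stat.assert_called_once_with('/tmp')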
def setUp(self): 'Patch a variety of dependencies.' super(TestFileModeChecker, self).setUp() self.stat = self.patch('os.stat') self.chmod = self.patch('os.chmod') self.file = '~FakeFile'
1,632,057,437,270,286,800
Patch a variety of dependencies.
tests/tools_tests.py
setUp
nasqueron/pywikibot
python
def setUp(self): super(TestFileModeChecker, self).setUp() self.stat = self.patch('os.stat') self.chmod = self.patch('os.chmod') self.file = '~FakeFile'
def test_auto_chmod_for_dir(self): 'Do not chmod files that have mode private_files_permission.' self.stat.return_value.st_mode = 16768 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.assertFalse(self.chmod.called)
-778,208,593,761,305,900
Do not chmod directories that already have mode private_files_permission.
tests/tools_tests.py
test_auto_chmod_for_dir
nasqueron/pywikibot
python
def test_auto_chmod_for_dir(self): self.stat.return_value.st_mode = 16768 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.assertFalse(self.chmod.called)
def test_auto_chmod_OK(self): 'Do not chmod files that have mode private_files_permission.' self.stat.return_value.st_mode = 33152 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.assertFalse(self.chmod.called)
150,273,854,964,530,340
Do not chmod files that have mode private_files_permission.
tests/tools_tests.py
test_auto_chmod_OK
nasqueron/pywikibot
python
def test_auto_chmod_OK(self): self.stat.return_value.st_mode = 33152 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.assertFalse(self.chmod.called)
def test_auto_chmod_not_OK(self): 'Chmod files that do not have mode private_files_permission.' self.stat.return_value.st_mode = 33188 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.chmod.assert_called_once_with(self.file, 384)
-8,264,303,141,372,550,000
Chmod files that do not have mode private_files_permission.
tests/tools_tests.py
test_auto_chmod_not_OK
nasqueron/pywikibot
python
def test_auto_chmod_not_OK(self): self.stat.return_value.st_mode = 33188 tools.file_mode_checker(self.file, mode=384) self.stat.assert_called_with(self.file) self.chmod.assert_called_once_with(self.file, 384)
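The decimal constants in these three tests are octal st_mode values: 384 == 0o600 (the private-file permission), 33152 == 0o100600 (regular file already at 0600, so no chmod), 33188 == 0o100644 (regular file at 0644, so chmod to 0o600), and 16768 == 0o40600 (a directory whose permission bits already match). Quick verification:

    import stat

    for mode in (384, 33152, 33188, 16768):
        print(mode, oct(mode), stat.S_ISDIR(mode), oct(stat.S_IMODE(mode)))
    # 384 0o600 False 0o600
    # 33152 0o100600 False 0o600
    # 33188 0o100644 False 0o644
    # 16768 0o40600 True 0o600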
def setUp(self): 'Set up tests.' super(TestFileShaCalculator, self).setUp()
2,380,888,882,065,735,700
Set up tests.
tests/tools_tests.py
setUp
nasqueron/pywikibot
python
def setUp(self): super(TestFileShaCalculator, self).setUp()
def test_md5_complete_calculation(self): 'Test md5 of complete file.' res = tools.compute_file_hash(self.filename, sha='md5') self.assertIn(res, ('5d7265e290e6733e1e2020630262a6f3', '2c941f2fa7e6e629d165708eb02b67f7'))
-8,072,161,407,093,740,000
Test md5 of complete file.
tests/tools_tests.py
test_md5_complete_calculation
nasqueron/pywikibot
python
def test_md5_complete_calculation(self): res = tools.compute_file_hash(self.filename, sha='md5') self.assertIn(res, ('5d7265e290e6733e1e2020630262a6f3', '2c941f2fa7e6e629d165708eb02b67f7'))
def test_md5_partial_calculation(self): 'Test md5 of partial file (1024 bytes).' res = tools.compute_file_hash(self.filename, sha='md5', bytes_to_read=1024) self.assertIn(res, ('edf6e1accead082b6b831a0a600704bc', 'be0227b6d490baa49e6d7e131c7f596b'))
6,977,463,010,757,062,000
Test md5 of partial file (1024 bytes).
tests/tools_tests.py
test_md5_partial_calculation
nasqueron/pywikibot
python
def test_md5_partial_calculation(self): res = tools.compute_file_hash(self.filename, sha='md5', bytes_to_read=1024) self.assertIn(res, ('edf6e1accead082b6b831a0a600704bc', 'be0227b6d490baa49e6d7e131c7f596b'))
def test_sha1_complete_calculation(self): 'Test sha1 of complete file.' res = tools.compute_file_hash(self.filename, sha='sha1') self.assertIn(res, ('1c12696e1119493a625aa818a35c41916ce32d0c', '146121e6d0461916c9a0fab00dc718acdb6a6b14'))
1,132,385,081,542,025,700
Test sha1 of complete file.
tests/tools_tests.py
test_sha1_complete_calculation
nasqueron/pywikibot
python
def test_sha1_complete_calculation(self): res = tools.compute_file_hash(self.filename, sha='sha1') self.assertIn(res, ('1c12696e1119493a625aa818a35c41916ce32d0c', '146121e6d0461916c9a0fab00dc718acdb6a6b14'))
def test_sha1_partial_calculation(self): 'Test sha1 of partial file (1024 bytes).' res = tools.compute_file_hash(self.filename, sha='sha1', bytes_to_read=1024) self.assertIn(res, ('e56fa7bd5cfdf6bb7e2d8649dd9216c03e7271e6', '617ce7d539848885b52355ed597a042dae1e726f'))
2,189,167,603,415,891,000
Test sha1 of partial file (1024 bytes).
tests/tools_tests.py
test_sha1_partial_calculation
nasqueron/pywikibot
python
def test_sha1_partial_calculation(self): res = tools.compute_file_hash(self.filename, sha='sha1', bytes_to_read=1024) self.assertIn(res, ('e56fa7bd5cfdf6bb7e2d8649dd9216c03e7271e6', '617ce7d539848885b52355ed597a042dae1e726f'))
def test_sha224_complete_calculation(self): 'Test sha224 of complete file.' res = tools.compute_file_hash(self.filename, sha='sha224') self.assertIn(res, ('3d350d9d9eca074bd299cb5ffe1b325a9f589b2bcd7ba1c033ab4d33', '4a2cf33b7da01f7b0530b2cc624e1180c8651b20198e9387aee0c767'))
-1,636,782,885,229,818,600
Test sha224 of complete file.
tests/tools_tests.py
test_sha224_complete_calculation
nasqueron/pywikibot
python
def test_sha224_complete_calculation(self): res = tools.compute_file_hash(self.filename, sha='sha224') self.assertIn(res, ('3d350d9d9eca074bd299cb5ffe1b325a9f589b2bcd7ba1c033ab4d33', '4a2cf33b7da01f7b0530b2cc624e1180c8651b20198e9387aee0c767'))
def test_sha224_partial_calculation(self): 'Test sha224 of partial file (1024 bytes).' res = tools.compute_file_hash(self.filename, sha='sha224', bytes_to_read=1024) self.assertIn(res, ('affa8cb79656a9b6244a079f8af91c9271e382aa9d5aa412b599e169', '486467144e683aefd420d576250c4cc984e6d7bf10c85d36e3d249d2'))
9,067,358,240,009,453,000
Test sha224 of partial file (1024 bytes).
tests/tools_tests.py
test_sha224_partial_calculation
nasqueron/pywikibot
python
def test_sha224_partial_calculation(self): res = tools.compute_file_hash(self.filename, sha='sha224', bytes_to_read=1024) self.assertIn(res, ('affa8cb79656a9b6244a079f8af91c9271e382aa9d5aa412b599e169', '486467144e683aefd420d576250c4cc984e6d7bf10c85d36e3d249d2'))
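Each assertion in this group accepts two digests, presumably because the fixture file can be checked out with either Unix or Windows line endings. The helper itself is called exactly as in the tests; the filename below is illustrative only:

    from pywikibot import tools

    full_digest = tools.compute_file_hash('article.xml', sha='sha1')
    head_digest = tools.compute_file_hash('article.xml', sha='sha1', bytes_to_read=1024)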
@classproperty def bar(cls): 'Class property method.' return cls._bar
1,559,728,526,125,838,600
Class property method.
tests/tools_tests.py
bar
nasqueron/pywikibot
python
@classproperty def bar(cls): return cls._bar
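classproperty makes Foo.bar resolve through a method that receives the class itself, which the built-in property cannot do without an instance. A minimal descriptor that behaves like the decorator used above (pywikibot's real implementation lives in pywikibot.tools and may differ in detail; Foo is re-declared here just for the sketch):

    class classproperty(object):
        # Read-only, class-level property: __get__ ignores the instance.
        def __init__(self, cls_method):
            self.method = cls_method

        def __get__(self, instance, owner):
            return self.method(owner)

    class Foo(object):
        _bar = 'baz'

        @classproperty
        def bar(cls):
            return cls._bar

    assert Foo.bar == 'baz'     # works on the class, no instance needed
    assert Foo().bar == 'baz'   # and on instances, via the owner class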
def test_classproperty(self): 'Test for classproperty decorator.' self.assertEqual(Foo.bar, 'baz') self.assertEqual(Foo.bar, Foo._bar)
6,343,514,514,572,085,000
Test for classproperty decorator.
tests/tools_tests.py
test_classproperty
nasqueron/pywikibot
python
def test_classproperty(self): self.assertEqual(Foo.bar, 'baz') self.assertEqual(Foo.bar, Foo._bar)
def __init__(self): 'Create instance with dummy values.' self.instance_var = 1337 self.closed = False
5,299,018,231,384,815,000
Create instance with dummy values.
tests/tools_tests.py
__init__
nasqueron/pywikibot
python
def __init__(self): self.instance_var = 1337 self.closed = False
def close(self): 'Just store that it has been closed.' self.closed = True
-4,535,228,252,227,769,000
Just store that it has been closed.
tests/tools_tests.py
close
nasqueron/pywikibot
python
def close(self): self.closed = True
def test_method(self): 'Test getargspec.' expected = method(1, 2) returned = self.getargspec(method) self.assertEqual(returned, expected) self.assertIsInstance(returned, self.expected_class) self.assertNoDeprecation()
-8,777,568,974,570,606,000
Test getargspec.
tests/tools_tests.py
test_method
nasqueron/pywikibot
python
def test_method(self): expected = method(1, 2) returned = self.getargspec(method) self.assertEqual(returned, expected) self.assertIsInstance(returned, self.expected_class) self.assertNoDeprecation()
def path_to_url(path): '\n Convert a path to a file: URL. The path will be made absolute and have\n quoted path parts.\n ' path = os.path.normpath(os.path.abspath(path)) url = urlparse.urljoin('file:', urllib2.pathname2url(path)) return url
1,249,888,226,016,398,300
Convert a path to a file: URL. The path will be made absolute and have quoted path parts.
poetry/packages/utils/utils.py
path_to_url
jancespivo/poetry
python
def path_to_url(path): '\n Convert a path to a file: URL. The path will be made absolute and have\n quoted path parts.\n ' path = os.path.normpath(os.path.abspath(path)) url = urlparse.urljoin('file:', urllib2.pathname2url(path)) return url
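path_to_url above targets Python 2 (urlparse, urllib2). For reference, the same helper under Python 3 uses only the renamed standard-library modules:

    import os
    from urllib.parse import urljoin
    from urllib.request import pathname2url

    def path_to_url_py3(path):
        # Absolutize and normalize, then percent-quote into a file: URL.
        path = os.path.normpath(os.path.abspath(path))
        return urljoin('file:', pathname2url(path))
    # path_to_url_py3('my dir') -> 'file:///.../my%20dir' (prefix depends on the cwd)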
def is_installable_dir(path): 'Return True if `path` is a directory containing a setup.py file.' if (not os.path.isdir(path)): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True return False
-7,312,794,735,601,680,000
Return True if `path` is a directory containing a setup.py file.
poetry/packages/utils/utils.py
is_installable_dir
jancespivo/poetry
python
def is_installable_dir(path): if (not os.path.isdir(path)): return False setup_py = os.path.join(path, 'setup.py') if os.path.isfile(setup_py): return True return False
def is_archive_file(name): 'Return True if `name` is considered an archive file.' ext = splitext(name)[1].lower() if (ext in ARCHIVE_EXTENSIONS): return True return False
-1,030,535,540,759,138,600
Return True if `name` is considered an archive file.
poetry/packages/utils/utils.py
is_archive_file
jancespivo/poetry
python
def is_archive_file(name): ext = splitext(name)[1].lower() if (ext in ARCHIVE_EXTENSIONS): return True return False
def splitext(path): 'Like os.path.splitext, but take off .tar too' (base, ext) = posixpath.splitext(path) if base.lower().endswith('.tar'): ext = (base[(- 4):] + ext) base = base[:(- 4)] return (base, ext)
7,530,089,719,296,582,000
Like os.path.splitext, but take off .tar too
poetry/packages/utils/utils.py
splitext
jancespivo/poetry
python
def splitext(path): (base, ext) = posixpath.splitext(path) if base.lower().endswith('.tar'): ext = (base[(- 4):] + ext) base = base[:(- 4)] return (base, ext)
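A quick check of the .tar special case, computed directly from the code above:

    splitext('pkg-1.0.tar.gz')    # -> ('pkg-1.0', '.tar.gz'), not ('pkg-1.0.tar', '.gz')
    splitext('pkg-1.0.tar.bz2')   # -> ('pkg-1.0', '.tar.bz2')
    splitext('pkg-1.0.zip')       # -> ('pkg-1.0', '.zip'), same as posixpath.splitext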
def __init__(self, A, a=1.0, dtype=None, copy=False): 'Initializes `expm_multiply_parallel`. \n\n Parameters\n -----------\n A : {array_like, scipy.sparse matrix}\n The operator (matrix) whose exponential is to be calculated.\n a : scalar, optional\n scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`.\n dtype : numpy.dtype, optional\n data type specified for the total operator :math:`\\mathrm{e}^{aA}`. Default is: `numpy.result_type(A.dtype,min_scalar_type(a),float64)`.\n copy : bool, optional\n if `True` the matrix is copied otherwise the matrix is stored by reference. \n\n ' if (_np.array(a).ndim == 0): self._a = a else: raise ValueError('a must be scalar value.') self._A = _sp.csr_matrix(A, copy=copy) if (A.shape[0] != A.shape[1]): raise ValueError('A must be a square matrix.') a_dtype_min = _np.min_scalar_type(self._a) if (dtype is None): self._dtype = _np.result_type(A.dtype, a_dtype_min, _np.float64) else: min_dtype = _np.result_type(A.dtype, a_dtype_min, _np.float32) if (not _np.can_cast(min_dtype, dtype)): raise ValueError('dtype not sufficient to represent a*A to at least float32 precision.') self._dtype = dtype tol = (_np.finfo(self._dtype).eps / 2) tol_dtype = _np.finfo(self._dtype).eps.dtype self._tol = _np.array(tol, dtype=tol_dtype) mu = (_wrapper_csr_trace(self._A.indptr, self._A.indices, self._A.data) / self._A.shape[0]) self._mu = _np.array(mu, dtype=self._dtype) self._A_1_norm = _wrapper_csr_1_norm(self._A.indptr, self._A.indices, self._A.data, self._mu) self._calculate_partition()
1,455,410,426,730,194,700
Initializes `expm_multiply_parallel`. Parameters ----------- A : {array_like, scipy.sparse matrix} The operator (matrix) whose exponential is to be calculated. a : scalar, optional scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\mathrm{e}^{aA}`. dtype : numpy.dtype, optional data type specified for the total operator :math:`\mathrm{e}^{aA}`. Default is: `numpy.result_type(A.dtype,min_scalar_type(a),float64)`. copy : bool, optional if `True` the matrix is copied otherwise the matrix is stored by reference.
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
__init__
markusschmitt/QuSpin
python
def __init__(self, A, a=1.0, dtype=None, copy=False): 'Initializes `expm_multiply_parallel`. \n\n Parameters\n -----------\n A : {array_like, scipy.sparse matrix}\n The operator (matrix) whose exponential is to be calculated.\n a : scalar, optional\n scalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`.\n dtype : numpy.dtype, optional\n data type specified for the total operator :math:`\\mathrm{e}^{aA}`. Default is: `numpy.result_type(A.dtype,min_scalar_type(a),float64)`.\n copy : bool, optional\n if `True` the matrix is copied otherwise the matrix is stored by reference. \n\n ' if (_np.array(a).ndim == 0): self._a = a else: raise ValueError('a must be scalar value.') self._A = _sp.csr_matrix(A, copy=copy) if (A.shape[0] != A.shape[1]): raise ValueError('A must be a square matrix.') a_dtype_min = _np.min_scalar_type(self._a) if (dtype is None): self._dtype = _np.result_type(A.dtype, a_dtype_min, _np.float64) else: min_dtype = _np.result_type(A.dtype, a_dtype_min, _np.float32) if (not _np.can_cast(min_dtype, dtype)): raise ValueError('dtype not sufficient to represent a*A to at least float32 precision.') self._dtype = dtype tol = (_np.finfo(self._dtype).eps / 2) tol_dtype = _np.finfo(self._dtype).eps.dtype self._tol = _np.array(tol, dtype=tol_dtype) mu = (_wrapper_csr_trace(self._A.indptr, self._A.indices, self._A.data) / self._A.shape[0]) self._mu = _np.array(mu, dtype=self._dtype) self._A_1_norm = _wrapper_csr_1_norm(self._A.indptr, self._A.indices, self._A.data, self._mu) self._calculate_partition()
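Typical usage per the QuSpin documentation builds the operator once and applies it repeatedly with a dot method; the method name and import path below come from QuSpin's docs rather than from this excerpt, so treat them as assumptions:

    import numpy as np
    import scipy.sparse as sp
    # from quspin.tools.evolution import expm_multiply_parallel   # assumed import path

    H = sp.random(100, 100, density=0.05, format='csr')
    H = (H + H.T).tocsr()                           # Hermitian generator
    U = expm_multiply_parallel(H, a=-0.1j)          # represents e^{-0.1j * H}
    psi = np.random.rand(100).astype(np.complex128)
    psi_t = U.dot(psi)                              # apply the matrix exponential to a vector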
@property def a(self): 'scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`' return self._a
-3,851,025,710,648,072,700
scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\mathrm{e}^{aA}`
quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py
a
markusschmitt/QuSpin
python
@property def a(self): 'scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\mathrm{e}^{aA}`' return self._a