code                      string
signature                 string
docstring                 string
loss_without_docstring    float64
loss_with_docstring       float64
factor                    float64
# check inputs
# N.B., ensure int8 so we can use cython optimisation
h = HaplotypeArray(np.asarray(h), copy=False)
if h.min() < 0:
    raise NotImplementedError('missing calls are not supported')

# initialise
n_variants = h.n_variants  # number of rows, i.e., variants
n_haplotypes = h.n_haplotypes  # number of columns, i.e., haplotypes
n_pairs = (n_haplotypes * (n_haplotypes - 1)) // 2

# compute the shared prefix length between all pairs of haplotypes
spl = pairwise_shared_prefix_lengths(memoryview_safe(np.asarray(h)))

# compute EHH by counting the number of shared prefixes extending beyond
# each variant
minlength = None if truncate else n_variants + 1
b = np.bincount(spl, minlength=minlength)
c = np.cumsum(b[::-1])[:-1]
ehh = (c / n_pairs)[::-1]

return ehh
def ehh_decay(h, truncate=False)
Compute the decay of extended haplotype homozygosity (EHH) moving away
from the first variant.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
truncate : bool, optional
    If True, the return array will exclude trailing zeros.

Returns
-------
ehh : ndarray, float, shape (n_variants,)
    EHH at successive variants from the first variant.
5.557188
4.834898
1.149391
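A minimal usage sketch with toy data, assuming the function is exposed as ``allel.ehh_decay`` as in scikit-allel:

import numpy as np
import allel

# three variants x four haplotypes; haplotypes 0 and 1 share the longest prefix
h = np.array([[0, 0, 0, 1],
              [0, 0, 1, 1],
              [0, 1, 1, 1]], dtype='i1')
ehh = allel.ehh_decay(h)
# ehh[0] is the fraction of haplotype pairs sharing a prefix past the
# first variant; values decay towards zero at later variants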
# check inputs
# N.B., ensure int8 so we can use cython optimisation
h = HaplotypeArray(np.asarray(h), copy=False)
if h.max() > 1:
    raise NotImplementedError('only biallelic variants are supported')
if h.min() < 0:
    raise NotImplementedError('missing calls are not supported')

# sort by prefix
indices = h.prefix_argsort()
h = np.take(h, indices, axis=1)

# paint
painting = paint_shared_prefixes(memoryview_safe(np.asarray(h)))

return painting, indices
def voight_painting(h)
Paint haplotypes, assigning a unique integer to each shared haplotype
prefix.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
painting : ndarray, int, shape (n_variants, n_haplotypes)
    Painting array.
indices : ndarray, int, shape (n_haplotypes,)
    Haplotype indices after sorting by prefix.
8.174321
5.725085
1.427808
import seaborn as sns
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt

if flank == 'left':
    painting = painting[::-1]

n_colors = painting.max()
palette = sns.color_palette(palette, n_colors)
# use white for singleton haplotypes
cmap = ListedColormap(['white'] + palette)

# setup axes
if ax is None:
    w = plt.rcParams['figure.figsize'][0]
    h = height_factor * painting.shape[1]
    fig, ax = plt.subplots(figsize=(w, h))
    sns.despine(ax=ax, bottom=True, left=True)

ax.pcolormesh(painting.T, cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(0, painting.shape[0])
ax.set_ylim(0, painting.shape[1])

return ax
def plot_voight_painting(painting, palette='colorblind', flank='right', ax=None, height_factor=0.01)
Plot a painting of shared haplotype prefixes.

Parameters
----------
painting : array_like, int, shape (n_variants, n_haplotypes)
    Painting array.
palette : string, optional
    A Seaborn palette name.
flank : {'right', 'left'}, optional
    If left, painting will be reversed along first axis.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.
height_factor : float, optional
    If no axes provided, determine height of figure by multiplying
    height of painting array by this number.

Returns
-------
ax : axes
2.273869
2.110427
1.077445
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import seaborn as sns

# check inputs
h = asarray_ndim(h, 2)
if index is None:
    # use midpoint
    index = h.shape[0] // 2

# divide data into two flanks
hl = h[:index+1][::-1]
hr = h[index:]

# paint both flanks
pl, il = voight_painting(hl)
pr, ir = voight_painting(hr)

# compute ehh decay for both flanks
el = ehh_decay(hl, truncate=False)
er = ehh_decay(hr, truncate=False)

# setup figure
# fixed height for EHH decay subplot
h_ehh = plt.rcParams['figure.figsize'][1] // 3
# add height for paintings
h_painting = height_factor * h.shape[1]
if fig is None:
    w = plt.rcParams['figure.figsize'][0]
    h = h_ehh + h_painting
    fig = plt.figure(figsize=(w, h))

# setup gridspec
gs = GridSpec(2, 2,
              width_ratios=[hl.shape[0], hr.shape[0]],
              height_ratios=[h_painting, h_ehh])

# plot paintings
ax = fig.add_subplot(gs[0, 0])
sns.despine(ax=ax, left=True, bottom=True)
plot_voight_painting(pl, palette=palette, flank='left', ax=ax)
ax = fig.add_subplot(gs[0, 1])
sns.despine(ax=ax, left=True, bottom=True)
plot_voight_painting(pr, palette=palette, flank='right', ax=ax)

# plot ehh
ax = fig.add_subplot(gs[1, 0])
sns.despine(ax=ax, offset=3)
x = np.arange(el.shape[0])
y = el
ax.fill_between(x, 0, y)
ax.set_ylim(0, 1)
ax.set_yticks([0, 1])
ax.set_ylabel('EHH')
ax.invert_xaxis()
ax = fig.add_subplot(gs[1, 1])
sns.despine(ax=ax, left=True, right=False, offset=3)
ax.yaxis.tick_right()
ax.set_ylim(0, 1)
ax.set_yticks([0, 1])
x = np.arange(er.shape[0])
y = er
ax.fill_between(x, 0, y)

# tidy up
fig.tight_layout()

return fig
def fig_voight_painting(h, index=None, palette='colorblind', height_factor=0.01, fig=None)
Make a figure of shared haplotype prefixes for both left and right
flanks, centred on some variant of choice.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
index : int, optional
    Index of the variant within the haplotype array to centre on. If
    not provided, the middle variant will be used.
palette : string, optional
    A Seaborn palette name.
height_factor : float, optional
    If no axes provided, determine height of figure by multiplying
    height of painting array by this number.
fig : figure, optional
    The figure on which to draw. If not provided, a new figure will be
    created.

Returns
-------
fig : figure

Notes
-----
N.B., the ordering of haplotypes on the left and right flanks will be
different. This means that haplotypes on the right flank **will not**
correspond to haplotypes on the left flank at the same vertical
position.
2.22909
2.195085
1.015491
# check inputs
if map_pos is None:
    # integrate over physical distance
    map_pos = pos
else:
    map_pos = asarray_ndim(map_pos, 1)
    check_dim0_aligned(pos, map_pos)

# compute physical gaps
physical_gaps = np.diff(pos)

# compute genetic gaps
gaps = np.diff(map_pos).astype('f8')

if is_accessible is not None:

    # compute accessible gaps
    is_accessible = asarray_ndim(is_accessible, 1)
    assert is_accessible.shape[0] > pos[-1], \
        'accessibility array too short'
    accessible_gaps = np.zeros_like(physical_gaps)
    for i in range(1, len(pos)):
        # N.B., expect pos is 1-based
        n_access = np.count_nonzero(is_accessible[pos[i-1]-1:pos[i]-1])
        accessible_gaps[i-1] = n_access

    # adjust using accessibility
    scaling = accessible_gaps / physical_gaps
    gaps = gaps * scaling

elif gap_scale is not None and gap_scale > 0:

    scaling = np.ones(gaps.shape, dtype='f8')
    loc_scale = physical_gaps > gap_scale
    scaling[loc_scale] = gap_scale / physical_gaps[loc_scale]
    gaps = gaps * scaling

if max_gap is not None and max_gap > 0:

    # deal with very large gaps
    gaps[physical_gaps > max_gap] = -1

return gaps
def compute_ihh_gaps(pos, map_pos, gap_scale, max_gap, is_accessible)
Compute spacing between variants for integrating haplotype
homozygosity.

Parameters
----------
pos : array_like, int, shape (n_variants,)
    Variant positions (physical distance).
map_pos : array_like, float, shape (n_variants,)
    Variant positions (genetic map distance).
gap_scale : int, optional
    Rescale distance between variants if gap is larger than this value.
max_gap : int, optional
    Do not report scores if EHH spans a gap larger than this number of
    base pairs.
is_accessible : array_like, bool, optional
    Genome accessibility array. If provided, distance between variants
    will be computed as the number of accessible bases between them.

Returns
-------
gaps : ndarray, float, shape (n_variants - 1,)
3.100428
2.803179
1.10604
# check inputs
h = asarray_ndim(h, 2)
check_integer_dtype(h)
h = memoryview_safe(h)

# # check there are no invariant sites
# ac = h.count_alleles()
# assert np.all(ac.is_segregating()), 'please remove non-segregating sites'

if use_threads and multiprocessing.cpu_count() > 1:

    # create pool
    pool = ThreadPool(2)

    # scan forward
    result_fwd = pool.apply_async(nsl01_scan, args=(h,))

    # scan backward
    result_rev = pool.apply_async(nsl01_scan, args=(h[::-1],))

    # wait for both to finish
    pool.close()
    pool.join()

    # obtain results
    nsl0_fwd, nsl1_fwd = result_fwd.get()
    nsl0_rev, nsl1_rev = result_rev.get()

else:

    # scan forward
    nsl0_fwd, nsl1_fwd = nsl01_scan(h)

    # scan backward
    nsl0_rev, nsl1_rev = nsl01_scan(h[::-1])

# handle backwards
nsl0_rev = nsl0_rev[::-1]
nsl1_rev = nsl1_rev[::-1]

# compute unstandardized score
nsl0 = nsl0_fwd + nsl0_rev
nsl1 = nsl1_fwd + nsl1_rev
score = np.log(nsl1 / nsl0)

return score
def nsl(h, use_threads=True)
Compute the unstandardized number of segregating sites by length (nSl)
for each variant, comparing the reference and alternate alleles, after
Ferrer-Admetlla et al. (2014).

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
use_threads : bool, optional
    If True use multiple threads to compute.

Returns
-------
score : ndarray, float, shape (n_variants,)

Notes
-----
This function will calculate nSl for all variants. To exclude variants
below a given minor allele frequency, filter the input haplotype array
before passing to this function.

This function computes nSl by comparing the reference and alternate
alleles. These can be polarised by switching the sign for any variant
where the reference allele is derived.

This function does nothing about nSl calculations where haplotype
homozygosity extends up to the first or last variant. There may be
edge effects.

Note that the unstandardized score is returned. Usually these scores
are then standardized in different allele frequency bins.

See Also
--------
standardize_by_allele_count
2.813623
2.658876
1.0582
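A hedged usage sketch with hypothetical toy data, assuming the top-level export ``allel.nsl`` as in scikit-allel:

import numpy as np
import allel

h = np.array([[0, 0, 0, 1],
              [0, 0, 1, 1],
              [0, 1, 1, 1],
              [0, 1, 0, 0]], dtype='i1')
score = allel.nsl(h)  # one unstandardized value per variant
# typically followed by standardization within allele count bins,
# e.g. via standardize_by_allele_count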
# check inputs
h1 = asarray_ndim(h1, 2)
check_integer_dtype(h1)
h2 = asarray_ndim(h2, 2)
check_integer_dtype(h2)
check_dim0_aligned(h1, h2)
h1 = memoryview_safe(h1)
h2 = memoryview_safe(h2)

if use_threads and multiprocessing.cpu_count() > 1:
    # use multiple threads

    # setup threadpool
    pool = ThreadPool(min(4, multiprocessing.cpu_count()))

    # scan forward
    res1_fwd = pool.apply_async(nsl_scan, args=(h1,))
    res2_fwd = pool.apply_async(nsl_scan, args=(h2,))

    # scan backward
    res1_rev = pool.apply_async(nsl_scan, args=(h1[::-1],))
    res2_rev = pool.apply_async(nsl_scan, args=(h2[::-1],))

    # wait for both to finish
    pool.close()
    pool.join()

    # obtain results
    nsl1_fwd = res1_fwd.get()
    nsl2_fwd = res2_fwd.get()
    nsl1_rev = res1_rev.get()
    nsl2_rev = res2_rev.get()

    # cleanup
    pool.terminate()

else:
    # compute without threads

    # scan forward
    nsl1_fwd = nsl_scan(h1)
    nsl2_fwd = nsl_scan(h2)

    # scan backward
    nsl1_rev = nsl_scan(h1[::-1])
    nsl2_rev = nsl_scan(h2[::-1])

# handle reverse scans
nsl1_rev = nsl1_rev[::-1]
nsl2_rev = nsl2_rev[::-1]

# compute unstandardized score
nsl1 = nsl1_fwd + nsl1_rev
nsl2 = nsl2_fwd + nsl2_rev
score = np.log(nsl1 / nsl2)

return score
def xpnsl(h1, h2, use_threads=True)
Cross-population version of the nSl statistic.

Parameters
----------
h1 : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array for the first population.
h2 : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array for the second population.
use_threads : bool, optional
    If True use multiple threads to compute.

Returns
-------
score : ndarray, float, shape (n_variants,)
    Unstandardized XPNSL scores.
1.956562
1.808496
1.081872
# check inputs
h = HaplotypeArray(h, copy=False)

# number of haplotypes
n = h.n_haplotypes

# compute haplotype frequencies
f = h.distinct_frequencies()

# estimate haplotype diversity
hd = (1 - np.sum(f**2)) * n / (n - 1)

return hd
def haplotype_diversity(h)
Estimate haplotype diversity.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
hd : float
    Haplotype diversity.
3.881711
3.745231
1.036441
hd = moving_statistic(values=h, statistic=haplotype_diversity, size=size,
                      start=start, stop=stop, step=step)
return hd
def moving_haplotype_diversity(h, size, start=0, stop=None, step=None)
Estimate haplotype diversity in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.

Returns
-------
hd : ndarray, float, shape (n_windows,)
    Haplotype diversity.
4.451364
5.769802
0.771493
# check inputs
h = HaplotypeArray(h, copy=False)

# compute haplotype frequencies
f = h.distinct_frequencies()

# compute H1
h1 = np.sum(f**2)

# compute H12
h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2)

# compute H123
h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2)

# compute H2/H1
h2 = h1 - f[0]**2
h2_h1 = h2 / h1

return h1, h12, h123, h2_h1
def garud_h(h)
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.

Returns
-------
h1 : float
    H1 statistic (sum of squares of haplotype frequencies).
h12 : float
    H12 statistic (sum of squares of haplotype frequencies, combining
    the two most common haplotypes into a single frequency).
h123 : float
    H123 statistic (sum of squares of haplotype frequencies, combining
    the three most common haplotypes into a single frequency).
h2_h1 : float
    H2/H1 statistic, indicating the "softness" of a sweep.
2.802237
2.050823
1.366397
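A usage sketch with hypothetical toy data, assuming the top-level export ``allel.garud_h``:

import numpy as np
import allel

# six haplotypes over three variants (toy data)
h = np.array([[0, 0, 0, 1, 1, 1],
              [0, 1, 0, 1, 1, 0],
              [0, 0, 1, 1, 0, 0]], dtype='i1')
h1, h12, h123, h2_h1 = allel.garud_h(h)
# a hard sweep drives H12 towards H1; a soft sweep inflates H2/H1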
gh = moving_statistic(values=h, statistic=garud_h, size=size, start=start,
                      stop=stop, step=step)

h1 = gh[:, 0]
h12 = gh[:, 1]
h123 = gh[:, 2]
h2_h1 = gh[:, 3]

return h1, h12, h123, h2_h1
def moving_garud_h(h, size, start=0, stop=None, step=None)
Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015), in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.

Returns
-------
h1 : ndarray, float, shape (n_windows,)
    H1 statistics (sum of squares of haplotype frequencies).
h12 : ndarray, float, shape (n_windows,)
    H12 statistics (sum of squares of haplotype frequencies, combining
    the two most common haplotypes into a single frequency).
h123 : ndarray, float, shape (n_windows,)
    H123 statistics (sum of squares of haplotype frequencies, combining
    the three most common haplotypes into a single frequency).
h2_h1 : ndarray, float, shape (n_windows,)
    H2/H1 statistics, indicating the "softness" of a sweep.
3.078356
2.410369
1.277131
import matplotlib.pyplot as plt
import seaborn as sns

# check inputs
h = HaplotypeArray(h, copy=False)

# setup figure
if ax is None:
    width = plt.rcParams['figure.figsize'][0]
    height = width / 10
    fig, ax = plt.subplots(figsize=(width, height))
    sns.despine(ax=ax, left=True)

# count distinct haplotypes
hc = h.distinct_counts()

# setup palette
n_colors = np.count_nonzero(hc > 1)
palette = sns.color_palette(palette, n_colors)

# paint frequencies
x1 = 0
for i, c in enumerate(hc):
    x2 = x1 + c
    if c > 1:
        color = palette[i]
    else:
        color = singleton_color
    ax.axvspan(x1, x2, color=color)
    x1 = x2

# tidy up
ax.set_xlim(0, h.shape[1])
ax.set_yticks([])

return ax
def plot_haplotype_frequencies(h, palette='Paired', singleton_color='w', ax=None)
Plot haplotype frequencies.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
palette : string, optional
    A Seaborn palette name.
singleton_color : string, optional
    Color to paint singleton haplotypes.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.

Returns
-------
ax : axes
2.430124
2.522438
0.963403
# determine windows
windows = np.asarray(list(index_windows(h, size=size, start=start,
                                        stop=stop, step=None)))

# setup output
hr = np.zeros((windows.shape[0], h.shape[1]), dtype='i4')

# iterate over windows
for i, (window_start, window_stop) in enumerate(windows):

    # extract haplotypes for the current window
    hw = h[window_start:window_stop]

    # count haplotypes
    hc = hw.distinct_counts()

    # ensure sorted descending
    hc.sort()
    hc = hc[::-1]

    # compute ranks for non-singleton haplotypes
    cp = 0
    for j, c in enumerate(hc):
        if c > 1:
            hr[i, cp:cp+c] = j+1
        cp += c

return hr
def moving_hfs_rank(h, size, start=0, stop=None)
Helper function for plotting haplotype frequencies in moving windows.

Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.

Returns
-------
hr : ndarray, int, shape (n_windows, n_haplotypes)
    Haplotype rank array.
3.577598
3.199792
1.118072
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns

# setup figure
if ax is None:
    fig, ax = plt.subplots()

# compute haplotype frequencies
# N.B., here we use a haplotype rank data structure to enable the use of
# pcolormesh() which is a lot faster than any other type of plotting
# function
hr = moving_hfs_rank(h, size=size, start=start, stop=stop)

# truncate to n most common haplotypes
if n:
    hr[hr > n] = 0

# compute window start and stop positions
windows = moving_statistic(pos, statistic=lambda v: (v[0], v[-1]),
                           size=size, start=start, stop=stop)

# create color map
colors = [singleton_color] + sns.color_palette(palette, n_colors=hr.max())
cmap = mpl.colors.ListedColormap(colors)

# draw colors
x = np.append(windows[:, 0], windows[-1, -1])
y = np.arange(h.shape[1] + 1)
ax.pcolormesh(x, y, hr.T, cmap=cmap)

# tidy up
ax.set_xlim(windows[0, 0], windows[-1, -1])
ax.set_ylim(0, h.shape[1])
ax.set_ylabel('haplotype count')
ax.set_xlabel('position (bp)')

return ax
def plot_moving_haplotype_frequencies(pos, h, size, start=0, stop=None, n=None, palette='Paired', singleton_color='w', ax=None)
Plot haplotype frequencies in moving windows over the genome.

Parameters
----------
pos : array_like, int, shape (n_items,)
    Variant positions, using 1-based coordinates, in ascending order.
h : array_like, int, shape (n_variants, n_haplotypes)
    Haplotype array.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
n : int, optional
    Color only the `n` most frequent haplotypes (by default, all
    non-singleton haplotypes are colored).
palette : string, optional
    A Seaborn palette name.
singleton_color : string, optional
    Color to paint singleton haplotypes.
ax : axes, optional
    The axes on which to draw. If not provided, a new figure will be
    created.

Returns
-------
ax : axes
3.221464
3.163503
1.018322
d1 = moving_tajima_d(ac1, size=size, start=start, stop=stop, step=step)
d2 = moving_tajima_d(ac2, size=size, start=start, stop=stop, step=step)
delta = d1 - d2
delta_z = (delta - np.mean(delta)) / np.std(delta)
return delta_z
def moving_delta_tajima_d(ac1, ac2, size, start=0, stop=None, step=None)
Compute the difference in Tajima's D between two populations in moving
windows.

Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
    Allele counts array for the second population.
size : int
    The window size (number of variants).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.

Returns
-------
delta_d : ndarray, float, shape (n_windows,)
    Standardized delta Tajima's D.

See Also
--------
allel.stats.diversity.moving_tajima_d
1.924526
2.139789
0.8994
# copy and sort the array
y = np.array(x).flatten()
y.sort()

# setup bins
bins = [y[0]]

# determine step size
step = len(y) // n

# add bin edges
for i in range(step, len(y), step):

    # get value at this index
    v = y[i]

    # only add bin edge if larger than previous
    if v > bins[-1]:
        bins.append(v)

# fix last bin edge
bins[-1] = y[-1]

return np.array(bins)
def make_similar_sized_bins(x, n)
Utility function to create a set of bins over the range of values in
`x` such that each bin contains roughly the same number of values.

Parameters
----------
x : array_like
    The values to be binned.
n : int
    The number of bins to create.

Returns
-------
bins : ndarray
    An array of bin edges.

Notes
-----
The actual number of bins returned may be less than `n` if `x` contains
integer values and any single value is represented more than len(x)//n
times.
3.36796
3.610729
0.932764
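The binning strategy can be approximated with NumPy quantiles; a standalone sketch (illustrative only, not the implementation above):

import numpy as np

x = np.random.randint(0, 100, size=1000)
n = 10
# quantile-based edges put roughly the same number of values in each bin
edges = np.quantile(x, np.linspace(0, 1, n + 1))
counts, _ = np.histogram(x, bins=edges)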
score = asarray_ndim(score, 1)
return (score - np.nanmean(score)) / np.nanstd(score)
def standardize(score)
Centre and scale to unit variance.
4.078053
3.738752
1.090752
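The body above is a NaN-aware z-score; the equivalent NumPy snippet with toy data:

import numpy as np

score = np.array([0.2, 1.5, np.nan, -0.7, 0.9])
z = (score - np.nanmean(score)) / np.nanstd(score)
# NaN entries propagate; the remaining values have zero mean and unit variance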
from scipy.stats import binned_statistic

# check inputs
score = asarray_ndim(score, 1)
aac = asarray_ndim(aac, 1)
check_dim0_aligned(score, aac)

# remove nans
nonan = ~np.isnan(score)
score_nonan = score[nonan]
aac_nonan = aac[nonan]

if bins is None:
    # make our own similar sized bins

    # how many bins to make?
    if n_bins is None:
        # something vaguely reasonable
        n_bins = np.max(aac) // 2

    # make bins
    bins = make_similar_sized_bins(aac_nonan, n_bins)

else:
    # user-provided bins
    bins = asarray_ndim(bins, 1)

mean_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                    statistic=np.mean, bins=bins)
std_score, _, _ = binned_statistic(aac_nonan, score_nonan,
                                   statistic=np.std, bins=bins)

if diagnostics:
    import matplotlib.pyplot as plt
    x = (bins[:-1] + bins[1:]) / 2
    plt.figure()
    plt.fill_between(x, mean_score - std_score, mean_score + std_score,
                     alpha=.5, label='std')
    plt.plot(x, mean_score, marker='o', label='mean')
    plt.grid(axis='y')
    plt.xlabel('Alternate allele count')
    plt.ylabel('Unstandardized score')
    plt.title('Standardization diagnostics')
    plt.legend()

# apply standardization
score_standardized = np.empty_like(score)
for i in range(len(bins) - 1):
    x1 = bins[i]
    x2 = bins[i + 1]
    if i == 0:
        # first bin
        loc = (aac < x2)
    elif i == len(bins) - 2:
        # last bin
        loc = (aac >= x1)
    else:
        # middle bins
        loc = (aac >= x1) & (aac < x2)
    m = mean_score[i]
    s = std_score[i]
    score_standardized[loc] = (score[loc] - m) / s

return score_standardized, bins
def standardize_by_allele_count(score, aac, bins=None, n_bins=None, diagnostics=True)
Standardize `score` within allele frequency bins.

Parameters
----------
score : array_like, float
    The score to be standardized, e.g., IHS or NSL.
aac : array_like, int
    An array of alternate allele counts.
bins : array_like, int, optional
    Allele count bins, overrides `n_bins`.
n_bins : int, optional
    Number of allele count bins to use.
diagnostics : bool, optional
    If True, plot some diagnostic information about the
    standardization.

Returns
-------
score_standardized : ndarray, float
    Standardized scores.
bins : ndarray, int
    Allele count bins used for standardization.
2.100507
2.07336
1.013093
# normalise and check inputs
ac1 = AlleleCountsArray(ac1)
ac2 = AlleleCountsArray(ac2)
ac3 = AlleleCountsArray(ac3)
check_dim0_aligned(ac1, ac2, ac3)

# compute fst
fst12 = moving_hudson_fst(ac1, ac2, size=window_size, start=window_start,
                          stop=window_stop, step=window_step)
fst13 = moving_hudson_fst(ac1, ac3, size=window_size, start=window_start,
                          stop=window_stop, step=window_step)
fst23 = moving_hudson_fst(ac2, ac3, size=window_size, start=window_start,
                          stop=window_stop, step=window_step)

# clip fst values to avoid infinite if fst is 1
for x in fst12, fst13, fst23:
    np.clip(x, a_min=0, a_max=0.99999, out=x)

# compute fst transform
t12 = -np.log(1 - fst12)
t13 = -np.log(1 - fst13)
t23 = -np.log(1 - fst23)

# compute pbs
ret = (t12 + t13 - t23) / 2

if normed:
    # compute pbs normalising constant
    norm = 1 + (t12 + t13 + t23) / 2
    ret = ret / norm

return ret
def pbs(ac1, ac2, ac3, window_size, window_start=0, window_stop=None, window_step=None, normed=True)
Compute the population branching statistic (PBS) which performs a
comparison of allele frequencies between three populations to detect
genome regions that are unusually differentiated in one population
relative to the other two populations.

Parameters
----------
ac1 : array_like, int
    Allele counts from the first population.
ac2 : array_like, int
    Allele counts from the second population.
ac3 : array_like, int
    Allele counts from the third population.
window_size : int
    The window size (number of variants) within which to compute PBS
    values.
window_start : int, optional
    The variant index at which to start windowed calculations.
window_stop : int, optional
    The variant index at which to stop windowed calculations.
window_step : int, optional
    The number of variants between start positions of windows. If not
    given, defaults to the window size, i.e., non-overlapping windows.
normed : bool, optional
    If True (default), use the normalised version of PBS, also known as
    PBSn1 [2]_. Otherwise, use the PBS statistic as originally defined
    in [1]_.

Returns
-------
pbs : ndarray, float
    Windowed PBS values.

Notes
-----
The F\ :sub:`ST` calculations use Hudson's estimator.

References
----------
.. [1] Yi et al., "Sequencing of Fifty Human Exomes Reveals Adaptation
   to High Altitude", Science, 329(5987): 75–78, 2 July 2010.
.. [2] Malaspinas et al., "A genomic history of Aboriginal Australia",
   Nature, volume 538, pages 207–214, 13 October 2016.
2.157999
2.173892
0.992689
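A usage sketch with random genotypes, assuming the top-level exports ``allel.GenotypeArray`` and ``allel.pbs`` as in scikit-allel:

import numpy as np
import allel

g = allel.GenotypeArray(np.random.choice([0, 1], size=(100, 30, 2)).astype('i1'))
ac1 = g[:, :10].count_alleles()    # allele counts for population 1
ac2 = g[:, 10:20].count_alleles()  # population 2
ac3 = g[:, 20:].count_alleles()    # population 3 (outgroup)
scores = allel.pbs(ac1, ac2, ac3, window_size=20)  # one value per window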
windows = index_windows(values, size, start, stop, step)

# setup output
out = np.array([statistic(values[i:j], **kwargs) for i, j in windows])

return out
def moving_statistic(values, statistic, size, start=0, stop=None, step=None, **kwargs)
Calculate a statistic in a moving window over `values`.

Parameters
----------
values : array_like
    The data to summarise.
statistic : function
    The statistic to compute within each window.
size : int
    The window size (number of values).
start : int, optional
    The index at which to start.
stop : int, optional
    The index at which to stop.
step : int, optional
    The distance between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
kwargs
    Additional keyword arguments are passed through to the `statistic`
    function.

Returns
-------
out : ndarray, shape (n_windows,)

Examples
--------
>>> import allel
>>> import numpy as np
>>> values = [2, 5, 8, 16]
>>> allel.moving_statistic(values, np.sum, size=2)
array([ 7, 24])
>>> allel.moving_statistic(values, np.sum, size=2, step=1)
array([ 7, 13, 24])
4.132967
7.458116
0.554157
# determine step
if stop is None:
    stop = len(values)
if step is None:
    # non-overlapping
    step = size

# iterate over windows
for window_start in range(start, stop, step):

    window_stop = window_start + size
    if window_stop > stop:
        # ensure all windows are equal sized
        return

    yield (window_start, window_stop)
def index_windows(values, size, start, stop, step)
Convenience function to construct windows for the :func:`moving_statistic` function.
3.675659
3.991235
0.920933
last = False

# determine start and stop positions
if start is None:
    start = pos[0]
if stop is None:
    stop = pos[-1]
if step is None:
    # non-overlapping
    step = size

windows = []
for window_start in range(start, stop, step):

    # determine window stop
    window_stop = window_start + size
    if window_stop >= stop:
        # last window
        window_stop = stop
        last = True
    else:
        window_stop -= 1

    windows.append([window_start, window_stop])

    if last:
        break

return np.asarray(windows)
def position_windows(pos, size, start, stop, step)
Convenience function to construct windows for the :func:`windowed_statistic` and :func:`windowed_count` functions.
2.665921
2.800678
0.951884
start_locs = np.searchsorted(pos, windows[:, 0])
stop_locs = np.searchsorted(pos, windows[:, 1], side='right')
locs = np.column_stack((start_locs, stop_locs))
return locs
def window_locations(pos, windows)
Locate indices in `pos` corresponding to the start and stop positions of `windows`.
2.384029
1.971983
1.20895
# assume sorted positions
if not isinstance(pos, SortedIndex):
    pos = SortedIndex(pos, copy=False)

# setup windows
if windows is None:
    windows = position_windows(pos, size, start, stop, step)
else:
    windows = asarray_ndim(windows, 2)

# find window locations
locs = window_locations(pos, windows)

# count number of items in each window
counts = np.diff(locs, axis=1).reshape(-1)

return counts, windows
def windowed_count(pos, size=None, start=None, stop=None, step=None, windows=None)
Count the number of items in windows over a single chromosome/contig.

Parameters
----------
pos : array_like, int, shape (n_items,)
    The item positions in ascending order, using 1-based coordinates.
size : int, optional
    The window size (number of bases).
start : int, optional
    The position at which to start (1-based).
stop : int, optional
    The position at which to stop (1-based).
step : int, optional
    The distance between start positions of windows. If not given,
    defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
    Manually specify the windows to use as a sequence of (window_start,
    window_stop) positions, using 1-based coordinates. Overrides the
    size/start/stop/step parameters.

Returns
-------
counts : ndarray, int, shape (n_windows,)
    The number of items in each window.
windows : ndarray, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions, using 1-based coordinates.

Notes
-----
The window stop positions are included within a window. The final
window will be truncated to the specified stop position, and so may be
smaller than the other windows.

Examples
--------
Non-overlapping windows::

    >>> import allel
    >>> pos = [1, 7, 12, 15, 28]
    >>> counts, windows = allel.windowed_count(pos, size=10)
    >>> counts
    array([2, 2, 1])
    >>> windows
    array([[ 1, 10],
           [11, 20],
           [21, 28]])

Half-overlapping windows::

    >>> counts, windows = allel.windowed_count(pos, size=10, step=5)
    >>> counts
    array([2, 3, 2, 0, 1])
    >>> windows
    array([[ 1, 10],
           [ 6, 15],
           [11, 20],
           [16, 25],
           [21, 28]])
3.497408
3.942016
0.887213
# calculate window sizes
if is_accessible is None:
    # N.B., window stops are included
    n_bases = np.diff(windows, axis=1).reshape(-1) + 1
else:
    n_bases = np.array([np.count_nonzero(is_accessible[i-1:j])
                        for i, j in windows])

# deal with multidimensional x
if x.ndim == 1:
    pass
elif x.ndim == 2:
    n_bases = n_bases[:, None]
else:
    raise NotImplementedError('only arrays of 1 or 2 dimensions supported')

# calculate density per-base
with ignore_invalid():
    y = np.where(n_bases > 0, x / n_bases, fill)

# restore to 1-dimensional
if n_bases.ndim > 1:
    n_bases = n_bases.reshape(-1)

return y, n_bases
def per_base(x, windows, is_accessible=None, fill=np.nan)
Calculate the per-base value of a windowed statistic.

Parameters
----------
x : array_like, shape (n_windows,)
    The statistic to average per-base.
windows : array_like, int, shape (n_windows, 2)
    The windows used, as an array of (window_start, window_stop)
    positions using 1-based coordinates.
is_accessible : array_like, bool, shape (len(contig),), optional
    Boolean array indicating accessibility status for all positions in
    the chromosome/contig.
fill : object, optional
    Use this value where there are no accessible bases in a window.

Returns
-------
y : ndarray, float, shape (n_windows,)
    The input array divided by the number of (accessible) bases in each
    window.
n_bases : ndarray, int, shape (n_windows,)
    The number of (accessible) bases in each window.
3.297151
3.097034
1.064615
pos_accessible, = np.nonzero(is_accessible)
pos_accessible += 1  # convert to 1-based coordinates

# N.B., need some care in handling start and stop positions, these are
# genomic positions at which to start and stop the windows
if start:
    pos_accessible = pos_accessible[pos_accessible >= start]
if stop:
    pos_accessible = pos_accessible[pos_accessible <= stop]

# now construct moving windows
windows = moving_statistic(pos_accessible, lambda v: [v[0], v[-1]],
                           size=size, step=step)

return windows
def equally_accessible_windows(is_accessible, size, start=0, stop=None, step=None)
Create windows each containing the same number of accessible bases.

Parameters
----------
is_accessible : array_like, bool, shape (n_bases,)
    Array defining accessible status of all bases on a
    contig/chromosome.
size : int
    Window size (number of accessible bases).
start : int, optional
    The genome position at which to start.
stop : int, optional
    The genome position at which to stop.
step : int, optional
    The number of accessible sites between start positions of windows.
    If not given, defaults to the window size, i.e., non-overlapping
    windows. Use half the window size to get half-overlapping windows.

Returns
-------
windows : ndarray, int, shape (n_windows, 2)
    Window start/stop positions (1-based).
5.548234
5.772595
0.961133
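A hedged usage sketch with a toy mask, assuming the top-level export ``allel.equally_accessible_windows``:

import numpy as np
import allel

# toy accessibility mask over a 1 kb contig, ~80% of bases accessible
is_accessible = np.random.rand(1000) < 0.8
windows = allel.equally_accessible_windows(is_accessible, size=100)
# each row is a 1-based (start, stop) pair spanning ~100 accessible bases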
if context['user'].has_perm('attachments.add_attachment'):
    return {
        'form': AttachmentForm(),
        'form_url': add_url_for_obj(obj),
        'next': context.request.build_absolute_uri(),
    }
else:
    return {'form': None}
def attachment_form(context, obj)
Renders an "upload attachment" form.

The user must have the ``attachments.add_attachment`` permission to add
attachments.
3.879322
3.88872
0.997583
if context['user'].has_perm('attachments.delete_foreign_attachments') or (
    context['user'] == attachment.creator
    and context['user'].has_perm('attachments.delete_attachment')
):
    return {
        'next': context.request.build_absolute_uri(),
        'delete_url': reverse(
            'attachments:delete', kwargs={'attachment_pk': attachment.pk}
        ),
    }
return {'delete_url': None}
def attachment_delete_link(context, attachment)
Renders an HTML link to the delete view of the given attachment.
Returns no content if the request user has no permission to delete
attachments.

The user can delete an attachment if they have the
``attachments.delete_attachment`` permission and are the creator of the
attachment, or if they have the ``attachments.delete_foreign_attachments``
permission, which allows them to delete all attachments.
3.11341
2.582671
1.2055
return 'attachments/{app}_{model}/{pk}/{filename}'.format(
    app=instance.content_object._meta.app_label,
    model=instance.content_object._meta.object_name.lower(),
    pk=instance.content_object.pk,
    filename=filename,
)
def attachment_upload(instance, filename)
Stores the attachment in a "per module/appname/primary key" folder.
2.383163
2.079283
1.146147
if output_file is None:
    if enable_scroll:
        # Add a new axes which will be used as scroll bar.
        axpos = plt.axes([0.12, 0.1, 0.625, 0.03])
        spos = Slider(axpos, "Scroll", 10, len(self.pyfile.lines))

        def update(val):
            pos = spos.val
            self.ax.axis([0, 1, pos, pos - 10])
            self.fig.canvas.draw_idle()

        spos.on_changed(update)
    plt.show(block=blocking)
else:
    plt.savefig(output_file)
def show_heatmap(self, blocking=True, output_file=None, enable_scroll=False)
Method to actually display the heatmap created.

@param blocking: When set to False makes an unblocking plot show.
@param output_file: If not None the heatmap image is output to this
    file. Supported formats: (eps, pdf, pgf, png, ps, raw, rgba, svg,
    svgz)
@param enable_scroll: Flag used to add a scroll bar to scroll long
    files.
3.837347
3.743881
1.024965
self.line_profiler = pprofile.Profile()
self.line_profiler.runfile(
    open(self.pyfile.path, "r"), {}, self.pyfile.path
)
def __profile_file(self)
Method used to profile the given file line by line.
9.242121
7.038261
1.313126
if self.line_profiler is None:
    return {}
# the [0] is because pprofile.Profile.file_dict stores the line_dict
# in a list so that it can be modified in a thread-safe way
# see https://github.com/vpelletier/pprofile/blob/da3d60a1b59a061a0e2113bf768b7cb4bf002ccb/pprofile.py#L398
return self.line_profiler.file_dict[self.pyfile.path][0].line_dict
def __get_line_profile_data(self)
Method to procure line profiles.

@return: Line profiles if the file has been profiled, else an empty
    dictionary.
10.116194
10.391701
0.973488
# Read lines from file.
with open(self.pyfile.path, "r") as file_to_read:
    for line in file_to_read:
        # Remove return char from the end of the line and add a
        # space in the beginning for better visibility.
        self.pyfile.lines.append(" " + line.strip("\n"))

# Total number of lines in file.
self.pyfile.length = len(self.pyfile.lines)

# Fetch line profiles.
line_profiles = self.__get_line_profile_data()

# Creating an array of data points. As the profile keys are 1 indexed
# we should range from 1 to line_count + 1 and not 0 to line_count.
arr = []
for line_num in range(1, self.pyfile.length + 1):
    if line_num in line_profiles:
        # line_profiles[i] will have multiple entries if line i is
        # invoked from multiple places in the code. Here we sum over
        # each invocation to get the total time spent on that line.
        line_times = [
            ltime for _, ltime in line_profiles[line_num].values()
        ]
        arr.append([sum(line_times)])
    else:
        arr.append([0.0])

# Create nd-array from list of data points.
self.pyfile.data = np.array(arr)
def __fetch_heatmap_data_from_profile(self)
Method to create heatmap data from profile information.
4.378494
4.304142
1.017275
# Define the heatmap plot.
height = len(self.pyfile.lines) / 3
width = max(map(lambda x: len(x), self.pyfile.lines)) / 8
self.fig, self.ax = plt.subplots(figsize=(width, height))

# Set second sub plot to occupy bottom 20%
plt.subplots_adjust(bottom=0.20)

# Heat scale orange to red
heatmap = self.ax.pcolor(self.pyfile.data, cmap="OrRd")

# X Axis
# Remove X axis.
self.ax.xaxis.set_visible(False)

# Y Axis
# Create labels for y-axis ticks
row_labels = range(1, self.pyfile.length + 1)
# Set y-tick labels.
self.ax.set_yticklabels(row_labels, minor=False)
# Put y-axis major ticks at the middle of each cell.
self.ax.set_yticks(np.arange(self.pyfile.data.shape[0]) + 0.5, minor=False)
# Invert y-axis to have top down line numbers
self.ax.invert_yaxis()

# Plot definitions
# Set plot y-axis label.
plt.ylabel("Line Number")

# Annotate each cell with lines in file in order.
max_time_spent_on_a_line = max(self.pyfile.data)
for i, line in enumerate(self.pyfile.lines):
    # In order to ensure easy readability of the code, we need to
    # invert colour of text display for darker colours which
    # correspond to higher amount of time spent on the line.
    if self.pyfile.data[i] >= 0.7 * max_time_spent_on_a_line:
        color = (1.0, 1.0, 1.0)  # White text
    else:
        color = (0.0, 0.0, 0.0)  # Black text
    plt.text(
        0.0,
        i + 0.5,
        line,
        ha="left",
        va="center",
        color=color,
        clip_on=True,
    )

# Define legend
cbar = plt.colorbar(heatmap)
cbar.set_label("# of seconds")
def __create_heatmap_plot(self)
Method to actually create the heatmap from profile stats.
3.702285
3.651086
1.014023
# Create command line parser.
parser = argparse.ArgumentParser()

# Adding command line arguments.
parser.add_argument("-o", "--out", help="Output file", default=None)
parser.add_argument(
    "pyfile", help="Python file to be profiled", default=None
)

# Parse command line arguments.
arguments = parser.parse_args()

if arguments.pyfile is not None:
    # Core functionality.
    pyheat = PyHeat(arguments.pyfile)
    pyheat.create_heatmap()
    pyheat.show_heatmap(output_file=arguments.out, enable_scroll=True)
    pyheat.close_heatmap()
else:
    # Print command help
    parser.print_help()
def main()
Starting point for the program execution.
3.658683
3.588584
1.019534
if ndigits is None:
    ndigits = 0
return self.__class__(
    amount=self.amount.quantize(Decimal('1e' + str(-ndigits))),
    currency=self.currency)
def round(self, ndigits=0)
Rounds the amount using the current ``Decimal`` rounding algorithm.
4.072252
3.661433
1.112202
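For reference, the ``quantize`` call above behaves like this (standard library only):

from decimal import Decimal

amount = Decimal('3.14159')
ndigits = 2
exp = Decimal('1e' + str(-ndigits))  # Decimal('0.01')
print(amount.quantize(exp))          # 3.14, using the current rounding mode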
sys.path.insert(0, os.getcwd())
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler()])

parser = argparse.ArgumentParser(description="Manage Application", add_help=False)
parser.add_argument('app', metavar='app', type=str, help='Application module path')
parser.add_argument('--config', type=str, help='Path to configuration.')
parser.add_argument('--version', action="version", version=__version__)

args_, subargs_ = parser.parse_known_args(sys.argv[1:])
if args_.config:
    os.environ[CONFIGURATION_ENVIRON_VARIABLE] = args_.config

from gunicorn.util import import_app

app_uri = args_.app
if ':' not in app_uri:
    app_uri += ':app'

try:
    app = import_app(app_uri)
    app.uri = app_uri
    app.logger.info('Application is loaded: %s' % app.name)
except Exception as exc:
    logging.exception(exc)
    raise sys.exit(1)

app.manage(*subargs_, prog='muffin %s' % args_.app)
def run()
CLI endpoint.
3.362551
3.328443
1.010248
def wrapper(func):
    header = '\n'.join([s for s in (func.__doc__ or '').split('\n')
                        if not s.strip().startswith(':')])
    parser = self.parsers.add_parser(func.__name__, description=header)
    args, vargs, kw, defs, kwargs, kwdefs, anns = inspect.getfullargspec(func)
    defs = defs or []
    kwargs_ = dict(zip(args[-len(defs):], defs))
    docs = dict(PARAM_RE.findall(func.__doc__ or ""))

    def process_arg(name, *, value=..., **opts):
        argname = name.replace('_', '-').lower()
        arghelp = docs.get(vargs, '')
        if value is ...:
            return parser.add_argument(argname, help=arghelp, **opts)

        if isinstance(value, bool):
            if value:
                return parser.add_argument(
                    "--no-" + argname, dest=name, action="store_false",
                    help="Disable %s" % (arghelp or name).lower())

            return parser.add_argument(
                "--" + argname, dest=name, action="store_true",
                help="Enable %s" % (arghelp or name).lower())

        if isinstance(value, list):
            return parser.add_argument(
                "--" + argname, action="append", default=value, help=arghelp)

        return parser.add_argument(
            "--" + argname, type=anns.get(name, type(value)),
            default=value, help=arghelp + ' [%s]' % repr(value))

    if vargs:
        process_arg('*', nargs="*", metavar=vargs)

    for name, value in (kwdefs or {}).items():
        process_arg(name, value=value)

    for name in args:
        process_arg(name, value=kwargs_.get(name, ...))

    self.handlers[func.__name__] = func
    func.parser = parser
    return func

if callable(init):
    init.__init__ = True
    return wrapper(init)

def decorator(func):
    func.__init__ = bool(init)
    return wrapper(func)

return decorator
def command(self, init=False)
Define CLI command.
2.980294
2.929942
1.017185
if router is None:
    router = app.router

handler = to_coroutine(handler)

resources = []

for path in paths:

    # Register any exception to app
    if isinstance(path, type) and issubclass(path, BaseException):
        app._error_handlers[path] = handler
        continue

    # Ensure that names are unique
    name = str(name or '')
    rname, rnum = name, 2
    while rname in router:
        rname = "%s%d" % (name, rnum)
        rnum += 1

    path = parse(path)
    if isinstance(path, RETYPE):
        resource = RawReResource(path, name=rname)
        router.register_resource(resource)
    else:
        resource = router.add_resource(path, name=rname)

    for method in methods or [METH_ANY]:
        method = method.upper()
        resource.add_route(method, handler)

    resources.append(resource)

return resources
def routes_register(app, handler, *paths, methods=None, router=None, name=None)
Register routes.
3.818318
3.775804
1.01126
parsed = re.sre_parse.parse(path)
for case, _ in parsed:
    if case not in (re.sre_parse.LITERAL, re.sre_parse.ANY):
        break
else:
    return path

path = path.strip('^$')

def parse_(match):
    [part] = match.groups()
    match = DYNR_RE.match(part)
    params = match.groupdict()
    return '(?P<%s>%s)' % (params['var'], params['re'] or '[^{}/]+')

return re.compile('^%s$' % DYNS_RE.sub(parse_, path))
def parse(path)
Parse URL path and convert it to regexp if needed.
4.705942
4.509855
1.04348
parsed = re.sre_parse.parse(self._pattern.pattern)
subgroups = {n: str(v) for n, v in enumerate(subgroups, 1)}
groups_ = dict(parsed.pattern.groupdict)
subgroups.update({
    groups_[k0]: str(v0)
    for k0, v0 in groups.items()
    if k0 in groups_
})
path = ''.join(str(val) for val in Traverser(parsed, subgroups))
return URL.build(path=path, encoded=True)
def url_for(self, *subgroups, **groups)
Build URL.
5.965344
5.825681
1.023974
value = negate = chr(value)
while value == negate:
    value = choice(self.literals)
yield value
def state_not_literal(self, value)
Parse not literal.
14.903009
13.898149
1.072302
min_, max_, value = value
value = [val for val in Traverser(value, self.groups)]
if not min_ and max_:
    for val in value:
        if isinstance(val, required):
            min_ = 1
            break
for val in value * min_:
    yield val
def state_max_repeat(self, value)
Parse repeatable parts.
9.689876
8.447779
1.147032
value = [val for val in Traverser(value, self.groups)]
if not value or not value[0]:
    for val in self.literals - set(value):
        return (yield val)
yield value[0]
def state_in(self, value)
Parse ranges.
11.025874
10.232718
1.077512
if value == re.sre_parse.CATEGORY_DIGIT:
    return (yield '0')

if value == re.sre_parse.CATEGORY_WORD:
    return (yield 'x')
def state_category(value)
Parse categories.
6.933387
6.227973
1.113265
num, *_, parsed = value
if num in self.groups:
    return (yield required(self.groups[num]))

yield from Traverser(parsed, groups=self.groups)
def state_subpattern(self, value)
Parse subpatterns.
16.915947
14.667678
1.153281
if isinstance(secret, str):
    secret = secret.encode(encoding)

if isinstance(value, str):
    value = value.encode(encoding)

if isinstance(digestmod, str):
    digestmod = getattr(hashlib, digestmod, hashlib.sha1)

hm = hmac.new(secret, digestmod=digestmod)
hm.update(value)
return hm.hexdigest()
def create_signature(secret, value, digestmod='sha256', encoding='utf-8')
Create HMAC Signature from secret for value.
1.815694
1.699767
1.068202
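The same signature can be produced with the standard library directly; a sketch with hypothetical inputs:

import hashlib
import hmac

secret = b'secret-key'
value = b'payload'
hm = hmac.new(secret, digestmod=hashlib.sha256)
hm.update(value)
signature = hm.hexdigest()
# compare in constant time, as check_signature does in the next record
ok = hmac.compare_digest(signature, hm.hexdigest())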
return hmac.compare_digest(signature, create_signature(*args, **kwargs))
def check_signature(signature, *args, **kwargs)
Check that the signature is correct.
4.542671
3.986631
1.139476
salt = ''.join(random.sample(SALT_CHARS, salt_length))
signature = create_signature(salt, password, digestmod=digestmod)
return '$'.join((digestmod, salt, signature))
def generate_password_hash(password, digestmod='sha256', salt_length=8)
Hash a password with given method and salt length.
3.855955
4.028919
0.95707
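A matching verifier is not shown in this section; a hypothetical sketch built from the helpers above (``check_password_hash`` is an assumed name, not confirmed in the source):

def check_password_hash(password, pwhash):
    # pwhash has the form '<digestmod>$<salt>$<signature>'
    digestmod, salt, signature = pwhash.split('$', 2)
    return check_signature(signature, salt, password, digestmod=digestmod)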
package = sys.modules[package_name]
return {
    name: importlib.import_module(package_name + '.' + name)
    for _, name, _ in pkgutil.walk_packages(package.__path__)
    if not submodules or name in submodules
}
def import_submodules(package_name, *submodules)
Import all submodules by package name.
2.554299
2.556634
0.999086
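A hedged usage sketch, assuming a hypothetical package ``mypkg`` containing ``foo.py`` and ``bar.py``:

# N.B., 'mypkg' must already be imported so it is present in sys.modules
modules = import_submodules('mypkg')          # {'foo': <module>, 'bar': <module>}
only_foo = import_submodules('mypkg', 'foo')  # restrict to named submodules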
def wrapper(method):
    method = to_coroutine(method)
    setattr(method, ROUTE_PARAMS_ATTR, (paths, methods, name))
    if handler and not hasattr(handler, method.__name__):
        setattr(handler, method.__name__, method)
    return method

return wrapper
def register(*paths, methods=None, name=None, handler=None)
Mark a Handler method as an aiohttp handler. It is used when
registration of the handler with the application is postponed. ::

    class AwesomeHandler(Handler):

        def get(self, request):
            return "I'm awesome!"

        @register('/awesome/best')
        def best(self, request):
            return "I'm best!"
4.445278
5.449153
0.815774
docs = getattr(view, '__doc__', None)
view = to_coroutine(view)
methods = methods or ['GET']

if METH_ANY in methods:
    methods = METH_ALL

def proxy(self, *args, **kwargs):
    return view(*args, **kwargs)

params = {m.lower(): proxy for m in methods}
params['methods'] = methods

if docs:
    params['__doc__'] = docs

return type(name or view.__name__, (cls,), params)
def from_view(cls, view, *methods, name=None)
Create a handler class from function or coroutine.
3.690965
3.314548
1.113565
cls.app = app
if cls.app is not None:
    for _, m in inspect.getmembers(cls, predicate=inspect.isfunction):
        if not hasattr(m, ROUTE_PARAMS_ATTR):
            continue
        paths_, methods_, name_ = getattr(m, ROUTE_PARAMS_ATTR)
        name_ = name_ or ("%s.%s" % (cls.name, m.__name__))
        delattr(m, ROUTE_PARAMS_ATTR)
        cls.app.register(*paths_, methods=methods_, name=name_, handler=cls)(m)

@coroutine
@functools.wraps(cls)
def handler(request):
    return cls().dispatch(request, view=view)

if not paths:
    paths = ["/%s" % cls.__name__]

return routes_register(
    app, handler, *paths, methods=methods, router=router, name=name or cls.name)
def bind(cls, app, *paths, methods=None, name=None, router=None, view=None)
Bind to the given application.
3.553853
3.47043
1.024038
if cls.app is None:
    return register(*args, handler=cls, **kwargs)
return cls.app.register(*args, handler=cls, **kwargs)
def register(cls, *args, **kwargs)
Register view to handler.
4.495387
3.666326
1.226128
if view is None and request.method not in self.methods:
    raise HTTPMethodNotAllowed(request.method, self.methods)

method = getattr(self, view or request.method.lower())
response = await method(request, **kwargs)

return await self.make_response(request, response)
async def dispatch(self, request, view=None, **kwargs)
Dispatch request.
3.248433
2.963771
1.096047
while iscoroutine(response):
    response = await response

if isinstance(response, StreamResponse):
    return response

if isinstance(response, str):
    return Response(text=response, content_type='text/html')

if isinstance(response, bytes):
    return Response(body=response, content_type='text/html')

return Response(text=json.dumps(response), content_type='application/json')
async def make_response(self, request, response)
Convert a handler result to web response.
2.168294
2.008909
1.079339
if request.content_type in {'application/x-www-form-urlencoded',
                            'multipart/form-data'}:
    return await request.post()

if request.content_type == 'application/json':
    return await request.json()

return await request.text()
async def parse(self, request)
Return a coroutine which parses data from the request depending on its
content type.

Usage: ::

    async def post(self, request):
        data = await self.parse(request)
        # ...
2.368665
2.227536
1.063357
self.app = app
for name, ptype in self.dependencies.items():
    if name not in app.ps or not isinstance(app.ps[name], ptype):
        raise PluginException(
            'Plugin `%s` requires for plugin `%s` to be installed to the application.' % (
                self.name, ptype))

for oname, dvalue in self.defaults.items():
    aname = ('%s_%s' % (self.name, oname)).upper()
    self.cfg.setdefault(oname, app.cfg.get(aname, dvalue))
    app.cfg.setdefault(aname, self.cfg[oname])
def setup(self, app)
Initialize the plugin. Fill the plugin's options from the application.
3.959815
3.689188
1.073357
@web.middleware
async def middleware(request, handler):
    try:
        return await handler(request)

    except Exception as exc:
        for cls in type(exc).mro():
            if cls in app._error_handlers:
                request.exception = exc
                response = await app._error_handlers[cls](request)
                return response
        raise

return middleware
def _exc_middleware_factory(app)
Handle exceptions. Route exceptions to handlers if they are registered
in the application.
2.803114
2.878675
0.973751
if isinstance(methods, str):
    methods = [methods]

def wrapper(view):
    if handler is None:
        handler_ = view
        methods_ = methods or [METH_ANY]
        if isfunction(handler_) or ismethod(handler_):
            handler_ = Handler.from_view(view, *methods_, name=name)
        handler_.bind(self, *paths, methods=methods_, name=name)
    else:
        view_name = view.__name__
        if not hasattr(handler, view_name):
            setattr(handler, view_name, to_coroutine(view))
        name_ = name or view_name
        handler.bind(self, *paths, methods=methods, name=name_, view=view_name)
    return view

# Support for @app.register(func)
if len(paths) == 1 and callable(paths[0]):
    view = paths[0]
    if isclass(view) and issubclass(view, BaseException):
        return wrapper
    paths = []
    return wrapper(view)

return wrapper
def register(self, *paths, methods=None, name=None, handler=None)
Register function/coroutine/muffin.Handler with the application.

Usage example:

.. code-block:: python

    @app.register('/hello')
    def hello(request):
        return 'Hello World!'
3.353996
3.837464
0.874014
config = LStruct(self.defaults)
module = config['CONFIG'] = os.environ.get(
    CONFIGURATION_ENVIRON_VARIABLE, config['CONFIG'])

if module:
    try:
        module = import_module(module)
        config.update({
            name: getattr(module, name) for name in dir(module)
            if name == name.upper() and not name.startswith('_')
        })

    except ImportError as exc:
        config.CONFIG = None
        self.logger.error("Error importing %s: %s", module, exc)

# Patch configuration from ENV
for name in config:
    if name.startswith('_') or name != name.upper() or name not in os.environ:
        continue
    try:
        config[name] = json.loads(os.environ[name])
    except ValueError:
        pass

return config
def cfg(self)
Load the application configuration. This method loads configuration
from a Python module.
3.833775
3.742921
1.024274
source = plugin

if isinstance(plugin, str):
    module, _, attr = plugin.partition(':')
    module = import_module(module)
    plugin = getattr(module, attr or 'Plugin', None)

if isinstance(plugin, types.ModuleType):
    plugin = getattr(module, 'Plugin', None)

if plugin is None:
    raise MuffinException('Plugin is not found %r' % source)

name = name or plugin.name
if name in self.ps:
    raise MuffinException('Plugin with name `%s` is already installed.' % name)

if isinstance(plugin, type):
    plugin = plugin(**opts)

if hasattr(plugin, 'setup'):
    plugin.setup(self)

if hasattr(plugin, 'middleware') and plugin.middleware not in self.middlewares:
    self.middlewares.append(plugin.middleware)

if hasattr(plugin, 'startup'):
    self.on_startup.append(plugin.startup)

if hasattr(plugin, 'cleanup'):
    self.on_cleanup.append(plugin.cleanup)

# Save plugin links
self.ps[name] = plugin

return plugin
def install(self, plugin, name=None, **opts)
Install plugin to the application.
2.652138
2.586711
1.025293
if self.frozen:
    return False

if self._error_handlers:
    self.middlewares.append(_exc_middleware_factory(self))

# Register static paths
for path in self.cfg.STATIC_FOLDERS:
    self.router.register_resource(SafeStaticResource(self.cfg.STATIC_PREFIX, path))

await super(Application, self).startup()
async def startup(self)
Start the application. Run start-callbacks and lock the application's
configuration and plugins.
8.879982
8.25675
1.075482
self.middlewares.append(web.middleware(to_coroutine(func)))
def middleware(self, func)
Register given middleware (v1).
12.756689
10.964072
1.163499
expected = getattr(settings, 'HONEYPOT_VALUE', '')
if callable(expected):
    expected = expected()
return val == expected
def honeypot_equals(val)
Default verifier used if HONEYPOT_VERIFIER is not specified. Ensures val == HONEYPOT_VALUE or HONEYPOT_VALUE() if it's a callable.
5.572617
4.370236
1.275129
verifier = getattr(settings, 'HONEYPOT_VERIFIER', honeypot_equals)
if request.method == 'POST':
    field = field_name or settings.HONEYPOT_FIELD_NAME
    if field not in request.POST or not verifier(request.POST[field]):
        resp = render_to_string('honeypot/honeypot_error.html',
                                {'fieldname': field})
        return HttpResponseBadRequest(resp)
def verify_honeypot_value(request, field_name)
Verify that request.POST[field_name] is a valid honeypot. Ensures that the field exists and passes verification according to HONEYPOT_VERIFIER.
3.170713
3.124417
1.014818
# hack to reverse arguments if called with str param
if isinstance(func, six.string_types):
    func, field_name = field_name, func

def decorated(func):
    def inner(request, *args, **kwargs):
        response = verify_honeypot_value(request, field_name)
        if response:
            return response
        else:
            return func(request, *args, **kwargs)
    return wraps(func, assigned=available_attrs(func))(inner)

if func is None:
    def decorator(func):
        return decorated(func)
    return decorator

return decorated(func)
def check_honeypot(func=None, field_name=None)
Check request.POST for valid honeypot field. Takes an optional field_name that defaults to HONEYPOT_FIELD_NAME if not specified.
2.890679
2.985103
0.968368
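Typical usage is as a view decorator; a hedged sketch (the ``honeypot.decorators`` import path and field name are assumptions):

from honeypot.decorators import check_honeypot

@check_honeypot(field_name='phonenumber')  # field name is illustrative
def contact_view(request):
    ...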
# borrowing liberally from django's csrf_exempt
def wrapped(*args, **kwargs):
    return view_func(*args, **kwargs)
wrapped.honeypot_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped)
def honeypot_exempt(view_func)
Mark a view as exempt from honeypot validation.
3.173992
3.005466
1.056073
if not field_name:
    field_name = settings.HONEYPOT_FIELD_NAME
value = getattr(settings, 'HONEYPOT_VALUE', '')
if callable(value):
    value = value()
return {'fieldname': field_name, 'value': value}
def render_honeypot_field(field_name=None)
Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
2.58112
2.365477
1.091163
import argparse
import textwrap

parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=textwrap.dedent('''\
        Command line utility to generate .svg badges.

        This utility can be used to generate .svg badge images, using configurable
        thresholds for coloring. Values can be passed as string, integer or floating
        point. The type will be detected automatically.

        Running the utility with a --file option will result in the .svg image being
        written to file. Without the --file option the .svg file content will be
        written to stdout, so can be redirected to a file.

        Some thresholds have been built in to save time. To use these thresholds you
        can simply specify the template name instead of threshold value/color pairs.

        examples:

            Here are some usage specific examples that may save time on defining
            thresholds.

            Pylint
                anybadge.py --value=2.22 --file=pylint.svg pylint

                anybadge.py --label=pylint --value=2.22 --file=pylint.svg \\
                    2=red 4=orange 8=yellow 10=green

            Coverage
                anybadge.py --value=65 --file=coverage.svg coverage

                anybadge.py --label=coverage --value=65 --suffix='%%' --file=coverage.svg \\
                    50=red 60=orange 80=yellow 100=green

            CI Pipeline
                anybadge.py --label=pipeline --value=passing --file=pipeline.svg \\
                    passing=green failing=red
        '''))
parser.add_argument('-l', '--label', type=str, help='The badge label.')
parser.add_argument('-v', '--value', type=str, help='The badge value.', required=True)
parser.add_argument('-m', '--value-format', type=str, default=None,
                    help='Formatting string for value (e.g. "%%.2f" for 2dp floats)')
parser.add_argument('-c', '--color', type=str, default=DEFAULT_COLOR,
                    help='For fixed color badges use --color to specify the badge '
                         'color.')
parser.add_argument('-p', '--prefix', type=str, default='',
                    help='Optional prefix for value.')
parser.add_argument('-s', '--suffix', type=str, default='',
                    help='Optional suffix for value.')
parser.add_argument('-d', '--padding', type=int, default=NUM_PADDING_CHARS,
                    help='Number of characters to pad on either side of the badge '
                         'text.')
parser.add_argument('-n', '--font', type=str, default=DEFAULT_FONT,
                    help='Font name. Supported fonts: ' +
                         ', '.join(['"%s"' % x for x in FONT_WIDTHS.keys()]))
parser.add_argument('-z', '--font-size', type=int, default=DEFAULT_FONT_SIZE,
                    help='Font size.')
parser.add_argument('-t', '--template', type=str, default=TEMPLATE_SVG,
                    help='Location of alternative template .svg file.')
parser.add_argument('-u', '--use-max', action='store_true',
                    help='Use the maximum threshold color when the value exceeds the '
                         'maximum threshold.')
parser.add_argument('-f', '--file', type=str, help='Output file location.')
parser.add_argument('-o', '--overwrite', action='store_true',
                    help='Overwrite output file if it already exists.')
parser.add_argument('-r', '--text-color', type=str, default=DEFAULT_TEXT_COLOR,
                    help='Text color. A single value affects both label and value '
                         'colors. A comma separated pair affects label and value '
                         'text respectively.')
parser.add_argument('args', nargs=argparse.REMAINDER,
                    help='Pairs of <upper>=<color>. For example 2=red 4=orange '
                         '6=yellow 8=good. Read this as "Less than 2 = red, less '
                         'than 4 = orange...".')
return parser.parse_args()
def parse_args()
Parse the command line arguments.
3.905239
3.881821
1.006033
# Parse command line arguments
args = parse_args()

label = args.label
threshold_text = args.args
suffix = args.suffix

# Check whether thresholds were sent as a single word that appears in
# the list of templates.  If so, swap in the template.
if len(args.args) == 1 and args.args[0] in BADGE_TEMPLATES:
    template_name = args.args[0]
    template_dict = BADGE_TEMPLATES[template_name]
    threshold_text = template_dict['threshold'].split(' ')
    if not args.label:
        label = template_dict['label']
    if not args.suffix and 'suffix' in template_dict:
        suffix = template_dict['suffix']

if not label:
    raise ValueError('Label has not been set. Please use --label argument.')

# Create threshold dict from args
threshold_list = [x.split('=') for x in threshold_text]
threshold_dict = {x[0]: x[1] for x in threshold_list}

# Create badge object
badge = Badge(label, args.value, value_prefix=args.prefix, value_suffix=suffix,
              default_color=args.color, num_padding_chars=args.padding,
              font_name=args.font, font_size=args.font_size, template=args.template,
              use_max_when_value_exceeds=args.use_max, thresholds=threshold_dict,
              value_format=args.value_format, text_color=args.text_color)

if args.file:
    # Write badge SVG to file
    badge.write_badge(args.file, overwrite=args.overwrite)
else:
    print(badge.badge_svg_text)
def main()
Generate a badge based on command line arguments.
3.605532
3.404263
1.059123
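The positional threshold arguments are split twice: argparse separates the pairs, and the function above splits each pair on '='. A quick illustration of the intermediate shapes, in plain Python with nothing anybadge-specific:

# How positional threshold arguments become the thresholds dict.
threshold_text = ['2=red', '4=orange', '8=yellow', '10=green']

threshold_list = [x.split('=') for x in threshold_text]
# [['2', 'red'], ['4', 'orange'], ['8', 'yellow'], ['10', 'green']]

threshold_dict = {x[0]: x[1] for x in threshold_list}
# {'2': 'red', '4': 'orange', '8': 'yellow', '10': 'green'}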
try:
    a = float(self.value)
    b = int(a)
except ValueError:
    return False
else:
    return a == b
def value_is_int(self)
Identify whether the value text is an int.
3.338303
2.942923
1.134349
return self.get_font_width(font_name=self.font_name, font_size=self.font_size)
def font_width(self)
Return the badge font width.
3.400949
3.143653
1.081846
return self.get_text_width(' ') + self.label_width + \
    int(float(self.font_width) * float(self.num_padding_chars))
def color_split_position(self)
The SVG x position where the color split should occur.
11.142128
9.8192
1.134729
return self.get_text_width(' ' + ' ' * int(float(self.num_padding_chars) * 2.0)) \
    + self.label_width + self.value_width
def badge_width(self)
The total width of badge.

>>> badge = Badge('pylint', '5', font_name='DejaVu Sans,Verdana,Geneva,sans-serif',
...               font_size=11)
>>> badge.badge_width
91
8.074352
9.741892
0.828828
# Identify whether template is a file or the actual template text
if len(self.template.split('\n')) == 1:
    with open(self.template, mode='r') as file_handle:
        badge_text = file_handle.read()
else:
    badge_text = self.template

return badge_text.replace('{{ badge width }}', str(self.badge_width)) \
    .replace('{{ font name }}', self.font_name) \
    .replace('{{ font size }}', str(self.font_size)) \
    .replace('{{ label }}', self.label) \
    .replace('{{ value }}', self.value_text) \
    .replace('{{ label anchor }}', str(self.label_anchor)) \
    .replace('{{ label anchor shadow }}', str(self.label_anchor_shadow)) \
    .replace('{{ value anchor }}', str(self.value_anchor)) \
    .replace('{{ value anchor shadow }}', str(self.value_anchor_shadow)) \
    .replace('{{ color }}', self.badge_color_code) \
    .replace('{{ label text color }}', self.label_text_color) \
    .replace('{{ value text color }}', self.value_text_color) \
    .replace('{{ color split x }}', str(self.color_split_position)) \
    .replace('{{ value width }}', str(self.badge_width - self.color_split_position))
def badge_svg_text(self)
The badge SVG text.
2.301442
2.250804
1.022498
return len(text) * self.get_font_width(self.font_name, self.font_size)
def get_text_width(self, text)
Return the width of text.

This implementation assumes a fixed font of:

font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11"

>>> badge = Badge('x', 1, font_name='DejaVu Sans,Verdana,Geneva,sans-serif', font_size=11)
>>> badge.get_text_width('pylint')
42
3.691097
4.451964
0.829094
# If no thresholds were passed then return the default color
if not self.thresholds:
    return self.default_color

if self.value_type == str:
    if self.value in self.thresholds:
        return self.thresholds[self.value]
    else:
        return self.default_color

# Convert the threshold dictionary into a sorted list of lists
threshold_list = [[self.value_type(i[0]), i[1]] for i in self.thresholds.items()]
threshold_list.sort(key=lambda x: x[0])

color = None
for threshold, color in threshold_list:
    if float(self.value) < float(threshold):
        return color

# If we drop out the top of the range then return the last max color
if color and self.use_max_when_value_exceeds:
    return color
else:
    return self.default_color
def badge_color(self)
Find the badge color based on the thresholds.
3.291011
3.078486
1.069036
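Read the thresholds as strict upper bounds: the first threshold the value falls below supplies the color, and use_max_when_value_exceeds controls what happens past the top. A standalone sketch of the same walk outside the Badge class; pick_color is a hypothetical name for illustration:

def pick_color(value, thresholds, default='green', use_max=False):
    """Standalone rendition of the threshold walk above (assumes numeric keys)."""
    ordered = sorted((float(t), c) for t, c in thresholds.items())
    color = None
    for threshold, color in ordered:
        if float(value) < threshold:
            return color
    # Value exceeded every threshold.
    return color if (color and use_max) else default

thresholds = {'2': 'red', '4': 'orange', '8': 'yellow', '10': 'green'}
assert pick_color(2.22, thresholds) == 'orange'          # 2 <= 2.22 < 4
assert pick_color(11, thresholds, use_max=True) == 'green'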
# Validate path (part 1)
if file_path.endswith('/'):
    raise Exception('File location may not be a directory.')

# Get absolute filepath
path = os.path.abspath(file_path)
if not path.lower().endswith('.svg'):
    path += '.svg'

# Validate path (part 2)
if not overwrite and os.path.exists(path):
    raise Exception('File "{}" already exists.'.format(path))

with open(path, mode='w') as file_handle:
    file_handle.write(self.badge_svg_text)
def write_badge(self, file_path, overwrite=False)
Write badge to file.
3.098847
3.081875
1.005507
global DEFAULT_SERVER_PORT, DEFAULT_SERVER_LISTEN_ADDRESS, DEFAULT_LOGGING_LEVEL

# Check for environment variables
if 'ANYBADGE_PORT' in environ:
    DEFAULT_SERVER_PORT = environ['ANYBADGE_PORT']

if 'ANYBADGE_LISTEN_ADDRESS' in environ:
    DEFAULT_SERVER_LISTEN_ADDRESS = environ['ANYBADGE_LISTEN_ADDRESS']

if 'ANYBADGE_LOG_LEVEL' in environ:
    DEFAULT_LOGGING_LEVEL = logging.getLevelName(environ['ANYBADGE_LOG_LEVEL'])

# Parse command line args
args = parse_args()

# Set logging level
logging_level = DEFAULT_LOGGING_LEVEL
if args.debug:
    logging_level = logging.DEBUG

logging.basicConfig(format='%(asctime)-15s %(levelname)s:%(filename)s(%(lineno)d):%(funcName)s: %(message)s',
                    level=logging_level)

logger.info('Starting up anybadge server.')

run(listen_address=args.listen_address, port=args.port)
def main()
Run server.
2.231319
2.177221
1.024847
name_dispatch = {
    ast.Name: "id",
    ast.Attribute: "attr",
    ast.Call: "func",
    ast.FunctionDef: "name",
    ast.ClassDef: "name",
    ast.Subscript: "value",
}

# This is a new ast type in Python 3
if hasattr(ast, "arg"):
    name_dispatch[ast.arg] = "arg"

while not isinstance(obj, str):
    assert type(obj) in name_dispatch
    obj = getattr(obj, name_dispatch[type(obj)])

return obj
def get_object_name(obj)
Return the name of a given object
3.340395
3.081795
1.083912
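A short demonstration of the dispatch, assuming get_object_name is importable as defined above; it unwraps Call and Attribute nodes down to their plain-string names using only the standard library:

import ast

tree = ast.parse('self.total = compute(x)')
call = next(n for n in ast.walk(tree) if isinstance(n, ast.Call))
attr = next(n for n in ast.walk(tree) if isinstance(n, ast.Attribute))

print(get_object_name(call))   # 'compute'  (Call -> func -> Name -> id)
print(get_object_name(attr))   # 'total'    (Attribute -> attr)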
return attr.value.id if isinstance(attr.value, ast.Name) else None
def get_attribute_name_id(attr)
Return the attribute name identifier
6.482441
6.97545
0.929322
if not method.args.args:
    return False

first_arg = method.args.args[0]
first_arg_name = get_object_name(first_arg)
return first_arg_name == arg_name
def is_class_method_bound(method, arg_name=BOUND_METHOD_ARGUMENT_NAME)
Return whether a class method is bound to the class
3.409397
3.193754
1.06752
return [
    node
    for node in cls.body
    if isinstance(node, ast.FunctionDef)
]
def get_class_methods(cls)
Return methods associated with a given class
4.072762
3.935611
1.034849
return [
    target
    for node in cls.body
    if isinstance(node, ast.Assign)
    for target in node.targets
]
def get_class_variables(cls)
Return class variables associated with a given class
5.155317
5.234011
0.984965
node_attributes = [
    child
    for child in ast.walk(node)
    if isinstance(child, ast.Attribute)
    and get_attribute_name_id(child) == bound_name_classifier
]
node_function_call_names = [
    get_object_name(child)
    for child in ast.walk(node)
    if isinstance(child, ast.Call)
]
node_instance_variables = [
    attribute
    for attribute in node_attributes
    if get_object_name(attribute) not in node_function_call_names
]
return node_instance_variables
def get_instance_variables(node, bound_name_classifier=BOUND_METHOD_ARGUMENT_NAME)
Return instance variables used in an AST node
2.319423
2.260033
1.026278
return [
    child
    for child in ast.walk(node)
    if isinstance(child, ast.ClassDef)
]
def get_module_classes(node)
Return classes associated with a given module
3.491954
3.185154
1.096322
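Putting the AST helpers above together on a toy class, assuming they are importable from one module and that BOUND_METHOD_ARGUMENT_NAME defaults to 'self':

import ast

source = '''
class Counter:
    unit = 'items'

    def bump(self, n):
        self.total = self.total + n

    @staticmethod
    def helper(n):
        return n
'''

module = ast.parse(source)
cls = get_module_classes(module)[0]

methods = get_class_methods(cls)
print([m.name for m in methods])                    # ['bump', 'helper']
print([is_class_method_bound(m) for m in methods])  # [True, False]

print([get_object_name(v) for v in get_class_variables(cls)])     # ['unit']
print({get_object_name(a) for a in get_instance_variables(cls)})  # {'total'}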
return [
    os.path.join(root, filename)
    for root, directories, filenames in os.walk(directory)
    for filename in filenames
]
def recursively_get_files_from_directory(directory)
Return all filenames found recursively under a directory
2.341351
2.198281
1.065083
if isinstance(key, int):
    return SeedID(key)
if key not in SeedID._member_map_:
    extend_enum(SeedID, key, default)
return SeedID[key]
def get(key, default=-1)
Backport support for original codes.
5.852302
5.142142
1.138106
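The same get() shape recurs below for PriorityLevel, TOS_THR, Registration, ErrorCode and NAT_Traversal: integer keys go through the normal enum constructor, while unknown names are registered on the fly. A self-contained sketch with a toy enum; it assumes the third-party aenum package, which supplies extend_enum here:

from enum import IntEnum
from aenum import extend_enum  # third-party: pip install aenum

class Toy(IntEnum):
    KNOWN = 0

def get(key, default=-1):
    """Same shape as the SeedID/PriorityLevel/... backports above."""
    if isinstance(key, int):
        return Toy(key)
    if key not in Toy._member_map_:
        extend_enum(Toy, key, default)  # register unknown name with default value
    return Toy[key]

print(get(0))          # Toy.KNOWN
print(get('MYSTERY'))  # Toy.MYSTERY, value -1, added on first lookup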
if isinstance(key, int):
    return PriorityLevel(key)
if key not in PriorityLevel._member_map_:
    extend_enum(PriorityLevel, key, default)
return PriorityLevel[key]
def get(key, default=-1)
Backport support for original codes.
4.86712
4.224414
1.152141
if isinstance(key, int):
    return TOS_THR(key)
if key not in TOS_THR._member_map_:
    extend_enum(TOS_THR, key, default)
return TOS_THR[key]
def get(key, default=-1)
Backport support for original codes.
4.819141
4.525868
1.064799
if not (isinstance(value, int) and 0 <= value <= 1):
    raise ValueError('%r is not a valid %s' % (value, cls.__name__))
extend_enum(cls, 'Unassigned [%d]' % value, value)
return cls(value)
def _missing_(cls, value)
Lookup function used when value is not found.
4.971387
5.194452
0.957057
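_missing_ complements the get() pattern: the enum machinery calls it when cls(value) fails, so direct construction with an unknown but in-range integer also succeeds. A minimal sketch with a toy two-value enum, again assuming aenum for extend_enum:

from aenum import IntEnum, extend_enum

class Flag(IntEnum):
    """Toy enum with the same _missing_ shape as above (valid range 0-1)."""
    NOT_SET = 0

    @classmethod
    def _missing_(cls, value):
        if not (isinstance(value, int) and 0 <= value <= 1):
            raise ValueError('%r is not a valid %s' % (value, cls.__name__))
        extend_enum(cls, 'Unassigned [%d]' % value, value)
        return cls(value)  # resolves now that the member exists

print(Flag(1))  # Flag['Unassigned [1]'], created on demand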
if isinstance(key, int):
    return Registration(key)
if key not in Registration._member_map_:
    extend_enum(Registration, key, default)
return Registration[key]
def get(key, default=-1)
Backport support for original codes.
6.904724
5.936065
1.163182
if isinstance(key, int):
    return ErrorCode(key)
if key not in ErrorCode._member_map_:
    extend_enum(ErrorCode, key, default)
return ErrorCode[key]
def get(key, default=-1)
Backport support for original codes.
4.544353
3.972285
1.144015
from pcapkit.protocols.protocol import Protocol

try:
    flag = issubclass(value, Protocol)
except TypeError:
    flag = issubclass(type(value), Protocol)
if flag or isinstance(value, Protocol):
    value = value.__index__()
if isinstance(value, tuple):
    value = r'|'.join(value)

with contextlib.suppress(Exception):
    return sum(1 for data in self.__data__
               if re.fullmatch(value, data, re.IGNORECASE) is not None)
return 0
def count(self, value)
S.count(value) -> integer -- return number of occurrences of value
4.008952
3.943568
1.01658
if start is not None and start < 0:
    start = max(len(self) + start, 0)
if stop is not None and stop < 0:
    stop += len(self)

try:
    if not isinstance(start, numbers.Integral):
        start = self.index(start)
    if not isinstance(stop, numbers.Integral):
        stop = self.index(stop)
except IndexNotFound:
    raise IntError('slice indices must be integers or have an __index__ method') from None

from pcapkit.protocols.protocol import Protocol

try:
    flag = issubclass(value, Protocol)
except TypeError:
    flag = issubclass(type(value), Protocol)
if flag or isinstance(value, Protocol):
    value = value.__index__()
if isinstance(value, tuple):
    value = r'|'.join(value)

try:
    for index, data in enumerate(self.__data__[start:stop]):
        if re.fullmatch(value, data, re.IGNORECASE):
            return index
except Exception:
    raise IndexNotFound(f'{value!r} is not in {self.__class__.__name__!r}')
def index(self, value, start=0, stop=None)
S.index(value, [start, [stop]]) -> integer -- return first index of value. Raises ValueError if the value is not present. Supporting start and stop arguments is optional, but recommended.
2.868403
2.886238
0.993821
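Both count and index above reduce protocol lookup to case-insensitive re.fullmatch over stored protocol names, after normalizing Protocol classes/instances and tuples into a pattern string. The matching core in isolation, with toy data and no pcapkit imports:

import re

# The stored data: protocol names in a chain.
data = ['Ethernet', 'IPv4', 'TCP', 'HTTP']

def proto_count(pattern):
    """Case-insensitive fullmatch count, as in the count method above."""
    return sum(1 for name in data
               if re.fullmatch(pattern, name, re.IGNORECASE) is not None)

def proto_index(pattern):
    """First case-insensitive fullmatch, as in the index method above."""
    for i, name in enumerate(data):
        if re.fullmatch(pattern, name, re.IGNORECASE):
            return i
    raise ValueError(f'{pattern!r} is not in the chain')

print(proto_count('tcp'))                     # 1
print(proto_index('|'.join(('TCP', 'UDP'))))  # 2 -- tuple values become alternations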
return self.__alias__.index(value, start, stop)
def index(self, value, start=None, stop=None)
Return first index of value.
13.25986
9.419764
1.407664
if isinstance(key, int):
    return NAT_Traversal(key)
if key not in NAT_Traversal._member_map_:
    extend_enum(NAT_Traversal, key, default)
return NAT_Traversal[key]
def get(key, default=-1)
Backport support for original codes.
6.982631
6.226829
1.121378
if length is None:
    length = len(self)

_vers = self._read_unpack(1)
_type = self._read_unpack(1)
_tlen = self._read_unpack(2)
_rtid = self._read_id_numbers()
_area = self._read_id_numbers()
_csum = self._read_fileng(2)
_autp = self._read_unpack(2)

ospf = dict(
    version=_vers,
    type=TYPE.get(_type),
    len=_tlen,
    router_id=_rtid,
    area_id=_area,
    chksum=_csum,
    autype=AUTH.get(_autp) or 'Reserved',
)

if _autp == 2:
    ospf['auth'] = self._read_encrypt_auth()
else:
    ospf['auth'] = self._read_fileng(8)

length = ospf['len'] - 24
ospf['packet'] = self._read_packet(header=24, payload=length)

return self._decode_next_layer(ospf, length)
def read_ospf(self, length)
Read Open Shortest Path First.

Structure of OSPF header [RFC 2328]:

     0                   1                   2                   3
     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |   Version #   |     Type      |         Packet length         |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                          Router ID                            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                           Area ID                             |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |           Checksum            |             AuType            |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Authentication                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
    |                       Authentication                          |
    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+

Octets      Bits        Name                    Description
  0           0     ospf.version            Version #
  1           8     ospf.type               Type (0/1)
  2          16     ospf.len                Packet Length (header included)
  4          32     ospf.router_id          Router ID
  8          64     ospf.area_id            Area ID
  12         96     ospf.chksum             Checksum
  14        112     ospf.autype             AuType
  16        128     ospf.auth               Authentication
3.60043
3.120723
1.153716
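The reader methods (_read_unpack, _read_fileng, _read_id_numbers) consume the stream field by field following the layout in the docstring. For orientation only, a standalone sketch that unpacks the same fixed 24-byte header from raw bytes with struct; parse_ospf_header is a hypothetical name, not pcapkit's API:

import struct

def parse_ospf_header(raw):
    """Unpack the fixed 24-byte OSPF header described above (a sketch)."""
    vers, type_, tlen = struct.unpack_from('!BBH', raw, 0)
    router_id = '.'.join(str(b) for b in raw[4:8])  # dotted-quad, like _read_id_numbers
    area_id = '.'.join(str(b) for b in raw[8:12])
    chksum, autype = struct.unpack_from('!HH', raw, 12)
    auth = raw[16:24]
    return dict(version=vers, type=type_, len=tlen, router_id=router_id,
                area_id=area_id, chksum=chksum, autype=autype, auth=auth)

# A hand-made header: version 2, type 1, length 44, router 1.1.1.1, area 0.0.0.0.
hdr = bytes([2, 1, 0, 44]) + bytes([1, 1, 1, 1]) + bytes(4) + bytes(4) + bytes(8)
print(parse_ospf_header(hdr)['router_id'])  # '1.1.1.1'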
_byte = self._read_fileng(4)
_addr = '.'.join(str(_) for _ in _byte)
return _addr
def _read_id_numbers(self)
Read router and area IDs.
12.279835
9.116343
1.347013
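The manual dotted-quad join above has a standard-library equivalent that also validates the length; a small sketch:

import socket

raw = bytes([192, 0, 2, 1])

# Manual join, as in _read_id_numbers above:
assert '.'.join(str(b) for b in raw) == '192.0.2.1'

# Equivalent via the standard library (raises OSError on wrong length):
assert socket.inet_ntoa(raw) == '192.0.2.1'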