Record schema (field : dtype):

code : string
signature : string
docstring : string
loss_without_docstring : float64
loss_with_docstring : float64
factor : float64

Each record below gives these six fields in order: the function body, its signature, its docstring, and then the three numeric values.
import matplotlib.pyplot as plt

# check inputs
m_square = ensure_square(m)

# blank out lower triangle and flip up/down
m_square = np.tril(m_square)[::-1, :]

# set up axes
if ax is None:
    # make a square figure with enough pixels to represent each variant
    x = m_square.shape[0] / plt.rcParams['figure.dpi']
    x = max(x, plt.rcParams['figure.figsize'][0])
    fig, ax = plt.subplots(figsize=(x, x))
    fig.tight_layout(pad=0)

# setup imshow arguments
if imshow_kwargs is None:
    imshow_kwargs = dict()
imshow_kwargs.setdefault('interpolation', 'none')
imshow_kwargs.setdefault('cmap', 'Greys')
imshow_kwargs.setdefault('vmin', 0)
imshow_kwargs.setdefault('vmax', 1)

# plot as image
im = ax.imshow(m_square, **imshow_kwargs)

# tidy up
ax.set_xticks([])
ax.set_yticks([])
for s in 'bottom', 'right':
    ax.spines[s].set_visible(False)
if colorbar:
    plt.gcf().colorbar(im, shrink=.5, pad=0)

return ax
def plot_pairwise_ld(m, colorbar=True, ax=None, imshow_kwargs=None)
Plot a matrix of genotype linkage disequilibrium values between all pairs of variants. Parameters ---------- m : array_like Array of linkage disequilibrium values in condensed form. colorbar : bool, optional If True, add a colorbar to the current figure. ax : axes, optional The axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like, optional Additional keyword arguments passed through to :func:`matplotlib.pyplot.imshow`. Returns ------- ax : axes The axes on which the plot was drawn.
2.713747
2.745183
0.988548
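A minimal usage sketch for the function above, assuming both it and the Rogers-Huff LD helper are exposed at the package level as allel.plot_pairwise_ld and allel.rogers_huff_r:

import numpy as np
import allel

# simulate a small diploid genotype array (100 variants x 10 samples)
np.random.seed(1)
g = allel.GenotypeArray(np.random.choice([0, 1], size=(100, 10, 2)))

# condensed matrix of pairwise r-squared values between variants
gn = g.to_n_alt()
r = allel.rogers_huff_r(gn) ** 2

# plot the LD matrix; returns the matplotlib axes
ax = allel.plot_pairwise_ld(r)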
import h5py

h5f = None

if isinstance(parent, str):
    h5f = h5py.File(parent, mode='a')
    parent = h5f

try:

    kwargs.setdefault('chunks', True)  # auto-chunking
    kwargs.setdefault('dtype', a.dtype)
    kwargs.setdefault('compression', 'gzip')
    h5d = parent.require_dataset(name, shape=a.shape, **kwargs)
    h5d[...] = a
    return h5d

finally:
    if h5f is not None:
        h5f.close()
def array_to_hdf5(a, parent, name, **kwargs)
Write a Numpy array to an HDF5 dataset. Parameters ---------- a : ndarray Data to write. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of dataset to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5d : h5py dataset
2.376976
2.320915
1.024154
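For illustration, a sketch of writing an array with the function above; the file path is hypothetical and the function is assumed to be importable from wherever it is defined:

import numpy as np

a = np.arange(100)

# passing a string as `parent` opens (or creates) the file in append mode;
# chunking and gzip compression are applied by default
h5d = array_to_hdf5(a, '/tmp/example.h5', 'a')

# note: when `parent` is a path, the file is closed before returning,
# so reopen the file to read the data back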
import h5py

h5f = None

if len(args) == 1:
    group = args[0]

elif len(args) == 2:
    file_path, node_path = args
    h5f = h5py.File(file_path, mode='r')
    try:
        group = h5f[node_path]
    except Exception as e:
        h5f.close()
        raise e

else:
    raise ValueError('bad arguments; expected group or (file_path, '
                     'node_path), found %s' % repr(args))

try:

    if not isinstance(group, h5py.Group):
        raise ValueError('expected group, found %r' % group)

    # determine dataset names to load
    available_dataset_names = [n for n in group.keys()
                               if isinstance(group[n], h5py.Dataset)]
    names = kwargs.pop('names', available_dataset_names)
    names = [str(n) for n in names]  # needed for PY2
    for n in names:
        if n not in set(group.keys()):
            raise ValueError('name not found: %s' % n)
        if not isinstance(group[n], h5py.Dataset):
            raise ValueError('name does not refer to a dataset: %s, %r'
                             % (n, group[n]))

    # check datasets are aligned
    datasets = [group[n] for n in names]
    length = datasets[0].shape[0]
    for d in datasets[1:]:
        if d.shape[0] != length:
            raise ValueError('datasets must be of equal length')

    # determine start and stop parameters for load
    start = kwargs.pop('start', 0)
    stop = kwargs.pop('stop', length)

    # check condition
    condition = kwargs.pop('condition', None)  # type: np.ndarray
    condition = asarray_ndim(condition, 1, allow_none=True)
    if condition is not None and condition.size != length:
        raise ValueError('length of condition does not match length '
                         'of datasets')

    # setup output data
    dtype = [(n, d.dtype, d.shape[1:]) for n, d in zip(names, datasets)]
    ra = np.empty(length, dtype=dtype)

    for n, d in zip(names, datasets):
        a = d[start:stop]
        if condition is not None:
            a = np.compress(condition[start:stop], a, axis=0)
        ra[n] = a

    return ra

finally:
    if h5f is not None:
        h5f.close()
def recarray_from_hdf5_group(*args, **kwargs)
Load a recarray from columns stored as separate datasets within an HDF5 group. Either provide an h5py group as a single positional argument, or provide two positional arguments giving the HDF5 file path and the group node path within the file. The following optional parameters may be given. Parameters ---------- start : int, optional Index to start loading from. stop : int, optional Index to finish loading at. condition : array_like, bool, optional A 1-dimensional boolean array of the same length as the columns of the table to load, indicating a selection of rows to load.
2.273063
2.1976
1.034339
import h5py

h5f = None

if isinstance(parent, str):
    h5f = h5py.File(parent, mode='a')
    parent = h5f

try:

    h5g = parent.require_group(name)
    for n in ra.dtype.names:
        array_to_hdf5(ra[n], h5g, n, **kwargs)

    return h5g

finally:
    if h5f is not None:
        h5f.close()
def recarray_to_hdf5_group(ra, parent, name, **kwargs)
Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group
2.20334
2.305754
0.955583
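A round-trip sketch combining the two recarray helpers above, assuming both are importable as shown; the file path is hypothetical:

import numpy as np

ra = np.rec.fromrecords([(b'chr1', 2), (b'chr1', 7), (b'chr2', 3)],
                        names=['CHROM', 'POS'])

# write each column as a separate dataset in group 'variants'
recarray_to_hdf5_group(ra, '/tmp/example.h5', 'variants')

# load the columns back, selecting rows with a boolean condition
out = recarray_from_hdf5_group('/tmp/example.h5', 'variants',
                               condition=[True, False, True])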
# check inputs
data = np.asarray(data)
if data.ndim < 2:
    raise ValueError('data must have 2 or more dimensions')
sel0 = asarray_ndim(sel0, 1, allow_none=True)
sel1 = asarray_ndim(sel1, 1, allow_none=True)

# ensure indices
if sel0 is not None and sel0.dtype.kind == 'b':
    sel0, = np.nonzero(sel0)
if sel1 is not None and sel1.dtype.kind == 'b':
    sel1, = np.nonzero(sel1)

# ensure leading dimension indices can be broadcast correctly
if sel0 is not None and sel1 is not None:
    sel0 = sel0[:, np.newaxis]

# deal with None arguments
if sel0 is None:
    sel0 = _total_slice
if sel1 is None:
    sel1 = _total_slice

return data[sel0, sel1]
def subset(data, sel0, sel1)
Apply selections on first and second axes.
2.359144
2.368465
0.996064
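The same selection logic is exposed on the array wrapper classes; for example, GenotypeArray.subset applies `sel0` to variants and `sel1` to samples:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [1, 2]],
                         [[0, 2], [-1, -1], [2, 2]]])

# select variants 0 and 2, and samples 0 and 2
gs = g.subset(sel0=[0, 2], sel1=[0, 2])
# gs has shape (2, 2, 2)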
if vm == 'numexpr':
    import numexpr as ne
    return ne.evaluate(expression, local_dict=self)
else:
    if PY2:
        # locals must be a mapping
        m = {k: self[k] for k in self.dtype.names}
    else:
        m = self
    return eval(expression, dict(), m)
def eval(self, expression, vm='python')
Evaluate an expression against the table columns. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : ndarray
3.971251
3.872973
1.025375
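A sketch of evaluating an expression against table columns, assuming the method is available on allel.VariantTable; the records here are made up for illustration:

import allel

records = [(b'chr1', 2, 35.0),
           (b'chr1', 7, 12.0),
           (b'chr2', 3, 78.0)]
vt = allel.VariantTable(records,
                        dtype=[('CHROM', 'S4'), ('POS', 'u4'), ('QUAL', 'f4')])

# column names are available as variables within the expression
vt.eval('QUAL > 20')  # -> array([ True, False,  True])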
condition = self.eval(expression, vm=vm)
return self.compress(condition)
def query(self, expression, vm='python')
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
9.932966
16.114544
0.616398
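Building on eval, query compresses the table down to the matching rows; continuing the hypothetical table from the eval sketch above:

# keep only rows where both conditions hold
vt.query('(QUAL > 20) & (POS > 2)')  # -> the single chr2 row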
if not isinstance(others, (list, tuple)):
    others = others,
tup = (self.values,) + tuple(o.values for o in others)
out = np.concatenate(tup, axis=0)
out = type(self)(out)
return out
def concatenate(self, others)
Concatenate arrays.
3.46991
3.233542
1.073099
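A sketch of concatenating two wrapped arrays along the first axis, using GenotypeArray as the concrete wrapper class:

import allel

g1 = allel.GenotypeArray([[[0, 0], [0, 1]]])
g2 = allel.GenotypeArray([[[1, 1], [2, 2]]])

# a single array or a list of arrays may be passed
g = g1.concatenate(g2)
# g has shape (2, 2, 2) and is returned as a GenotypeArray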
if self.mask is None:
    raise ValueError('no mask is set')

# apply the mask
data = np.array(self.values, copy=copy)
data[self.mask, ...] = value

if copy:
    out = type(self)(data)  # wrap
    out.is_phased = self.is_phased
    # don't set mask because it has been filled in
else:
    out = self
    out.mask = None  # reset mask

return out
def fill_masked(self, value=-1, copy=True)
Fill masked genotype calls with a given value. Parameters ---------- value : int, optional The fill value. copy : bool, optional If False, modify the array in place. Returns ------- g : GenotypeArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]], dtype='i1') >>> mask = [[True, False], [False, True], [False, False]] >>> g.mask = mask >>> g.fill_masked().values array([[[-1, -1], [ 0, 1]], [[ 0, 1], [-1, -1]], [[ 0, 2], [-1, -1]]], dtype=int8)
4.821746
5.850645
0.824139
out = np.all(self.values >= 0, axis=-1)

# handle mask
if self.mask is not None:
    out &= ~self.mask

return out
def is_called(self)
Find non-missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_called() array([[ True, True], [ True, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_called() array([ True, True, False])
7.287458
9.891003
0.736777
out = np.any(self.values < 0, axis=-1)

# handle mask
if self.mask is not None:
    out |= self.mask

return out
def is_missing(self)
Find missing genotype calls. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_missing() array([[False, False], [False, False], [False, True]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_missing() array([False, False, True])
6.288374
9.849273
0.638461
if allele is None:
    allele1 = self.values[..., 0, np.newaxis]
    other_alleles = self.values[..., 1:]
    tmp = (allele1 >= 0) & (allele1 == other_alleles)
    out = np.all(tmp, axis=-1)
else:
    out = np.all(self.values == allele, axis=-1)

# handle mask
if self.mask is not None:
    out &= ~self.mask

return out
def is_hom(self, allele=None)
Find genotype calls that are homozygous. Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom() array([[ True, False], [False, True], [ True, False]]) >>> g.is_hom(allele=1) array([[False, False], [False, True], [False, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 2/2 >>> v.is_hom() array([ True, False, True])
3.116936
3.432898
0.907961
allele1 = self.values[..., 0, np.newaxis]
other_alleles = self.values[..., 1:]
tmp = (allele1 > 0) & (allele1 == other_alleles)
out = np.all(tmp, axis=-1)

# handle mask
if self.mask is not None:
    out &= ~self.mask

return out
def is_hom_alt(self)
Find genotype calls that are homozygous for any alternate (i.e., non-reference) allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.is_hom_alt() array([[False, False], [False, True], [ True, False]]) >>> v = g[:, 1] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/1 1/1 ./. >>> v.is_hom_alt() array([False, True, False])
4.420081
4.765714
0.927475
allele1 = self.values[..., 0, np.newaxis]  # type: np.ndarray
other_alleles = self.values[..., 1:]  # type: np.ndarray
out = np.all(self.values >= 0, axis=-1) & np.any(allele1 != other_alleles, axis=-1)
if allele is not None:
    out &= np.any(self.values == allele, axis=-1)

# handle mask
if self.mask is not None:
    out &= ~self.mask

return out
def is_het(self, allele=None)
Find genotype calls that are heterozygous. Parameters ---------- allele : int, optional Heterozygous allele. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype call matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_het() array([[False, True], [ True, False], [ True, False]]) >>> g.is_het(2) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_het() array([False, True, True])
3.059862
3.774045
0.810765
# guard conditions
if not len(call) == self.shape[-1]:
    raise ValueError('invalid call ploidy: %s' % repr(call))

if self.ndim == 2:
    call = np.asarray(call)[np.newaxis, :]
else:
    call = np.asarray(call)[np.newaxis, np.newaxis, :]
out = np.all(self.values == call, axis=-1)

# handle mask
if self.mask is not None:
    out &= ~self.mask

return out
def is_call(self, call)
Locate genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. Returns ------- out : ndarray, bool, shape (n_variants, n_samples) Array where elements are True if the genotype is `call`. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 1], [1, 1]], ... [[0, 2], [-1, -1]]]) >>> g.is_call((0, 2)) array([[False, False], [False, False], [ True, False]]) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/1 0/2 >>> v.is_call((0, 2)) array([False, False, True])
3.968924
3.77363
1.051752
b = self.is_called()
return np.sum(b, axis=axis)
def count_called(self, axis=None)
Count called genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
6.513745
9.194711
0.708423
b = self.is_missing()
return np.sum(b, axis=axis)
def count_missing(self, axis=None)
Count missing genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
6.229849
9.86246
0.631673
b = self.is_hom(allele=allele)
return np.sum(b, axis=axis)
def count_hom(self, allele=None, axis=None)
Count homozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
3.957366
7.173445
0.551669
b = self.is_hom_ref()
return np.sum(b, axis=axis)
def count_hom_ref(self, axis=None)
Count homozygous reference genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
4.63908
7.731669
0.60001
b = self.is_hom_alt()
return np.sum(b, axis=axis)
def count_hom_alt(self, axis=None)
Count homozygous alternate genotypes. Parameters ---------- axis : int, optional Axis over which to count, or None to perform overall count.
4.696029
8.191338
0.573292
b = self.is_het(allele=allele)
return np.sum(b, axis=axis)
def count_het(self, allele=None, axis=None)
Count heterozygous genotypes. Parameters ---------- allele : int, optional Allele index. axis : int, optional Axis over which to count, or None to perform overall count.
3.837384
6.785017
0.565567
b = self.is_call(call=call)
return np.sum(b, axis=axis)
def count_call(self, call, axis=None)
Count genotypes with a given call. Parameters ---------- call : array_like, int, shape (ploidy,) The genotype call to find. axis : int, optional Axis over which to count, or None to perform overall count.
5.660569
8.452914
0.669659
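All of the count_* methods above share one pattern: compute the corresponding is_* boolean array and sum it, optionally along an axis. A combined sketch:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 1], [1, 1]],
                         [[0, 2], [-1, -1]]])

g.count_called()         # 5 calls are non-missing overall
g.count_missing(axis=0)  # per sample -> array([0, 1])
g.count_het(axis=1)      # per variant -> array([1, 1, 1])
g.count_hom(allele=1)    # 1 (only the 1/1 call)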
# count number of ref alleles
out = np.empty(self.shape[:-1], dtype=dtype)
np.sum(self.values == 0, axis=-1, out=out)

# fill missing calls
if fill != 0:
    m = self.is_missing()
    out[m] = fill

# handle mask
if self.mask is not None:
    out[self.mask] = fill

return out
def to_n_ref(self, fill=0, dtype='i1')
Transform each genotype call into the number of reference alleles. Parameters ---------- fill : int, optional Use this value to represent missing calls. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, int8, shape (n_variants, n_samples) Array of ref alleles per genotype call. Notes ----- By default this function returns 0 for missing genotype calls **and** for homozygous non-reference genotype calls. Use the `fill` argument to change how missing calls are represented. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_n_ref() array([[2, 1], [1, 0], [0, 0]], dtype=int8) >>> g.to_n_ref(fill=-1) array([[ 2, 1], [ 1, 0], [ 0, -1]], dtype=int8) >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_n_ref() array([2, 1, 0], dtype=int8)
3.601097
4.280884
0.841204
# determine alleles to count
if max_allele is None:
    max_allele = self.max()
alleles = list(range(max_allele + 1))

# set up output array
outshape = self.shape[:-1] + (len(alleles),)
out = np.zeros(outshape, dtype=dtype)

for allele in alleles:
    # count alleles along ploidy dimension
    allele_match = self.values == allele
    if self.mask is not None:
        allele_match &= ~self.mask[..., np.newaxis]
    np.sum(allele_match, axis=-1, out=out[..., allele])

if self.ndim == 2:
    out = GenotypeAlleleCountsVector(out)
elif self.ndim == 3:
    out = GenotypeAlleleCountsArray(out)

return out
def to_allele_counts(self, max_allele=None, dtype='u1')
Transform genotype calls into allele counts per call. Parameters ---------- max_allele : int, optional Highest allele index. Provide this value to speed up computation. dtype : dtype, optional Output dtype. Returns ------- out : ndarray, uint8, shape (n_variants, n_samples, len(alleles)) Array of allele counts per call. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_allele_counts() <GenotypeAlleleCountsArray shape=(3, 2, 3) dtype=uint8> 2:0:0 1:1:0 1:0:1 0:2:0 0:0:2 0:0:0 >>> v = g[:, 0] >>> v <GenotypeVector shape=(3, 2) dtype=int64> 0/0 0/2 2/2 >>> v.to_allele_counts() <GenotypeAlleleCountsVector shape=(3, 3) dtype=uint8> 2:0:0 1:0:1 0:0:2
2.797571
2.98951
0.935796
# how many characters needed per allele call?
if max_allele is None:
    max_allele = np.max(self)
if max_allele <= 0:
    max_allele = 1
nchar = int(np.floor(np.log10(max_allele))) + 1

# convert to string
a = self.astype((np.string_, nchar)).view(np.chararray)

# recode missing alleles
a[self < 0] = b'.'
if self.mask is not None:
    a[self.mask] = b'.'

# determine allele call separator
if self.is_phased is None:
    sep = b'/'
else:
    sep = np.empty(self.shape[:-1], dtype='S1').view(np.chararray)
    sep[self.is_phased] = b'|'
    sep[~self.is_phased] = b'/'

# join via separator, coping with any ploidy
gt = a[..., 0]
for i in range(1, self.ploidy):
    gt = gt + sep + a[..., i]

return gt
def to_gt(self, max_allele=None)
Convert genotype calls to VCF-style string representation. Returns ------- gt : ndarray, string, shape (n_variants, n_samples) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.to_gt() chararray([[b'0/0', b'0/1'], [b'0/2', b'1/1'], [b'1/2', b'2/1'], [b'2/2', b'./.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0/0 0/2 1/2 2/2 >>> v.to_gt() chararray([b'0/0', b'0/2', b'1/2', b'2/2'], dtype='|S3') >>> g.is_phased = np.ones(g.shape[:-1]) >>> g.to_gt() chararray([[b'0|0', b'0|1'], [b'0|2', b'1|1'], [b'1|2', b'2|1'], [b'2|2', b'.|.']], dtype='|S3') >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int64> 0|0 0|2 1|2 2|2 >>> v.to_gt() chararray([b'0|0', b'0|2', b'1|2', b'2|2'], dtype='|S3')
3.485275
3.232559
1.078178
h = self.to_haplotypes()
hm = h.map_alleles(mapping, copy=copy)
if self.ndim == 2:
    gm = GenotypeVector(hm)
else:
    gm = hm.to_genotypes(ploidy=self.ploidy)
return gm
def map_alleles(self, mapping, copy=True)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- gm : GenotypeArray Examples -------- >>> import allel >>> import numpy as np >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0], ... [0, 2, 1]], dtype='i1') >>> g.map_alleles(mapping) <GenotypeArray shape=(4, 2, 2) dtype=int8> 1/1 1/2 2/1 0/0 1/0 0/1 1/1 ./. >>> v = g[:, 0] >>> v <GenotypeVector shape=(4, 2) dtype=int8> 0/0 0/2 1/2 2/2 >>> v.map_alleles(mapping) <GenotypeVector shape=(4, 2) dtype=int8> 1/1 2/1 1/0 1/1 Notes ----- If a mask has been set, it is ignored by this function. For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- create_allele_mapping
3.896335
4.797552
0.812151
check_ploidy(self.ploidy, 2)

if boundscheck:
    amx = self.max()
    if amx > 14:
        raise ValueError('max allele for packing is 14, found %s' % amx)
    amn = self.min()
    if amn < -1:
        raise ValueError('min allele for packing is -1, found %s' % amn)

# pack data
values = memoryview_safe(self.values)
packed = genotype_array_pack_diploid(values)

return packed
def to_packed(self, boundscheck=True)
Pack diploid genotypes into a single byte for each genotype, using the left-most 4 bits for the first allele and the right-most 4 bits for the second allele. Allows single byte encoding of diploid genotypes for variants with up to 15 alleles. Parameters ---------- boundscheck : bool, optional If False, do not check that minimum and maximum alleles are compatible with bit-packing. Returns ------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed genotype array. Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]], dtype='i1') >>> g.to_packed() array([[ 0, 1], [ 2, 17], [ 34, 239]], dtype=uint8)
4.724641
5.228294
0.903668
# check arguments
packed = np.asarray(packed)
check_ndim(packed, 2)
check_dtype(packed, 'u1')
packed = memoryview_safe(packed)

data = genotype_array_unpack_diploid(packed)
return cls(data)
def from_packed(cls, packed)
Unpack diploid genotypes that have been bit-packed into single bytes. Parameters ---------- packed : ndarray, uint8, shape (n_variants, n_samples) Bit-packed diploid genotype array. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, 2) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> packed = np.array([[0, 1], ... [2, 17], ... [34, 239]], dtype='u1') >>> allel.GenotypeArray.from_packed(packed) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/2 1/1 2/2 ./.
7.828063
6.877555
1.138204
h = self.to_haplotypes()
m = h.to_sparse(format=format, **kwargs)
return m
def to_sparse(self, format='csr', **kwargs)
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 1], [0, 1]], ... [[1, 1], [0, 0]], ... [[0, 0], [-1, -1]]], dtype='i1') >>> m = g.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32)
6.165518
8.324248
0.74067
h = HaplotypeArray.from_sparse(m, order=order, out=out)
g = h.to_genotypes(ploidy=ploidy)
return g
def from_sparse(m, ploidy, order=None, out=None)
Construct a genotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix ploidy : int The sample ploidy. order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- g : GenotypeArray, shape (n_variants, n_samples, ploidy) Genotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> g = allel.GenotypeArray.from_sparse(m, ploidy=2) >>> g <GenotypeArray shape=(4, 2, 2) dtype=int8> 0/0 0/0 0/1 0/1 1/1 0/0 0/0 ./.
3.612068
7.012381
0.515099
# N.B., this implementation is obscure and uses more memory than
# necessary, TODO review

# define the range of possible indices, e.g., diploid => (0, 1)
index_range = np.arange(0, self.ploidy, dtype='u1')

# create a random index for each genotype call
indices = np.random.choice(index_range, size=self.n_calls, replace=True)

# reshape genotype data so it's suitable for passing to np.choose
# by merging the variants and samples dimensions
choices = self.reshape(-1, self.ploidy).T

# now use random indices to haploidify
data = np.choose(indices, choices)

# reshape the haploidified data to restore the variants and samples
# dimensions
data = data.reshape((self.n_variants, self.n_samples))

# view as haplotype array
h = HaplotypeArray(data, copy=False)

return h
def haploidify_samples(self)
Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 .
6.041229
5.025996
1.201996
# check inputs
subpop = _normalize_subpop_arg(subpop, self.shape[1])

# determine alleles to count
if max_allele is None:
    max_allele = self.max()

# use optimisations
values = memoryview_safe(self.values)
mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None
if subpop is None and mask is None:
    ac = genotype_array_count_alleles(values, max_allele)
elif subpop is None:
    ac = genotype_array_count_alleles_masked(values, mask, max_allele)
elif mask is None:
    ac = genotype_array_count_alleles_subpop(values, max_allele, subpop)
else:
    ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop)

return AlleleCountsArray(ac, copy=False)
def count_alleles(self, max_allele=None, subpop=None)
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. subpop : sequence of ints, optional Indices of samples to include in count. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> g.count_alleles() <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 2 1 0 0 2 >>> g.count_alleles(max_allele=1) <AlleleCountsArray shape=(3, 2) dtype=int32> 3 1 1 2 0 0
2.599481
2.567846
1.012319
if max_allele is None:
    max_allele = self.max()

out = {name: self.count_alleles(max_allele=max_allele, subpop=subpop)
       for name, subpop in subpops.items()}

return out
def count_alleles_subpops(self, subpops, max_allele=None)
Count alleles for multiple subpopulations simultaneously. Parameters ---------- subpops : dict (string -> sequence of ints) Mapping of subpopulation names to sample indices. max_allele : int, optional The highest allele index to count. Alleles above this will be ignored. Returns ------- out : dict (string -> AlleleCountsArray) A mapping of subpopulation names to allele counts arrays.
2.713171
2.72116
0.997064
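A sketch of counting alleles in two subpopulations at once; the subpopulation names are arbitrary:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]])

acs = g.count_alleles_subpops({'pop1': [0], 'pop2': [1]})
acs['pop1']  # allele counts restricted to sample 0
acs['pop2']  # allele counts restricted to sample 1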
return compress_haplotype_array(self, condition, axis=axis, cls=type(self), compress=np.compress, out=out)
def compress(self, condition, axis=0, out=None)
Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the condition array. axis : int, optional Axis along which to take slices. If None, work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- out : HaplotypeArray A copy of the array without the slices along axis for which `condition` is false. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.compress([True, False, True], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.compress([True, False, True, False], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 .
10.01543
11.919974
0.840222
return take_haplotype_array(self, indices, axis=axis, cls=type(self), take=np.take, out=out, mode=mode)
def take(self, indices, axis=0, out=None, mode='raise')
Take elements from an array along an axis. This function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. Parameters ---------- indices : array_like The indices of the values to extract. axis : int, optional The axis over which to select values. out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- subarray : ndarray Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.take([0, 2], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.take([0, 2], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 .
8.132723
13.18678
0.616733
return subset_haplotype_array(self, sel0, sel1, cls=type(self), subset=subset)
def subset(self, sel0=None, sel1=None)
Make a sub-selection of variants and haplotypes. Parameters ---------- sel0 : array_like Boolean array or array of indices selecting variants. sel1 : array_like Boolean array or array of indices selecting haplotypes. Returns ------- out : HaplotypeArray See Also -------- HaplotypeArray.take, HaplotypeArray.compress
11.859823
12.429276
0.954185
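A short sketch of subsetting a haplotype array on both axes:

import allel

h = allel.HaplotypeArray([[0, 0, 0, 1],
                          [0, 1, 1, 1],
                          [0, 2, -1, -1]], dtype='i1')

# variants 0 and 2, haplotypes 0 and 1
h.subset(sel0=[0, 2], sel1=[0, 1])
# -> HaplotypeArray [[0, 0], [0, 2]]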
return concatenate_haplotype_array(self, others, axis=axis, cls=type(self), concatenate=np.concatenate)
def concatenate(self, others, axis=0)
Join a sequence of arrays along an existing axis. Parameters ---------- others : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- res : ndarray The concatenated array. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.concatenate([h], axis=0) <HaplotypeArray shape=(6, 4) dtype=int8> 0 0 0 1 0 1 1 1 0 2 . . 0 0 0 1 0 1 1 1 0 2 . . >>> h.concatenate([h], axis=1) <HaplotypeArray shape=(3, 8) dtype=int8> 0 0 0 1 0 0 0 1 0 1 1 1 0 1 1 1 0 2 . . 0 2 . .
9.877668
16.464909
0.599922
# check ploidy is compatible
if (self.shape[1] % ploidy) > 0:
    raise ValueError('incompatible ploidy')

# reshape
newshape = (self.shape[0], -1, ploidy)
data = self.reshape(newshape)

# wrap
g = GenotypeArray(data, copy=copy)

return g
def to_genotypes(self, ploidy, copy=False)
Reshape a haplotype array to view it as genotypes by restoring the ploidy dimension. Parameters ---------- ploidy : int The sample ploidy. copy : bool, optional If True, make a copy of data. Returns ------- g : ndarray, int, shape (n_variants, n_samples, ploidy) Genotype array (sharing same underlying buffer). Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.to_genotypes(ploidy=2) <GenotypeArray shape=(3, 2, 2) dtype=int8> 0/0 0/1 0/1 1/1 0/2 ./.
3.30364
4.594717
0.719008
import scipy.sparse

# check arguments
f = {
    'bsr': scipy.sparse.bsr_matrix,
    'coo': scipy.sparse.coo_matrix,
    'csc': scipy.sparse.csc_matrix,
    'csr': scipy.sparse.csr_matrix,
    'dia': scipy.sparse.dia_matrix,
    'dok': scipy.sparse.dok_matrix,
    'lil': scipy.sparse.lil_matrix
}
if format not in f:
    raise ValueError('invalid format: %r' % format)

# create sparse matrix
m = f[format](self, **kwargs)

return m
def to_sparse(self, format='csr', **kwargs)
Convert into a sparse matrix. Parameters ---------- format : {'coo', 'csc', 'csr', 'dia', 'dok', 'lil'} Sparse matrix format. kwargs : keyword arguments Passed through to sparse matrix constructor. Returns ------- m : scipy.sparse.spmatrix Sparse matrix Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 1, 0, 1], ... [1, 1, 0, 0], ... [0, 0, -1, -1]], dtype='i1') >>> m = h.to_sparse(format='csr') >>> m <4x4 sparse matrix of type '<class 'numpy.int8'>' with 6 stored elements in Compressed Sparse Row format> >>> m.data array([ 1, 1, 1, 1, -1, -1], dtype=int8) >>> m.indices array([1, 3, 0, 1, 2, 3], dtype=int32) >>> m.indptr array([0, 0, 2, 4, 6], dtype=int32)
1.912631
2.189958
0.873364
import scipy.sparse

# check arguments
if not scipy.sparse.isspmatrix(m):
    raise ValueError('not a sparse matrix: %r' % m)

# convert to dense array
data = m.toarray(order=order, out=out)

# wrap
h = HaplotypeArray(data)

return h
def from_sparse(m, order=None, out=None)
Construct a haplotype array from a sparse matrix. Parameters ---------- m : scipy.sparse.spmatrix Sparse matrix order : {'C', 'F'}, optional Whether to store data in C (row-major) or Fortran (column-major) order in memory. out : ndarray, shape (n_variants, n_samples), optional Use this array as the output buffer. Returns ------- h : HaplotypeArray, shape (n_variants, n_haplotypes) Haplotype array. Examples -------- >>> import allel >>> import numpy as np >>> import scipy.sparse >>> data = np.array([ 1, 1, 1, 1, -1, -1], dtype=np.int8) >>> indices = np.array([1, 3, 0, 1, 2, 3], dtype=np.int32) >>> indptr = np.array([0, 0, 2, 4, 6], dtype=np.int32) >>> m = scipy.sparse.csr_matrix((data, indices, indptr)) >>> h = allel.HaplotypeArray.from_sparse(m) >>> h <HaplotypeArray shape=(4, 4) dtype=int8> 0 0 0 0 0 1 0 1 1 1 0 0 0 0 . .
4.713394
5.074734
0.928796
# check inputs
subpop = _normalize_subpop_arg(subpop, self.shape[1])

# determine alleles to count
if max_allele is None:
    max_allele = self.max()

# use optimisations
values = memoryview_safe(self.values)
if subpop is None:
    ac = haplotype_array_count_alleles(values, max_allele)
else:
    ac = haplotype_array_count_alleles_subpop(values, max_allele, subpop)

return AlleleCountsArray(ac, copy=False)
def count_alleles(self, max_allele=None, subpop=None)
Count the number of calls of each allele per variant. Parameters ---------- max_allele : int, optional The highest allele index to count. Alleles greater than this index will be ignored. subpop : array_like, int, optional Indices of haplotypes to include. Returns ------- ac : AlleleCountsArray, int, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> ac = h.count_alleles() >>> ac <AlleleCountsArray shape=(3, 3) dtype=int32> 3 1 0 1 3 0 1 0 1
3.844482
4.044878
0.950457
# check inputs
mapping = asarray_ndim(mapping, 2)
check_dim0_aligned(self, mapping)

# use optimisation
mapping = np.asarray(mapping, dtype=self.dtype)
mapping = memoryview_safe(mapping)
values = memoryview_safe(self.values)
data = haplotype_array_map_alleles(values, mapping, copy=copy)

return HaplotypeArray(data, copy=False)
def map_alleles(self, mapping, copy=True)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. copy : bool, optional If True, return a new array; if False, apply mapping in place (only applies for arrays with dtype int8; all other dtypes require a copy). Returns ------- hm : HaplotypeArray Examples -------- >>> import allel >>> import numpy as np >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> mapping = np.array([[1, 2, 0], ... [2, 0, 1], ... [2, 1, 0]], dtype='i1') >>> h.map_alleles(mapping) <HaplotypeArray shape=(3, 4) dtype=int8> 1 1 1 2 2 0 0 0 2 0 . . Notes ----- For arrays with dtype int8 an optimised implementation is used which is faster and uses far less memory. It is recommended to convert arrays to dtype int8 where possible before calling this method. See Also -------- allel.model.util.create_allele_mapping
4.570766
4.647543
0.98348
# setup collection
d = collections.defaultdict(set)

# iterate over haplotypes
for i in range(self.shape[1]):

    # hash the haplotype
    k = hash(self.values[:, i].tobytes())

    # collect
    d[k].add(i)

# extract sets, sorted by most common
return sorted(d.values(), key=len, reverse=True)
def distinct(self)
Return sets of indices for each distinct haplotype.
5.476073
4.187057
1.307857
# hash the haplotypes
k = [hash(self.values[:, i].tobytes()) for i in range(self.shape[1])]

# count and sort
# noinspection PyArgumentList
counts = sorted(collections.Counter(k).values(), reverse=True)

return np.asarray(counts)
def distinct_counts(self)
Return counts for each distinct haplotype.
7.262399
5.7694
1.258779
c = self.distinct_counts()
n = self.shape[1]
return c / n
def distinct_frequencies(self)
Return frequencies for each distinct haplotype.
12.647501
9.430546
1.341121
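The three distinct* methods above hash each haplotype (column) to group identical ones. A combined sketch; note that ordering among groups of equal size is not guaranteed:

import allel

h = allel.HaplotypeArray([[0, 0, 1, 1],
                          [0, 1, 1, 1]], dtype='i1')

h.distinct()              # e.g. [{2, 3}, {0}, {1}]; columns 2 and 3 are identical
h.distinct_counts()       # array([2, 1, 1])
h.distinct_frequencies()  # array([0.5 , 0.25, 0.25])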
an = np.sum(self, axis=1)[:, None]
with ignore_invalid():
    af = np.where(an > 0, self / an, fill)
return af
def to_frequencies(self, fill=np.nan)
Compute allele frequencies. Parameters ---------- fill : float, optional Value to use when number of allele calls is 0. Returns ------- af : ndarray, float, shape (n_variants, n_alleles) Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.to_frequencies() array([[0.75, 0.25, 0. ], [0.25, 0.5 , 0.25], [0. , 0. , 1. ]])
6.735044
9.440577
0.713414
out = np.empty(self.shape[0], dtype='i1')
out.fill(-1)
for i in range(self.shape[1]):
    d = self.values[:, i] > 0
    out[d] = i
return out
def max_allele(self)
Return the highest allele index for each variant. Returns ------- n : ndarray, int, shape (n_variants,) Allele index array. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.max_allele() array([1, 2, 2], dtype=int8)
4.235273
4.493279
0.94258
if allele is None:
    return self.allelism() <= 1
else:
    return (self.allelism() == 1) & (self.values[:, allele] > 0)
def is_non_segregating(self, allele=None)
Find non-segregating variants (where at most one allele is observed). Parameters ---------- allele : int, optional Allele index. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac.is_non_segregating() array([ True, False, False, True]) >>> ac.is_non_segregating(allele=2) array([False, False, False, True])
5.191349
6.394024
0.811906
loc = self.is_biallelic() & (self.max_allele() == 1)
if min_mac is not None:
    # noinspection PyAugmentAssignment
    loc = loc & (self.values[:, :2].min(axis=1) >= min_mac)
return loc
def is_biallelic_01(self, min_mac=None)
Find variants biallelic for the reference (0) and first alternate (1) allele. Parameters ---------- min_mac : int, optional Minimum minor allele count. Returns ------- out : ndarray, bool, shape (n_variants,) Boolean array where elements are True if variant matches the condition.
3.981393
4.895738
0.813237
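A sketch of the biallelic 0/1 filter, including the minor allele count threshold:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1]],
                         [[0, 2], [1, 1]],
                         [[2, 2], [-1, -1]]])
ac = g.count_alleles()

ac.is_biallelic_01()           # -> array([ True, False, False])
ac.is_biallelic_01(min_mac=2)  # -> all False; the minor count at variant 0 is only 1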
# ensure correct dimensionality and matching dtype
mapping = asarray_ndim(mapping, 2, dtype=self.dtype)
check_dim0_aligned(self, mapping)
check_dim1_aligned(self, mapping)

# use optimisation
out = allele_counts_array_map_alleles(self.values, mapping, max_allele)

# wrap and return
return type(self)(out)
def map_alleles(self, mapping, max_allele=None)
Transform alleles via a mapping. Parameters ---------- mapping : ndarray, int8, shape (n_variants, max_allele) An array defining the allele mapping for each variant. max_allele : int, optional Highest allele index expected in the output. If not provided will be determined from maximum value in `mapping`. Returns ------- ac : AlleleCountsArray Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[2, 2], [-1, -1]]]) >>> ac = g.count_alleles() >>> ac <AlleleCountsArray shape=(4, 3) dtype=int32> 4 0 0 3 1 0 1 2 1 0 0 2 >>> mapping = [[1, 0, 2], ... [1, 0, 2], ... [2, 1, 0], ... [1, 2, 0]] >>> ac.map_alleles(mapping) <AlleleCountsArray shape=(4, 3) dtype=int32> 0 4 0 1 3 0 1 2 1 2 0 0 See Also -------- create_allele_mapping
5.232012
6.688132
0.782283
if self._is_unique is None:
    t = self.values[:-1] == self.values[1:]  # type: np.ndarray
    self._is_unique = ~np.any(t)
return self._is_unique
def is_unique(self)
True if no duplicate entries.
4.143051
3.765106
1.100381
left = bisect.bisect_left(self, key)
right = bisect.bisect_right(self, key)
diff = right - left
if diff == 0:
    raise KeyError(key)
elif diff == 1:
    return left
else:
    return slice(left, right)
def locate_key(self, key)
Get index location for the requested key. Parameters ---------- key : object Value to locate. Returns ------- loc : int or slice Location of `key` (will be slice if there are duplicate entries). Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 6, 11]) >>> idx.locate_key(3) 0 >>> idx.locate_key(11) 3 >>> idx.locate_key(6) slice(1, 3, None) >>> try: ... idx.locate_key(2) ... except KeyError as e: ... print(e) ... 2
2.666773
2.638113
1.010864
# check inputs
other = SortedIndex(other, copy=False)

# find intersection
assume_unique = self.is_unique and other.is_unique
loc = np.in1d(self, other, assume_unique=assume_unique)
loc_other = np.in1d(other, self, assume_unique=assume_unique)

return loc, loc_other
def locate_intersection(self, other)
Locate the intersection with another array. Parameters ---------- other : array_like, int Array of values to intersect. Returns ------- loc : ndarray, bool Boolean array with location of intersection. loc_other : ndarray, bool Boolean array with location in `other` of intersection. Examples -------- >>> import allel >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx2 = allel.SortedIndex([4, 6, 20, 39]) >>> loc1, loc2 = idx1.locate_intersection(idx2) >>> loc1 array([False, True, False, True, False]) >>> loc2 array([False, True, True, False]) >>> idx1[loc1] <SortedIndex shape=(2,) dtype=int64> [6, 20] >>> idx2[loc2] <SortedIndex shape=(2,) dtype=int64> [6, 20]
3.314739
3.866084
0.857389
# check inputs
keys = SortedIndex(keys, copy=False)

# find intersection
loc, found = self.locate_intersection(keys)

if strict and np.any(~found):
    raise KeyError(keys[~found])

return loc
def locate_keys(self, keys, strict=True)
Get index locations for the requested keys. Parameters ---------- keys : array_like Array of keys to locate. strict : bool, optional If True, raise KeyError if any keys are not found in the index. Returns ------- loc : ndarray, bool Boolean array with location of values. Examples -------- >>> import allel >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx2 = allel.SortedIndex([4, 6, 20, 39]) >>> loc = idx1.locate_keys(idx2, strict=False) >>> loc array([False, True, False, True, False]) >>> idx1[loc] <SortedIndex shape=(2,) dtype=int64> [6, 20]
6.691301
11.409792
0.586453
loc = self.locate_keys(other, strict=False)
return self.compress(loc, axis=0)
def intersect(self, other)
Intersect with `other` sorted index. Parameters ---------- other : array_like, int Array of values to intersect with. Returns ------- out : SortedIndex Values in common. Examples -------- >>> import allel >>> idx1 = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx2 = allel.SortedIndex([4, 6, 20, 39]) >>> idx1.intersect(idx2) <SortedIndex shape=(2,) dtype=int64> [6, 20]
14.764068
19.107475
0.772685
# locate start and stop indices
if start is None:
    start_index = 0
else:
    start_index = bisect.bisect_left(self, start)
if stop is None:
    stop_index = len(self)
else:
    stop_index = bisect.bisect_right(self, stop)

if stop_index - start_index == 0:
    raise KeyError(start, stop)

loc = slice(start_index, stop_index)
return loc
def locate_range(self, start=None, stop=None)
Locate slice of index containing all entries within `start` and `stop` values **inclusive**. Parameters ---------- start : int, optional Start value. stop : int, optional Stop value. Returns ------- loc : slice Slice object. Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> loc = idx.locate_range(4, 32) >>> loc slice(1, 4, None) >>> idx[loc] <SortedIndex shape=(3,) dtype=int64> [6, 11, 20]
2.071114
2.635195
0.785944
try:
    loc = self.locate_range(start=start, stop=stop)
except KeyError:
    return self.values[0:0]
else:
    return self[loc]
def intersect_range(self, start=None, stop=None)
Intersect with range defined by `start` and `stop` values **inclusive**. Parameters ---------- start : int, optional Start value. stop : int, optional Stop value. Returns ------- idx : SortedIndex Examples -------- >>> import allel >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> idx.intersect_range(4, 32) <SortedIndex shape=(3,) dtype=int64> [6, 11, 20]
4.707631
7.397881
0.636349
# check inputs
starts = asarray_ndim(starts, 1)
stops = asarray_ndim(stops, 1)
check_dim0_aligned(starts, stops)

# find indices of start and stop values in idx
start_indices = np.searchsorted(self, starts)
stop_indices = np.searchsorted(self, stops, side='right')

# find intervals overlapping at least one value
loc_ranges = start_indices < stop_indices

# find values within at least one interval
loc = np.zeros(self.shape, dtype=bool)
for i, j in zip(start_indices[loc_ranges], stop_indices[loc_ranges]):
    loc[i:j] = True

return loc, loc_ranges
def locate_intersection_ranges(self, starts, stops)
Locate the intersection with a set of ranges. Parameters ---------- starts : array_like, int Range start values. stops : array_like, int Range stop values. Returns ------- loc : ndarray, bool Boolean array with location of entries found. loc_ranges : ndarray, bool Boolean array with location of ranges containing one or more entries. Examples -------- >>> import allel >>> import numpy as np >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35], ... [100, 120]]) >>> starts = ranges[:, 0] >>> stops = ranges[:, 1] >>> loc, loc_ranges = idx.locate_intersection_ranges(starts, stops) >>> loc array([False, True, True, False, True]) >>> loc_ranges array([False, True, False, True, False]) >>> idx[loc] <SortedIndex shape=(3,) dtype=int64> [6, 11, 35] >>> ranges[loc_ranges] array([[ 6, 17], [31, 35]])
3.036023
2.764273
1.098308
loc, found = self.locate_intersection_ranges(starts, stops)

if strict and np.any(~found):
    raise KeyError(starts[~found], stops[~found])

return loc
def locate_ranges(self, starts, stops, strict=True)
Locate items within the given ranges. Parameters ---------- starts : array_like, int Range start values. stops : array_like, int Range stop values. strict : bool, optional If True, raise KeyError if any ranges contain no entries. Returns ------- loc : ndarray, bool Boolean array with location of entries found. Examples -------- >>> import allel >>> import numpy as np >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35], ... [100, 120]]) >>> starts = ranges[:, 0] >>> stops = ranges[:, 1] >>> loc = idx.locate_ranges(starts, stops, strict=False) >>> loc array([False, True, True, False, True]) >>> idx[loc] <SortedIndex shape=(3,) dtype=int64> [6, 11, 35]
5.397748
8.420008
0.641062
loc = self.locate_ranges(starts, stops, strict=False)
return self.compress(loc, axis=0)
def intersect_ranges(self, starts, stops)
Intersect with a set of ranges. Parameters ---------- starts : array_like, int Range start values. stops : array_like, int Range stop values. Returns ------- idx : SortedIndex Examples -------- >>> import allel >>> import numpy as np >>> idx = allel.SortedIndex([3, 6, 11, 20, 35]) >>> ranges = np.array([[0, 2], [6, 17], [12, 15], [31, 35], ... [100, 120]]) >>> starts = ranges[:, 0] >>> stops = ranges[:, 1] >>> idx.intersect_ranges(starts, stops) <SortedIndex shape=(3,) dtype=int64> [6, 11, 35]
9.53031
12.290308
0.775433
# check inputs
other = UniqueIndex(other)

# find intersection
assume_unique = True
loc = np.in1d(self, other, assume_unique=assume_unique)
loc_other = np.in1d(other, self, assume_unique=assume_unique)

return loc, loc_other
def locate_intersection(self, other)
Locate the intersection with another array. Parameters ---------- other : array_like Array to intersect. Returns ------- loc : ndarray, bool Boolean array with location of intersection. loc_other : ndarray, bool Boolean array with location in `other` of intersection. Examples -------- >>> import allel >>> idx1 = allel.UniqueIndex(['A', 'C', 'B', 'F'], dtype=object) >>> idx2 = allel.UniqueIndex(['X', 'F', 'G', 'C', 'Z'], dtype=object) >>> loc1, loc2 = idx1.locate_intersection(idx2) >>> loc1 array([False, True, False, True]) >>> loc2 array([False, True, False, True, False]) >>> idx1[loc1] <UniqueIndex shape=(2,) dtype=object> ['C', 'F'] >>> idx2[loc2] <UniqueIndex shape=(2,) dtype=object> ['F', 'C']
3.736309
4.25067
0.878993
# check inputs
keys = UniqueIndex(keys)

# find intersection
loc, found = self.locate_intersection(keys)

if strict and np.any(~found):
    raise KeyError(keys[~found])

return loc
def locate_keys(self, keys, strict=True)
Get index locations for the requested keys. Parameters ---------- keys : array_like Array of keys to locate. strict : bool, optional If True, raise KeyError if any keys are not found in the index. Returns ------- loc : ndarray, bool Boolean array with location of keys. Examples -------- >>> import allel >>> idx = allel.UniqueIndex(['A', 'C', 'B', 'F']) >>> idx.locate_keys(['F', 'C']) array([False, True, False, True]) >>> idx.locate_keys(['X', 'F', 'G', 'C', 'Z'], strict=False) array([False, True, False, True])
7.079412
11.413717
0.620255
loc1 = self.l1.locate_key(k1)
if k2 is None:
    return loc1
if isinstance(loc1, slice):
    offset = loc1.start
    try:
        loc2 = SortedIndex(self.l2[loc1], copy=False).locate_key(k2)
    except KeyError:
        # reraise with more information
        raise KeyError(k1, k2)
    else:
        if isinstance(loc2, slice):
            loc = slice(offset + loc2.start, offset + loc2.stop)
        else:
            # assume singleton
            loc = offset + loc2
else:
    # singleton match in l1
    v = self.l2[loc1]
    if v == k2:
        loc = loc1
    else:
        raise KeyError(k1, k2)
return loc
def locate_key(self, k1, k2=None)
Get index location for the requested key. Parameters ---------- k1 : object Level 1 key. k2 : object, optional Level 2 key. Returns ------- loc : int or slice Location of requested key (will be slice if there are duplicate entries). Examples -------- >>> import allel >>> chrom = ['chr1', 'chr1', 'chr2', 'chr2', 'chr2', 'chr3'] >>> pos = [1, 4, 2, 5, 5, 3] >>> idx = allel.SortedMultiIndex(chrom, pos) >>> idx.locate_key('chr1') slice(0, 2, None) >>> idx.locate_key('chr1', 4) 1 >>> idx.locate_key('chr2', 5) slice(3, 5, None) >>> try: ... idx.locate_key('chr3', 4) ... except KeyError as e: ... print(e) ... ('chr3', 4)
3.050552
3.372256
0.904603
loc1 = self.l1.locate_key(key)
if start is None and stop is None:
    loc = loc1
elif isinstance(loc1, slice):
    offset = loc1.start
    idx = SortedIndex(self.l2[loc1], copy=False)
    try:
        loc2 = idx.locate_range(start, stop)
    except KeyError:
        raise KeyError(key, start, stop)
    else:
        loc = slice(offset + loc2.start, offset + loc2.stop)
else:
    # singleton match in l1
    v = self.l2[loc1]
    if start <= v <= stop:
        loc = loc1
    else:
        raise KeyError(key, start, stop)
# ensure slice is always returned
if not isinstance(loc, slice):
    loc = slice(loc, loc + 1)
return loc
def locate_range(self, key, start=None, stop=None)
Locate slice of index containing all entries within the range `key`:`start`-`stop` **inclusive**. Parameters ---------- key : object Level 1 key value. start : object, optional Level 2 start value. stop : object, optional Level 2 stop value. Returns ------- loc : slice Slice object. Examples -------- >>> import allel >>> chrom = ['chr1', 'chr1', 'chr2', 'chr2', 'chr2', 'chr3'] >>> pos = [1, 4, 2, 5, 5, 3] >>> idx = allel.SortedMultiIndex(chrom, pos) >>> idx.locate_range('chr1') slice(0, 2, None) >>> idx.locate_range('chr1', 1, 4) slice(0, 2, None) >>> idx.locate_range('chr2', 3, 7) slice(3, 5, None) >>> try: ... idx.locate_range('chr3', 4, 9) ... except KeyError as e: ... print(e) ('chr3', 4, 9)
3.196776
3.485458
0.917175
if pos is None:
    # we just want the region for a chromosome
    if chrom in self.chrom_ranges:
        # return previously cached result
        return self.chrom_ranges[chrom]
    else:
        loc_chrom = np.nonzero(self.chrom == chrom)[0]
        if len(loc_chrom) == 0:
            raise KeyError(chrom)
        slice_chrom = slice(min(loc_chrom), max(loc_chrom) + 1)
        # cache the result
        self.chrom_ranges[chrom] = slice_chrom
        return slice_chrom
else:
    slice_chrom = self.locate_key(chrom)
    pos_chrom = SortedIndex(self.pos[slice_chrom])
    try:
        idx_within_chrom = pos_chrom.locate_key(pos)
    except KeyError:
        raise KeyError(chrom, pos)
    if isinstance(idx_within_chrom, slice):
        return slice(slice_chrom.start + idx_within_chrom.start,
                     slice_chrom.start + idx_within_chrom.stop)
    else:
        return slice_chrom.start + idx_within_chrom
def locate_key(self, chrom, pos=None)
Get index location for the requested key. Parameters ---------- chrom : object Chromosome or contig. pos : int, optional Position within chromosome or contig. Returns ------- loc : int or slice Location of requested key (will be slice if there are duplicate entries). Examples -------- >>> import allel >>> chrom = ['chr2', 'chr2', 'chr1', 'chr1', 'chr1', 'chr3'] >>> pos = [1, 4, 2, 5, 5, 3] >>> idx = allel.ChromPosIndex(chrom, pos) >>> idx.locate_key('chr1') slice(2, 5, None) >>> idx.locate_key('chr2', 4) 1 >>> idx.locate_key('chr1', 5) slice(3, 5, None) >>> try: ... idx.locate_key('chr3', 4) ... except KeyError as e: ... print(e) ... ('chr3', 4)
2.438907
2.555122
0.954517
slice_chrom = self.locate_key(chrom)

if start is None and stop is None:
    return slice_chrom

else:
    pos_chrom = SortedIndex(self.pos[slice_chrom])
    try:
        slice_within_chrom = pos_chrom.locate_range(start, stop)
    except KeyError:
        raise KeyError(chrom, start, stop)
    loc = slice(slice_chrom.start + slice_within_chrom.start,
                slice_chrom.start + slice_within_chrom.stop)
    return loc
def locate_range(self, chrom, start=None, stop=None)
Locate slice of index containing all entries within the range `key`:`start`-`stop` **inclusive**. Parameters ---------- chrom : object Chromosome or contig. start : int, optional Position start value. stop : int, optional Position stop value. Returns ------- loc : slice Slice object. Examples -------- >>> import allel >>> chrom = ['chr2', 'chr2', 'chr1', 'chr1', 'chr1', 'chr3'] >>> pos = [1, 4, 2, 5, 5, 3] >>> idx = allel.ChromPosIndex(chrom, pos) >>> idx.locate_range('chr1') slice(2, 5, None) >>> idx.locate_range('chr2', 1, 4) slice(0, 2, None) >>> idx.locate_range('chr1', 3, 7) slice(3, 5, None) >>> try: ... idx.locate_range('chr3', 4, 9) ... except KeyError as e: ... print(e) ('chr3', 4, 9)
3.350034
3.766571
0.889412
if index is None:
    pass
elif isinstance(index, str):
    index = SortedIndex(self[index], copy=False)
elif isinstance(index, (tuple, list)) and len(index) == 2:
    index = SortedMultiIndex(self[index[0]], self[index[1]], copy=False)
else:
    raise ValueError('invalid index argument, expected string or '
                     'pair of strings, found %s' % repr(index))
self.index = index
def set_index(self, index)
Set or reset the index.

    Parameters
    ----------
    index : string, pair of strings, or None
        Names of columns to use for positional index, e.g., 'POS' if
        table contains a 'POS' column and records from a single
        chromosome/contig, or ('CHROM', 'POS') if table contains records
        from multiple chromosomes/contigs. Pass None to clear any
        existing index.
2.77499
2.586705
1.07279
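By way of illustration, a minimal sketch of constructing a table and setting a multi-chromosome index; the records, column names and dtypes here are hypothetical:

import allel

# hypothetical records spanning two chromosomes, sorted by (CHROM, POS)
records = [(b'chr1', 2, 35), (b'chr1', 7, 12), (b'chr2', 3, 78)]
dtype = [('CHROM', 'S4'), ('POS', 'u4'), ('DP', int)]
vt = allel.VariantTable(records, dtype=dtype)

# records span multiple chromosomes, so index on both columns
vt.set_index(('CHROM', 'POS'))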
if self.index is None: raise ValueError('no index has been set') if isinstance(self.index, SortedIndex): # ignore chrom loc = self.index.locate_key(position) else: loc = self.index.locate_key(chrom, position) return self[loc]
def query_position(self, chrom=None, position=None)
Query the table, returning row or rows matching the given genomic position. Parameters ---------- chrom : string, optional Chromosome/contig. position : int, optional Position (1-based). Returns ------- result : row or VariantTable
4.072813
4.467164
0.911722
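Continuing the hypothetical table from the `set_index` sketch above, querying a single position might look like:

# return row(s) at chr1, position 7 (raises KeyError if absent)
row = vt.query_position(b'chr1', 7)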
if self.index is None: raise ValueError('no index has been set') if isinstance(self.index, SortedIndex): # ignore chrom loc = self.index.locate_range(start, stop) else: loc = self.index.locate_range(chrom, start, stop) return self[loc]
def query_region(self, chrom=None, start=None, stop=None)
Query the table, returning row or rows within the given genomic region. Parameters ---------- chrom : string, optional Chromosome/contig. start : int, optional Region start position (1-based). stop : int, optional Region stop position (1-based). Returns ------- result : VariantTable
3.653413
4.065567
0.898623
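And for the same hypothetical table, a region query:

# return all rows on chr1 with 2 <= POS <= 9 (coordinates are 1-based,
# inclusive)
sub = vt.query_region(b'chr1', start=2, stop=9)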
# assume 1-based, inclusive feature coordinates (as in GFF3)
m = np.zeros(size, dtype=bool)
for start, stop in self[[start_name, stop_name]]:
    m[start-1:stop] = True
return m
def to_mask(self, size, start_name='start', stop_name='end')
Construct a mask array where elements are True if they fall within
    features in the table.

    Parameters
    ----------
    size : int
        Size of chromosome/contig.
    start_name : string, optional
        Name of column with start coordinates.
    stop_name : string, optional
        Name of column with stop coordinates.

    Returns
    -------
    mask : ndarray, bool
3.128008
3.304895
0.946477
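A minimal sketch, assuming a FeatureTable can be constructed from a list of records with an explicit dtype; the feature names and coordinates are illustrative:

import allel

# hypothetical features with 1-based, inclusive coordinates
records = [(b'gene1', 2, 4), (b'gene2', 7, 8)]
dtype = [('ID', 'S8'), ('start', int), ('end', int)]
ft = allel.FeatureTable(records, dtype=dtype)

mask = ft.to_mask(10)
# positions 2-4 and 7-8 (1-based) are now True:
# [False, True, True, True, False, False, True, True, False, False]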
a = gff3_to_recarray(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, dtype=dtype) if a is None: return None else: return FeatureTable(a, copy=False)
def from_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', dtype=None)
Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable
2.385388
2.790102
0.854947
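A hedged sketch of loading a feature table from a GFF3 file; the file path and attribute names are hypothetical:

import allel

ft = allel.FeatureTable.from_gff3('data/annotations.gff3',
                                  attributes=['ID', 'Parent'])
if ft is not None:
    # select records by feature type
    genes = ft[ft['type'] == 'gene']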
import scipy.spatial # check inputs if not hasattr(x, 'ndim'): x = np.asarray(x) if x.ndim < 2: raise ValueError('array with at least 2 dimensions expected') if x.ndim == 2: # use scipy to calculate distance, it's most efficient def f(b): # transpose as pdist expects (m, n) for m observations in an # n-dimensional space t = b.T # compute the distance matrix return scipy.spatial.distance.pdist(t, metric=metric) else: # use our own implementation, it handles multidimensional observations def f(b): return pdist(b, metric=metric) if chunked: # use block-wise implementation blen = get_blen_array(x, blen) dist = None for i in range(0, x.shape[0], blen): j = min(x.shape[0], i+blen) block = x[i:j] if dist is None: dist = f(block) else: dist += f(block) else: # standard implementation dist = f(x) return dist
def pairwise_distance(x, metric, chunked=False, blen=None)
Compute pairwise distance between individuals (e.g., samples or haplotypes). Parameters ---------- x : array_like, shape (n, m, ...) Array of m observations (e.g., samples or haplotypes) in a space with n dimensions (e.g., variants). Note that the order of the first two dimensions is **swapped** compared to what is expected by scipy.spatial.distance.pdist. metric : string or function Distance metric. See documentation for the function :func:`scipy.spatial.distance.pdist` for a list of built-in distance metrics. chunked : bool, optional If True, use a block-wise implementation to avoid loading the entire input array into memory. This means that a distance matrix will be calculated for each block of the input array, and the results will be summed to produce the final output. For some distance metrics this will return a different result from the standard implementation. blen : int, optional Block length to use for chunked implementation. Returns ------- dist : ndarray, shape (m * (m - 1) / 2,) Distance matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]], ... [[0, 1], [1, 1], [1, 2]], ... [[0, 2], [2, 2], [-1, -1]]]) >>> d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock') >>> d array([3., 4., 3.]) >>> import scipy.spatial >>> scipy.spatial.distance.squareform(d) array([[0., 3., 4.], [3., 0., 3.], [4., 3., 0.]])
3.366024
3.248084
1.036311
if isinstance(metric, str):
    # resolve a metric name to the scipy implementation
    import scipy.spatial
    if hasattr(scipy.spatial.distance, metric):
        metric = getattr(scipy.spatial.distance, metric)
    else:
        raise ValueError('metric name not found')

# iterate over all pairs of observations (columns of x), computing the
# distance between each pair
m = x.shape[1]
dist = list()
for i, j in itertools.combinations(range(m), 2):
    a = x[:, i, ...]
    b = x[:, j, ...]
    d = metric(a, b)
    dist.append(d)
return np.array(dist)
def pdist(x, metric)
Alternative implementation of :func:`scipy.spatial.distance.pdist` which is slower but more flexible in that arrays with >2 dimensions can be passed, allowing for multidimensional observations, e.g., diploid genotype calls or allele counts. Parameters ---------- x : array_like, shape (n, m, ...) Array of m observations (e.g., samples or haplotypes) in a space with n dimensions (e.g., variants). Note that the order of the first two dimensions is **swapped** compared to what is expected by scipy.spatial.distance.pdist. metric : string or function Distance metric. See documentation for the function :func:`scipy.spatial.distance.pdist` for a list of built-in distance metrics. Returns ------- dist : ndarray Distance matrix in condensed form.
2.391741
2.338168
1.022912
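A minimal sketch of `pdist` with a callable metric over multidimensional observations; the metric here (mean absolute difference over calls) is purely illustrative, and the import path is an assumption:

import numpy as np
from allel.stats.distance import pdist

# genotype calls coded as allele indices: 3 variants x 3 samples x ploidy 2
x = np.array([[[0, 0], [0, 1], [1, 1]],
              [[0, 1], [1, 1], [1, 2]],
              [[0, 2], [2, 2], [0, 0]]])

def mad(a, b):
    # mean absolute difference across all calls for a pair of samples
    return np.mean(np.abs(a - b))

d = pdist(x, metric=mad)  # condensed form, 3 pairwise values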
if not isinstance(pos, SortedIndex): pos = SortedIndex(pos, copy=False) gac = asarray_ndim(gac, 3) # compute this once here, to avoid repeated evaluation within the loop gan = np.sum(gac, axis=2) m = gac.shape[1] dist = list() for i, j in itertools.combinations(range(m), 2): ac1 = gac[:, i, ...] an1 = gan[:, i] ac2 = gac[:, j, ...] an2 = gan[:, j] d = sequence_divergence(pos, ac1, ac2, an1=an1, an2=an2, start=start, stop=stop, is_accessible=is_accessible) dist.append(d) return np.array(dist)
def pairwise_dxy(pos, gac, start=None, stop=None, is_accessible=None)
Convenience function to calculate a pairwise distance matrix using nucleotide divergence (a.k.a. Dxy) as the distance metric. Parameters ---------- pos : array_like, int, shape (n_variants,) Variant positions. gac : array_like, int, shape (n_variants, n_samples, n_alleles) Per-genotype allele counts. start : int, optional Start position of region to use. stop : int, optional Stop position of region to use. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- dist : ndarray Distance matrix in condensed form. See Also -------- allel.model.ndarray.GenotypeArray.to_allele_counts
3.222198
3.19006
1.010074
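A hedged sketch of computing pairwise Dxy from genotype data, using per-genotype allele counts as suggested by the See Also note:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [0, 0]]])
pos = [5, 12]

# shape (n_variants, n_samples, n_alleles)
gac = g.to_allele_counts()

d = allel.pairwise_dxy(pos, gac)  # condensed distance matrix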
import scipy.linalg # This implementation is based on the skbio.math.stats.ordination.PCoA # implementation, with some minor adjustments. # check inputs dist = ensure_square(dist) # perform scaling e_matrix = (dist ** 2) / -2 row_means = np.mean(e_matrix, axis=1, keepdims=True) col_means = np.mean(e_matrix, axis=0, keepdims=True) matrix_mean = np.mean(e_matrix) f_matrix = e_matrix - row_means - col_means + matrix_mean eigvals, eigvecs = scipy.linalg.eigh(f_matrix) # deal with eigvals close to zero close_to_zero = np.isclose(eigvals, 0) eigvals[close_to_zero] = 0 # sort descending idxs = eigvals.argsort()[::-1] eigvals = eigvals[idxs] eigvecs = eigvecs[:, idxs] # keep only positive eigenvalues keep = eigvals >= 0 eigvecs = eigvecs[:, keep] eigvals = eigvals[keep] # compute coordinates coords = eigvecs * np.sqrt(eigvals) # compute ratio explained explained_ratio = eigvals / eigvals.sum() return coords, explained_ratio
def pcoa(dist)
Perform principal coordinate analysis of a distance matrix, a.k.a.
    classical multi-dimensional scaling.

    Parameters
    ----------
    dist : array_like
        Distance matrix in condensed form.

    Returns
    -------
    coords : ndarray, shape (n_samples, n_dimensions)
        Transformed coordinates for the samples.
    explained_ratio : ndarray, shape (n_dimensions,)
        Variance explained by each dimension.
2.895042
2.774711
1.043367
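A minimal sketch chaining a condensed distance matrix into PCoA; the genotype data are illustrative:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [1, 2]],
                         [[0, 2], [2, 2], [0, 1]]])

# condensed pairwise distances between the 3 samples
d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')

coords, explained = allel.pcoa(d)
# coords has one row per sample; explained sums to 1 over the
# retained dimensions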
# guard conditions if i == j or i >= n or j >= n or i < 0 or j < 0: raise ValueError('invalid coordinates: %s, %s' % (i, j)) # normalise order i, j = sorted([i, j]) # calculate number of items in rows before this one (sum of arithmetic # progression) x = i * ((2 * n) - i - 1) / 2 # add on previous items in current row ix = x + j - i - 1 return int(ix)
def condensed_coords(i, j, n)
Transform square distance matrix coordinates to the corresponding
    index into a condensed, 1D form of the matrix.

    Parameters
    ----------
    i : int
        Row index.
    j : int
        Column index.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    ix : int
        Index into the condensed (1D) form of the matrix.
5.382794
5.699744
0.944392
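For example, for a 3×3 matrix the condensed form has three entries, ordered (0, 1), (0, 2), (1, 2):

>>> condensed_coords(0, 1, 3)
0
>>> condensed_coords(2, 0, 3)  # order of i and j does not matter
1
>>> condensed_coords(1, 2, 3)
2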
return [condensed_coords(i, j, n) for i, j in itertools.combinations(sorted(pop), 2)]
def condensed_coords_within(pop, n)
Return indices into a condensed distance matrix for all
    pairwise comparisons within the given population.

    Parameters
    ----------
    pop : array_like, int
        Indices of samples or haplotypes within the population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : list of int
4.780144
8.003457
0.59726
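For example, with n=4 individuals, the within-population comparisons for {0, 1, 2} are (0, 1), (0, 2) and (1, 2), which map to condensed indices:

>>> condensed_coords_within([0, 1, 2], 4)
[0, 1, 3]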
return [condensed_coords(i, j, n) for i, j in itertools.product(sorted(pop1), sorted(pop2))]
def condensed_coords_between(pop1, pop2, n)
Return indices into a condensed distance matrix for all pairwise
    comparisons between two populations.

    Parameters
    ----------
    pop1 : array_like, int
        Indices of samples or haplotypes within the first population.
    pop2 : array_like, int
        Indices of samples or haplotypes within the second population.
    n : int
        Size of the square matrix (length of first or second dimension).

    Returns
    -------
    indices : list of int
3.556943
6.969166
0.510383
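Similarly, the between-population comparisons for {0, 1} versus {2, 3} with n=4:

>>> condensed_coords_between([0, 1], [2, 3], 4)
[1, 2, 3, 4]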
import matplotlib.pyplot as plt # check inputs dist_square = ensure_square(dist) # set up axes if ax is None: # make a square figure x = plt.rcParams['figure.figsize'][0] fig, ax = plt.subplots(figsize=(x, x)) fig.tight_layout() # setup imshow arguments if imshow_kwargs is None: imshow_kwargs = dict() imshow_kwargs.setdefault('interpolation', 'none') imshow_kwargs.setdefault('cmap', 'jet') imshow_kwargs.setdefault('vmin', np.min(dist)) imshow_kwargs.setdefault('vmax', np.max(dist)) # plot as image im = ax.imshow(dist_square, **imshow_kwargs) # tidy up if labels: ax.set_xticks(range(len(labels))) ax.set_yticks(range(len(labels))) ax.set_xticklabels(labels, rotation=90) ax.set_yticklabels(labels, rotation=0) else: ax.set_xticks([]) ax.set_yticks([]) if colorbar: plt.gcf().colorbar(im, shrink=.5) return ax
def plot_pairwise_distance(dist, labels=None, colorbar=True, ax=None, imshow_kwargs=None)
Plot a pairwise distance matrix.

    Parameters
    ----------
    dist : array_like
        The distance matrix in condensed form.
    labels : sequence of strings, optional
        Sample labels for the axes.
    colorbar : bool, optional
        If True, add a colorbar to the current figure.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    imshow_kwargs : dict-like, optional
        Additional keyword arguments passed through to
        :func:`matplotlib.pyplot.imshow`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
2.025765
2.091325
0.968652
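A hedged sketch tying this to the distance functions above; assumes the function is exported at the top level as shown:

import allel

g = allel.GenotypeArray([[[0, 0], [0, 1], [1, 1]],
                         [[0, 1], [1, 1], [1, 2]]])
d = allel.pairwise_distance(g.to_n_alt(), metric='cityblock')
ax = allel.plot_pairwise_distance(d, labels=['s1', 's2', 's3'])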
if isinstance(values, tuple): # multiple input arrays n = len(values[0]) masked_values = [np.ma.asarray(v) for v in values] for m in masked_values: assert m.ndim == 1, 'only 1D arrays supported' assert m.shape[0] == n, 'input arrays not of equal length' m.mask = np.zeros(m.shape, dtype=bool) else: n = len(values) masked_values = np.ma.asarray(values) assert masked_values.ndim == 1, 'only 1D arrays supported' masked_values.mask = np.zeros(masked_values.shape, dtype=bool) # values of the statistic calculated in each jackknife iteration vj = list() for i in range(n): if isinstance(values, tuple): # multiple input arrays for m in masked_values: m.mask[i] = True x = statistic(*masked_values) for m in masked_values: m.mask[i] = False else: masked_values.mask[i] = True x = statistic(masked_values) masked_values.mask[i] = False vj.append(x) # convert to array for convenience vj = np.array(vj) # compute mean of jackknife values m = vj.mean() # compute standard error sv = ((n - 1) / n) * np.sum((vj - m) ** 2) se = np.sqrt(sv) return m, se, vj
def jackknife(values, statistic)
Estimate standard error for `statistic` computed over `values` using the jackknife. Parameters ---------- values : array_like or tuple of array_like Input array, or tuple of input arrays. statistic : function The statistic to compute. Returns ------- m : float Mean of jackknife values. se : float Estimate of standard error. vj : ndarray Statistic values computed for each jackknife iteration.
2.17721
1.987225
1.095603
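A minimal sketch of jackknifing a mean; the values are illustrative and the import path is an assumption:

import numpy as np
from allel.stats.misc import jackknife

values = np.array([1., 2., 3., 4., 5.])
m, se, vj = jackknife(values, statistic=np.mean)
# vj holds the five leave-one-out means; m == 3.0 and
# se == sqrt(0.5) ~= 0.71 for these values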
import matplotlib.pyplot as plt

# check inputs
pos = SortedIndex(pos, copy=False)

# set up axes
if ax is None:
    x = plt.rcParams['figure.figsize'][0]
    y = x / 7
    fig, ax = plt.subplots(figsize=(x, y))
    fig.tight_layout()

# determine x axis limits
if start is None:
    start = np.min(pos)
if stop is None:
    stop = np.max(pos)
loc = pos.locate_range(start, stop)
pos = pos[loc]
if step is None:
    # draw ~100 lines by default; guard against a zero step when there
    # are fewer than 100 variants
    step = max(1, len(pos) // 100)
ax.set_xlim(start, stop)

# plot the lines
if line_kwargs is None:
    line_kwargs = dict()
# line_kwargs.setdefault('linewidth', .5)
n_variants = len(pos)
for i, p in enumerate(pos[::step]):
    xfrom = p
    xto = (
        start +
        ((i * step / n_variants) * (stop-start))
    )
    line = plt.Line2D([xfrom, xto], [0, 1], **line_kwargs)
    ax.add_line(line)

# invert?
if flip:
    ax.invert_yaxis()
    ax.xaxis.tick_top()
else:
    ax.xaxis.tick_bottom()

# tidy up
ax.set_yticks([])
ax.xaxis.set_tick_params(direction='out')
for spine in 'left', 'right':
    ax.spines[spine].set_visible(False)

return ax
def plot_variant_locator(pos, step=None, ax=None, start=None, stop=None, flip=False, line_kwargs=None)
Plot lines indicating the physical genome location of variants from a
    single chromosome/contig. By default the top x axis is in variant
    index space, and the bottom x axis is in genome position space.

    Parameters
    ----------
    pos : array_like
        A sorted 1-dimensional array of genomic positions from a single
        chromosome/contig.
    step : int, optional
        Plot a line for every `step` variants.
    ax : axes, optional
        The axes on which to draw. If not provided, a new figure will be
        created.
    start : int, optional
        The start position for the region to draw.
    stop : int, optional
        The stop position for the region to draw.
    flip : bool, optional
        Flip the plot upside down.
    line_kwargs : dict-like, optional
        Additional keyword arguments passed through to `plt.Line2D`.

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
2.553098
2.57748
0.990541
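A hedged sketch of drawing a variant locator for simulated positions:

import numpy as np
import allel

# simulate sorted positions on a ~1Mb contig
pos = np.sort(np.random.RandomState(1).randint(1, 10**6, size=500))
ax = allel.plot_variant_locator(pos, step=10)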
# check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, _ = state_transitions(x, states) # start to build a dataframe items = [('lstate', transitions[:, 0]), ('rstate', transitions[:, 1]), ('lidx', switch_points[:, 0]), ('ridx', switch_points[:, 1])] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # find switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # add columns into dataframe items += [('lpos', switch_positions[:, 0]), ('rpos', switch_positions[:, 1])] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
def tabulate_state_transitions(x, states, pos=None)
Construct a dataframe where each row provides information about a state transition. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Notes ----- The resulting dataframe includes one row at the start representing the first state observation and one row at the end representing the last state observation. Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_transitions(x, states={1, 2}) >>> df lstate rstate lidx ridx 0 -1 1 -1 0 1 1 2 4 5 2 2 1 8 9 3 1 -1 10 -1 >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_transitions(x, states={1, 2}, pos=pos) >>> df lstate rstate lidx ridx lpos rpos 0 -1 1 -1 0 -1 2 1 1 2 4 5 10 14 2 2 1 8 9 28 30 3 1 -1 10 -1 31 -1
3.501071
3.402702
1.028909
# check inputs x = asarray_ndim(x, 1) check_integer_dtype(x) x = memoryview_safe(x) # find state transitions switch_points, transitions, observations = state_transitions(x, states) # setup some helpers t = transitions[1:, 0] o = observations[1:] s1 = switch_points[:-1] s2 = switch_points[1:] is_marginal = (s1[:, 0] < 0) | (s2[:, 1] < 0) size_min = s2[:, 0] - s1[:, 1] + 1 size_max = s2[:, 1] - s1[:, 0] - 1 size_max[is_marginal] = -1 # start to build a dataframe items = [ ('state', t), ('support', o), ('start_lidx', s1[:, 0]), ('start_ridx', s1[:, 1]), ('stop_lidx', s2[:, 0]), ('stop_ridx', s2[:, 1]), ('size_min', size_min), ('size_max', size_max), ('is_marginal', is_marginal) ] # deal with optional positions if pos is not None: pos = asarray_ndim(pos, 1) check_dim0_aligned(x, pos) check_integer_dtype(pos) # obtain switch positions switch_positions = np.take(pos, switch_points) # deal with boundary transitions switch_positions[0, 0] = -1 switch_positions[-1, 1] = -1 # setup helpers p1 = switch_positions[:-1] p2 = switch_positions[1:] length_min = p2[:, 0] - p1[:, 1] + 1 length_max = p2[:, 1] - p1[:, 0] - 1 length_max[is_marginal] = -1 items += [ ('start_lpos', p1[:, 0]), ('start_rpos', p1[:, 1]), ('stop_lpos', p2[:, 0]), ('stop_rpos', p2[:, 1]), ('length_min', length_min), ('length_max', length_max), ] import pandas return pandas.DataFrame.from_dict(OrderedDict(items))
def tabulate_state_blocks(x, states, pos=None)
Construct a dataframe where each row provides information about continuous state blocks. Parameters ---------- x : array_like, int 1-dimensional array of state values. states : set Set of states of interest. Any state value not in this set will be ignored. pos : array_like, int, optional Array of positions corresponding to values in `x`. Returns ------- df : DataFrame Examples -------- >>> import allel >>> x = [1, 1, 0, 1, 1, 2, 2, 0, 2, 1, 1] >>> df = allel.tabulate_state_blocks(x, states={1, 2}) >>> df state support start_lidx ... size_min size_max is_marginal 0 1 4 -1 ... 5 -1 True 1 2 3 4 ... 4 4 False 2 1 2 8 ... 2 -1 True [3 rows x 9 columns] >>> pos = [2, 4, 7, 8, 10, 14, 19, 23, 28, 30, 31] >>> df = allel.tabulate_state_blocks(x, states={1, 2}, pos=pos) >>> df state support start_lidx ... stop_rpos length_min length_max 0 1 4 -1 ... 14 9 -1 1 2 3 4 ... 30 15 19 2 1 2 8 ... -1 2 -1 [3 rows x 15 columns]
2.479265
2.258969
1.097521
names, callset = normalize_callset(callset) with open(path, 'w') as vcf_file: if write_header: write_vcf_header(vcf_file, names, callset=callset, rename=rename, number=number, description=description) write_vcf_data(vcf_file, names, callset=callset, rename=rename, fill=fill)
def write_vcf(path, callset, rename=None, number=None, description=None, fill=None, write_header=True)
Preliminary support for writing a VCF file. Only variant-level data are
    written; sample data (e.g., genotype calls) are not currently
    supported. This function needs further work.
2.336115
2.278978
1.025072
allow_none = kwargs.pop('allow_none', False) kwargs.setdefault('copy', False) if a is None and allow_none: return None a = np.array(a, **kwargs) if a.ndim not in ndims: if len(ndims) > 1: expect_str = 'one of %s' % str(ndims) else: # noinspection PyUnresolvedReferences expect_str = '%s' % ndims[0] raise TypeError('bad number of dimensions: expected %s; found %s' % (expect_str, a.ndim)) return a
def asarray_ndim(a, *ndims, **kwargs)
Ensure numpy array.

    Parameters
    ----------
    a : array_like
        Input data.
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        Passed through to :func:`numpy.array`.

    Returns
    -------
    a : numpy.ndarray
2.697962
2.658483
1.01485
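For example, behaviour implied by the code above:

>>> asarray_ndim([1, 2, 3], 1)
array([1, 2, 3])
>>> asarray_ndim([[1, 2], [3, 4]], 1, 2)
array([[1, 2],
       [3, 4]])
>>> asarray_ndim(None, 1, allow_none=True) is None
True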
# initialise HDF5 file path if filepath is None: import tempfile filepath = tempfile.mktemp(prefix='scikit_allel_', suffix='.h5') atexit.register(os.remove, filepath) # initialise defaults for dataset creation h5dcreate_kwargs.setdefault('chunks', True) def decorator(user_function): # setup the name for the cache container group if group is None: container = user_function.__name__ else: container = group def wrapper(*args, **kwargs): # load from cache or not no_cache = kwargs.pop('no_cache', False) # compute a key from the function arguments key = _make_key(args, kwargs, typed) if hashed_key: key = str(hash(key)) else: key = str(key).replace('/', '__slash__') return _hdf5_cache_act(filepath, parent, container, key, names, no_cache, user_function, args, kwargs, h5dcreate_kwargs) wrapper.cache_filepath = filepath return update_wrapper(wrapper, user_function) return decorator
def hdf5_cache(filepath=None, parent=None, group=None, names=None, typed=False, hashed_key=False, **h5dcreate_kwargs)
HDF5 cache decorator. Parameters ---------- filepath : string, optional Path to HDF5 file. If None a temporary file name will be used. parent : string, optional Path to group within HDF5 file to use as parent. If None the root group will be used. group : string, optional Path to group within HDF5 file, relative to parent, to use as container for cached data. If None the name of the wrapped function will be used. names : sequence of strings, optional Name(s) of dataset(s). If None, default names will be 'f00', 'f01', etc. typed : bool, optional If True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. hashed_key : bool, optional If False (default) the key will not be hashed, which makes for readable cache group names. If True the key will be hashed, however note that on Python >= 3.3 the hash value will not be the same between sessions unless the environment variable PYTHONHASHSEED has been set to the same value. Returns ------- decorator : function Examples -------- Without any arguments, will cache using a temporary HDF5 file:: >>> import allel >>> @allel.util.hdf5_cache() ... def foo(n): ... print('executing foo') ... return np.arange(n) ... >>> foo(3) executing foo array([0, 1, 2]) >>> foo(3) array([0, 1, 2]) >>> foo.cache_filepath # doctest: +SKIP '/tmp/tmp_jwtwgjz' Supports multiple return values, including scalars, e.g.:: >>> @allel.util.hdf5_cache() ... def bar(n): ... print('executing bar') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> bar(3) executing bar (array([0, 1, 2]), array([0, 1, 4]), 9) >>> bar(3) (array([0, 1, 2]), array([0, 1, 4]), 9) Names can also be specified for the datasets, e.g.:: >>> @allel.util.hdf5_cache(names=['z', 'x', 'y']) ... def baz(n): ... print('executing baz') ... a = np.arange(n) ... return a, a**2, n**2 ... >>> baz(3) executing baz (array([0, 1, 2]), array([0, 1, 4]), 9) >>> baz(3) (array([0, 1, 2]), array([0, 1, 4]), 9)
3.455153
3.603754
0.958765
# set up the model model = GenotypePCA(n_components, copy=copy, scaler=scaler, ploidy=ploidy) # fit the model and project the input data onto the new dimensions coords = model.fit_transform(gn) return coords, model
def pca(gn, n_components=10, copy=True, scaler='patterson', ploidy=2)
Perform principal components analysis of genotype data, via singular value decomposition. Parameters ---------- gn : array_like, float, shape (n_variants, n_samples) Genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). n_components : int, optional Number of components to keep. copy : bool, optional If False, data passed to fit are overwritten. scaler : {'patterson', 'standard', None} Scaling method; 'patterson' applies the method of Patterson et al 2006; 'standard' scales to unit variance; None centers the data only. ploidy : int, optional Sample ploidy, only relevant if 'patterson' scaler is used. Returns ------- coords : ndarray, float, shape (n_samples, n_components) Transformed coordinates for the samples. model : GenotypePCA Model instance containing the variance ratio explained and the stored components (a.k.a., loadings). Can be used to project further data into the same principal components space via the transform() method. Notes ----- Genotype data should be filtered prior to using this function to remove variants in linkage disequilibrium. See Also -------- randomized_pca, allel.stats.ld.locate_unlinked
3.667938
3.774994
0.971641
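A hedged sketch of running PCA over simulated data (random genotypes, so the coordinates themselves are meaningless; the point is the shapes). The attribute name on the model follows the sklearn convention and is an assumption here:

import numpy as np
import allel

# simulate alternate allele counts at 100 variants x 20 samples
rng = np.random.RandomState(42)
gn = rng.randint(0, 3, size=(100, 20)).astype('f4')

coords, model = allel.pca(gn, n_components=4)
# coords.shape == (20, 4)
# model.explained_variance_ratio_ has one entry per component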
# set up the model model = GenotypeRandomizedPCA(n_components, copy=copy, iterated_power=iterated_power, random_state=random_state, scaler=scaler, ploidy=ploidy) # fit the model and project the input data onto the new dimensions coords = model.fit_transform(gn) return coords, model
def randomized_pca(gn, n_components=10, copy=True, iterated_power=3, random_state=None, scaler='patterson', ploidy=2)
Perform principal components analysis of genotype data, via an approximate truncated singular value decomposition using randomization to speed up the computation. Parameters ---------- gn : array_like, float, shape (n_variants, n_samples) Genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). n_components : int, optional Number of components to keep. copy : bool, optional If False, data passed to fit are overwritten. iterated_power : int, optional Number of iterations for the power method. random_state : int or RandomState instance or None (default) Pseudo Random Number generator seed control. If None, use the numpy.random singleton. scaler : {'patterson', 'standard', None} Scaling method; 'patterson' applies the method of Patterson et al 2006; 'standard' scales to unit variance; None centers the data only. ploidy : int, optional Sample ploidy, only relevant if 'patterson' scaler is used. Returns ------- coords : ndarray, float, shape (n_samples, n_components) Transformed coordinates for the samples. model : GenotypeRandomizedPCA Model instance containing the variance ratio explained and the stored components (a.k.a., loadings). Can be used to project further data into the same principal components space via the transform() method. Notes ----- Genotype data should be filtered prior to using this function to remove variants in linkage disequilibrium. Based on the :class:`sklearn.decomposition.RandomizedPCA` implementation. See Also -------- pca, allel.stats.ld.locate_unlinked
2.869296
2.858865
1.003649
# check inputs ac = asarray_ndim(ac, 2) assert ac.shape[1] == 2, 'only biallelic variants supported' # compute allele number an = ac.sum(axis=1) # compute estimator x = (ac[:, 0] * ac[:, 1]) / (an * (an - 1)) return x
def h_hat(ac)
Unbiased estimator for h, where 2*h is the heterozygosity of the population. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array for a single population. Returns ------- h_hat : ndarray, float, shape (n_variants,) Notes ----- Used in Patterson (2012) for calculation of various statistics.
4.689188
3.808476
1.23125
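For example, at a variant with 5 reference and 5 alternate alleles observed, versus a fixed variant:

import numpy as np

ac = np.array([[5, 5], [10, 0]])
h = h_hat(ac)
# h[0] == 25 / 90 ~= 0.278 (polymorphic variant)
# h[1] == 0 (fixed variant)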
# check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb) # compute allele numbers sa = aca.sum(axis=1) sb = acb.sum(axis=1) # compute heterozygosities ha = h_hat(aca) hb = h_hat(acb) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] # compute estimator x = ((a - b) ** 2) - (ha / sa) - (hb / sb) return x
def patterson_f2(aca, acb)
Unbiased estimator for F2(A, B), the branch length between populations A and B. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- f2 : ndarray, float, shape (n_variants,) Notes ----- See Patterson (2012), Appendix A.
3.333886
3.11926
1.068807
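A minimal sketch, assuming the function is exported at the top level as shown (otherwise import from allel.stats.admixture); allele counts are illustrative:

import numpy as np
import allel

aca = np.array([[6, 0], [3, 3]])
acb = np.array([[0, 6], [3, 3]])

f2 = allel.patterson_f2(aca, acb)
# a genome-wide estimate could be obtained as np.nanmean(f2)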
# check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' acc = AlleleCountsArray(acc, copy=False) assert acc.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb, acc) # compute allele number and heterozygosity in test population sc = acc.sum(axis=1) hc = h_hat(acc) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] c = acc.to_frequencies()[:, 1] # compute estimator T = ((c - a) * (c - b)) - (hc / sc) B = 2 * hc return T, B
def patterson_f3(acc, aca, acb)
Unbiased estimator for F3(C; A, B), the three-population test for admixture in population C. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). Returns ------- T : ndarray, float, shape (n_variants,) Un-normalized f3 estimates per variant. B : ndarray, float, shape (n_variants,) Estimates for heterozygosity in population C. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f3 statistics, ignore the `B` return value. To compute the f3* statistic, which is normalized by heterozygosity in population C to remove numerical dependence on the allele frequency spectrum, compute ``np.sum(T) / np.sum(B)``.
3.43958
2.9696
1.158264
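A hedged sketch of computing the normalized f3* statistic from the per-variant values, following the note in the docstring; the allele counts are illustrative:

import numpy as np
import allel

aca = np.array([[6, 0], [5, 1], [4, 2]])
acb = np.array([[0, 6], [1, 5], [2, 4]])
acc = np.array([[3, 3], [3, 3], [4, 2]])

T, B = allel.patterson_f3(acc, aca, acb)
f3_star = np.nansum(T) / np.nansum(B)
# a significantly negative f3* is evidence that C is admixed
# between A and B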
# check inputs aca = AlleleCountsArray(aca, copy=False) assert aca.shape[1] == 2, 'only biallelic variants supported' acb = AlleleCountsArray(acb, copy=False) assert acb.shape[1] == 2, 'only biallelic variants supported' acc = AlleleCountsArray(acc, copy=False) assert acc.shape[1] == 2, 'only biallelic variants supported' acd = AlleleCountsArray(acd, copy=False) assert acd.shape[1] == 2, 'only biallelic variants supported' check_dim0_aligned(aca, acb, acc, acd) # compute sample frequencies for the alternate allele a = aca.to_frequencies()[:, 1] b = acb.to_frequencies()[:, 1] c = acc.to_frequencies()[:, 1] d = acd.to_frequencies()[:, 1] # compute estimator num = (a - b) * (c - d) den = (a + b - (2 * a * b)) * (c + d - (2 * c * d)) return num, den
def patterson_d(aca, acb, acc, acd)
Unbiased estimator for D(A, B; C, D), the normalised four-population test for admixture between (A or B) and (C or D), also known as the "ABBA BABA" test. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. Returns ------- num : ndarray, float, shape (n_variants,) Numerator (un-normalised f4 estimates). den : ndarray, float, shape (n_variants,) Denominator. Notes ----- See Patterson (2012), main text and Appendix A. For un-normalized f4 statistics, ignore the `den` return value.
2.078772
1.886985
1.101637
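A hedged sketch of combining the per-variant numerator and denominator into a single D estimate; allele counts are illustrative:

import numpy as np
import allel

aca = np.array([[6, 0], [4, 2]])
acb = np.array([[5, 1], [2, 4]])
acc = np.array([[3, 3], [1, 5]])
acd = np.array([[0, 6], [0, 6]])

num, den = allel.patterson_d(aca, acb, acc, acd)
d = np.nansum(num) / np.nansum(den)
# D near 0 is consistent with no admixture; an excess of ABBA or BABA
# site patterns pushes D away from 0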
# calculate per-variant values T, B = patterson_f3(acc, aca, acb) # calculate value of statistic within each block if normed: T_bsum = moving_statistic(T, statistic=np.nansum, size=size, start=start, stop=stop, step=step) B_bsum = moving_statistic(B, statistic=np.nansum, size=size, start=start, stop=stop, step=step) f3 = T_bsum / B_bsum else: f3 = moving_statistic(T, statistic=np.nanmean, size=size, start=start, stop=stop, step=step) return f3
def moving_patterson_f3(acc, aca, acb, size, start=0, stop=None, step=None, normed=True)
Estimate F3(C; A, B) in moving windows. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. normed : bool, optional If False, use un-normalised f3 values. Returns ------- f3 : ndarray, float, shape (n_windows,) Estimated value of the statistic in each window.
2.551965
2.57437
0.991297
# calculate per-variant values num, den = patterson_d(aca, acb, acc, acd) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. # compute the numerator and denominator within each window num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate the statistic values in each block d = num_sum / den_sum return d
def moving_patterson_d(aca, acb, acc, acd, size, start=0, stop=None, step=None)
Estimate D(A, B; C, D) in moving windows. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- d : ndarray, float, shape (n_windows,) Estimated value of the statistic in each window.
4.642281
4.298811
1.079899
# calculate per-variant values T, B = patterson_f3(acc, aca, acb) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. # calculate overall value of statistic if normed: f3 = np.nansum(T) / np.nansum(B) else: f3 = np.nanmean(T) # calculate value of statistic within each block if normed: T_bsum = moving_statistic(T, statistic=np.nansum, size=blen) B_bsum = moving_statistic(B, statistic=np.nansum, size=blen) vb = T_bsum / B_bsum _, se, vj = jackknife((T_bsum, B_bsum), statistic=lambda t, b: np.sum(t) / np.sum(b)) else: vb = moving_statistic(T, statistic=np.nanmean, size=blen) _, se, vj = jackknife(vb, statistic=np.mean) # compute Z score z = f3 / se return f3, se, z, vb, vj
def average_patterson_f3(acc, aca, acb, blen, normed=True)
Estimate F3(C; A, B) and standard error using the block-jackknife. Parameters ---------- acc : array_like, int, shape (n_variants, 2) Allele counts for the test population (C). aca : array_like, int, shape (n_variants, 2) Allele counts for the first source population (A). acb : array_like, int, shape (n_variants, 2) Allele counts for the second source population (B). blen : int Block size (number of variants). normed : bool, optional If False, use un-normalised f3 values. Returns ------- f3 : float Estimated value of the statistic using all data. se : float Estimated standard error. z : float Z-score (number of standard errors from zero). vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. Notes ----- See Patterson (2012), main text and Appendix A. See Also -------- allel.stats.admixture.patterson_f3
4.085251
3.377184
1.209662
# calculate per-variant values num, den = patterson_d(aca, acb, acc, acd) # N.B., nans can occur if any of the populations have completely missing # genotype calls at a variant (i.e., allele number is zero). Here we # assume that is rare enough to be negligible. # calculate overall estimate d_avg = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) # compute Z score z = d_avg / se return d_avg, se, z, vb, vj
def average_patterson_d(aca, acb, acc, acd, blen)
Estimate D(A, B; C, D) and standard error using the block-jackknife. Parameters ---------- aca : array_like, int, shape (n_variants, 2), Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. acc : array_like, int, shape (n_variants, 2) Allele counts for population C. acd : array_like, int, shape (n_variants, 2) Allele counts for population D. blen : int Block size (number of variants). Returns ------- d : float Estimated value of the statistic using all data. se : float Estimated standard error. z : float Z-score (number of standard errors from zero). vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling. Notes ----- See Patterson (2012), main text and Appendix A. See Also -------- allel.stats.admixture.patterson_d
5.063098
4.033651
1.255215
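A hedged sketch with simulated allele counts, mainly to show the shapes of the return values:

import numpy as np
import allel

# simulate biallelic allele counts for four populations, 10 alleles each
rng = np.random.RandomState(9)

def sim_ac(n):
    alt = rng.binomial(10, 0.5, size=n)
    return np.column_stack([10 - alt, alt])

aca, acb, acc, acd = [sim_ac(1000) for _ in range(4)]
d, se, z, vb, vj = allel.average_patterson_d(aca, acb, acc, acd, blen=100)
# vb and vj each have one value per 100-variant block; conventionally
# |z| > ~3 would be taken as evidence of admixture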
if chunks is None: if hasattr(data, 'chunklen') and hasattr(data, 'shape'): # bcolz carray, chunk first dimension only return (data.chunklen,) + data.shape[1:] elif hasattr(data, 'chunks') and hasattr(data, 'shape') and \ len(data.chunks) == len(data.shape): # h5py dataset or zarr array return data.chunks else: # fall back to something simple, ~4Mb chunks of first dimension row = np.asarray(data[0]) chunklen = max(1, (2**22) // row.nbytes) if row.shape: chunks = (chunklen,) + row.shape else: chunks = (chunklen,) return chunks else: return chunks
def get_chunks(data, chunks=None)
Try to guess a reasonable chunk shape to use for block-wise
    algorithms operating over `data`.

    Parameters
    ----------
    data : array_like
        The data to be processed block-wise.
    chunks : tuple of int, optional
        If given, returned unchanged; otherwise a chunk shape is guessed
        from the type and shape of `data`.

    Returns
    -------
    chunks : tuple of int
        Chunk shape, with the same number of dimensions as `data`.
4.037466
3.788757
1.065644
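For example, for a plain numpy array (which has neither `chunklen` nor `chunks` attributes), the fallback path targets roughly 4Mb blocks over the first dimension:

import numpy as np

data = np.zeros((1000000, 10), dtype='i4')
chunks = get_chunks(data)
# one row is 40 bytes, so chunklen == 2**22 // 40 == 104857
# and chunks == (104857, 10)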
# prepare fill values for attributes
if attributes is not None:
    attributes = list(attributes)
    if isinstance(attributes_fill, (list, tuple)):
        if len(attributes) != len(attributes_fill):
            raise ValueError('number of fills does not match attributes')
    else:
        attributes_fill = [attributes_fill] * len(attributes)

# open input stream
if region is not None:
    cmd = [tabix, path, region]
    buffer = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
elif path.endswith('.gz') or path.endswith('.bgz'):
    buffer = gzip.open(path, mode='rb')
else:
    buffer = open(path, mode='rb')

try:
    for line in buffer:
        # N.B., test the first character via slicing rather than
        # indexing, because indexing a bytes object on Python 3 returns
        # an int rather than a bytes object
        if line[:1] == b'>':
            # assume begin embedded FASTA
            return
        if line[:1] == b'#':
            # skip comment lines
            continue
        vals = line.split(b'\t')
        if len(vals) == 9:
            # unpack for processing
            fseqid, fsource, ftype, fstart, fend, fscore, fstrand, fphase, fattrs = vals
            # convert numerics
            fstart = int(fstart)
            fend = int(fend)
            if fscore == b'.':
                fscore = score_fill
            else:
                fscore = float(fscore)
            if fphase == b'.':
                fphase = phase_fill
            else:
                fphase = int(fphase)
            if not PY2:
                fseqid = str(fseqid, 'ascii')
                fsource = str(fsource, 'ascii')
                ftype = str(ftype, 'ascii')
                fstrand = str(fstrand, 'ascii')
                fattrs = str(fattrs, 'ascii')
            rec = (fseqid, fsource, ftype, fstart, fend, fscore, fstrand,
                   fphase)
            if attributes is not None:
                dattrs = gff3_parse_attributes(fattrs)
                vattrs = tuple(
                    dattrs.get(k, f)
                    for k, f in zip(attributes, attributes_fill)
                )
                rec += vattrs
            yield rec
finally:
    buffer.close()
def iter_gff3(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', tabix='tabix')
Iterate over records in a GFF3 file. Parameters ---------- path : string Path to input file. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. tabix : string Tabix command. Returns ------- Iterator
2.288034
2.271303
1.007366
# read records recs = list(iter_gff3(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, tabix=tabix)) if not recs: return None # determine dtype if dtype is None: dtype = [('seqid', object), ('source', object), ('type', object), ('start', int), ('end', int), ('score', float), ('strand', object), ('phase', int)] if attributes: for n in attributes: dtype.append((n, object)) a = np.rec.fromrecords(recs, dtype=dtype) return a
def gff3_to_recarray(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', tabix='tabix', dtype=None)
Load data from a GFF3 into a NumPy recarray. Parameters ---------- path : string Path to input file. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. tabix : string, optional Tabix command. dtype : dtype, optional Override dtype. Returns ------- np.recarray
1.903277
2.086491
0.91219
import pandas # read records recs = list(iter_gff3(path, attributes=attributes, region=region, score_fill=score_fill, phase_fill=phase_fill, attributes_fill=attributes_fill, tabix=tabix)) # load into pandas columns = ['seqid', 'source', 'type', 'start', 'end', 'score', 'strand', 'phase'] if attributes: columns += list(attributes) df = pandas.DataFrame.from_records(recs, columns=columns, **kwargs) return df
def gff3_to_dataframe(path, attributes=None, region=None, score_fill=-1, phase_fill=-1, attributes_fill='.', tabix='tabix', **kwargs)
Load data from a GFF3 into a pandas DataFrame. Parameters ---------- path : string Path to input file. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. tabix : string, optional Tabix command. Returns ------- pandas.DataFrame
2.066299
2.463551
0.838748
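A hedged sketch of loading annotations into pandas; the path is hypothetical, and passing `region` requires a bgzipped, tabix-indexed file:

import allel

df = allel.gff3_to_dataframe('data/annotations.gff3.gz',
                             attributes=['ID', 'Parent'],
                             region='chr1:1-1000000')
exons = df[df['type'] == 'exon']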