Fields per record: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64).
basic_deliver = self._inbound.pop(0)
if not isinstance(basic_deliver, specification.Basic.Deliver):
    LOGGER.warning(
        'Received an out-of-order frame: %s was '
        'expecting a Basic.Deliver frame',
        type(basic_deliver)
    )
    return None
content_header = self._inbound.pop(0)
if not isinstance(content_header, ContentHeader):
    LOGGER.warning(
        'Received an out-of-order frame: %s was '
        'expecting a ContentHeader frame',
        type(content_header)
    )
    return None
return basic_deliver, content_header
def _build_message_headers(self)
Fetch Message Headers (Deliver & Header Frames). :rtype: tuple|None
3.026313
2.746147
1.102022
body = bytes()
while len(body) < body_size:
    if not self._inbound:
        self.check_for_errors()
        sleep(IDLE_WAIT)
        continue
    body_piece = self._inbound.pop(0)
    if not body_piece.value:
        break
    body += body_piece.value
return body
def _build_message_body(self, body_size)
Build the Message body from the inbound queue. :rtype: str
4.557789
4.247504
1.073051
if frame_in.reply_code != 200:
    reply_text = try_utf8_decode(frame_in.reply_text)
    message = (
        'Channel %d was closed by remote server: %s' % (
            self._channel_id, reply_text
        )
    )
    exception = AMQPChannelError(message, reply_code=frame_in.reply_code)
    self.exceptions.append(exception)
self.set_state(self.CLOSED)
if self._connection.is_open:
    try:
        self._connection.write_frame(
            self.channel_id,
            specification.Channel.CloseOk()
        )
    except AMQPConnectionError:
        pass
self.close()
def _close_channel(self, frame_in)
Close Channel. :param specification.Channel.Close frame_in: Channel Close frame. :return:
3.772677
3.717832
1.014752
user_payload = json.dumps({
    'password': password,
    'tags': tags
})
return self.http_client.put(API_USER % username, payload=user_payload)
def create(self, username, password, tags='')
Create User. :param str username: Username :param str password: Password :param str tags: Comma-separated list of tags (e.g. monitoring) :rtype: None
4.674662
5.808265
0.804829
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_USER_VIRTUAL_HOST_PERMISSIONS % (
    virtual_host,
    username
))
def get_permission(self, username, virtual_host)
Get User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
6.74689
9.035014
0.746749
virtual_host = quote(virtual_host, '')
permission_payload = json.dumps({
    "configure": configure_regex,
    "read": read_regex,
    "write": write_regex
})
return self.http_client.put(API_USER_VIRTUAL_HOST_PERMISSIONS % (
    virtual_host,
    username
), payload=permission_payload)
def set_permission(self, username, virtual_host, configure_regex='.*', write_regex='.*', read_regex='.*')
Set User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :param str configure_regex: Permission pattern for configuration operations for this user. :param str write_regex: Permission pattern for write operations for this user. :param str read_regex: Permission pattern for read operations for this user. :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
3.336037
4.334952
0.769567
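A short sketch tying the three permission calls together, continuing the hypothetical api object from the sketch above.

# Sketch (names assumed as above): grant, inspect, then revoke
# permissions for my_user on the default virtual host '/'.
api.user.set_permission('my_user', '/',
                        configure_regex='.*',
                        write_regex='.*',
                        read_regex='.*')
print(api.user.get_permission('my_user', '/'))
api.user.delete_permission('my_user', '/')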
virtual_host = quote(virtual_host, '')
return self.http_client.delete(
    API_USER_VIRTUAL_HOST_PERMISSIONS % (
        virtual_host,
        username
    ))
def delete_permission(self, username, virtual_host)
Delete User permissions for the configured virtual host. :param str username: Username :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
5.694622
8.697758
0.654723
self._stopped.clear()
if not self._connection or self._connection.is_closed:
    self._create_connection()
while not self._stopped.is_set():
    try:
        # Check our connection for errors.
        self._connection.check_for_errors()
        self._update_consumers()
    except amqpstorm.AMQPError as why:
        # If an error occurs, re-connect and let update_consumers
        # re-open the channels.
        LOGGER.warning(why)
        self._stop_consumers()
        self._create_connection()
    time.sleep(1)
def start_server(self)
Start the RPC Server. :return:
4.221832
4.379928
0.963904
# Do we need to start more consumers?
consumer_to_start = \
    min(max(self.number_of_consumers - len(self._consumers), 0), 2)
for _ in range(consumer_to_start):
    consumer = Consumer(self.rpc_queue)
    self._start_consumer(consumer)
    self._consumers.append(consumer)

# Check that all our consumers are active.
for consumer in self._consumers:
    if consumer.active:
        continue
    self._start_consumer(consumer)
    break

# Do we have an overflow of consumers?
self._stop_consumers(self.number_of_consumers)
def _update_consumers(self)
Update Consumers. - Add more if requested. - Make sure the consumers are healthy. - Remove excess consumers. :return:
3.704399
3.823919
0.968744
return self._request('get', path, payload, headers)
def get(self, path, payload=None, headers=None)
HTTP GET operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
5.201543
15.290383
0.340184
return self._request('post', path, payload, headers)
def post(self, path, payload=None, headers=None)
HTTP POST operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
5.316162
14.679738
0.362143
return self._request('delete', path, payload, headers)
def delete(self, path, payload=None, headers=None)
HTTP DELETE operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
5.610045
14.82427
0.378437
return self._request('put', path, payload, headers)
def put(self, path, payload=None, headers=None)
HTTP PUT operation. :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
5.452461
14.765288
0.369276
url = urlparse.urljoin(self._base_url, 'api/%s' % path)
headers = headers or {}
headers['content-type'] = 'application/json'
try:
    response = requests.request(
        method, url,
        auth=self._auth,
        data=payload,
        headers=headers,
        cert=self._cert,
        verify=self._verify,
        timeout=self._timeout
    )
except requests.RequestException as why:
    raise ApiConnectionError(str(why))
json_response = self._get_json_output(response)
self._check_for_errors(response, json_response)
return json_response
def _request(self, method, path, payload=None, headers=None)
HTTP operation. :param method: Operation type (e.g. post) :param path: URI Path :param payload: HTTP Body :param headers: HTTP Headers :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :return: Response
2.436248
2.566953
0.949082
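The four verb helpers above differ only in the method string they pass to _request. A hedged sketch, assuming the HTTP client is reachable as api.http_client (an assumption) and using an illustrative endpoint.

from amqpstorm.management import ManagementApi

api = ManagementApi('http://localhost:15672', 'guest', 'guest')

# Each helper reduces to _request(method, path, payload, headers), which
# joins path onto base_url + 'api/', sends JSON, and raises ApiError or
# ApiConnectionError on failure. 'overview' is an illustrative path.
overview = api.http_client.get('overview')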
status_code = response.status_code
try:
    response.raise_for_status()
except requests.HTTPError as why:
    raise ApiError(str(why), reply_code=status_code)
if isinstance(json_response, dict) and 'error' in json_response:
    raise ApiError(json_response['error'], reply_code=status_code)
def _check_for_errors(response, json_response)
Check payload for errors. :param response: HTTP response :param json_response: Json response :raises ApiError: Raises if the remote server encountered an error. :return:
2.603336
2.826731
0.920971
if not node:
    return self.http_client.get(HEALTHCHECKS)
return self.http_client.get(HEALTHCHECKS_NODE % node)
def get(self, node=None)
Run basic healthchecks against the current node, or against a given node. Example response: > {"status":"ok"} > {"status":"failed","reason":"string"} :param node: Node name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
5.590331
5.62727
0.993436
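Usage sketch, continuing the hypothetical api object from above and assuming the healthchecks API is exposed as api.healthchecks (an assumption).

print(api.healthchecks.get())                  # e.g. {'status': 'ok'}
print(api.healthchecks.get('rabbit@my-host'))  # check a named node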
# This function calculates the mean number of pairwise differences
# between haplotypes within a single population, generalising to any number
# of alleles.

# check inputs
ac = asarray_ndim(ac, 2)

# total number of haplotypes
if an is None:
    an = np.sum(ac, axis=1)
else:
    an = asarray_ndim(an, 1)
    check_dim0_aligned(ac, an)

# total number of pairwise comparisons for each variant:
# (an choose 2)
n_pairs = an * (an - 1) / 2

# number of pairwise comparisons where there is no difference:
# sum of (ac choose 2) for each allele (i.e., number of ways to
# choose the same allele twice)
n_same = np.sum(ac * (ac - 1) / 2, axis=1)

# number of pairwise differences
n_diff = n_pairs - n_same

# mean number of pairwise differences, accounting for cases where
# there are no pairs
with ignore_invalid():
    mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill)

return mpd
def mean_pairwise_difference(ac, an=None, fill=np.nan)
Calculate for each variant the mean number of pairwise differences between chromosomes sampled from within a single population. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. an : array_like, int, shape (n_variants,), optional Allele numbers. If not provided, will be calculated from `ac`. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- mpd : ndarray, float, shape (n_variants,) Notes ----- The values returned by this function can be summed over a genome region and divided by the number of accessible bases to estimate nucleotide diversity, a.k.a. *pi*. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1]]) >>> ac = h.count_alleles() >>> allel.mean_pairwise_difference(ac) array([0. , 0.5 , 0.66666667, 0.5 , 0. , 0.83333333, 0.83333333, 1. ]) See Also -------- sequence_diversity, windowed_diversity
3.595909
3.415091
1.052947
# This function calculates the mean number of pairwise differences
# between haplotypes from two different populations, generalising to any
# number of alleles.

# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)

# total number of haplotypes sampled from each population
if an1 is None:
    an1 = np.sum(ac1, axis=1)
else:
    an1 = asarray_ndim(an1, 1)
    check_dim0_aligned(ac1, an1)
if an2 is None:
    an2 = np.sum(ac2, axis=1)
else:
    an2 = asarray_ndim(an2, 1)
    check_dim0_aligned(ac2, an2)

# total number of pairwise comparisons for each variant
n_pairs = an1 * an2

# number of pairwise comparisons where there is no difference:
# sum of (ac1 * ac2) for each allele (i.e., number of ways to
# choose the same allele twice)
n_same = np.sum(ac1 * ac2, axis=1)

# number of pairwise differences
n_diff = n_pairs - n_same

# mean number of pairwise differences, accounting for cases where
# there are no pairs
with ignore_invalid():
    mpd = np.where(n_pairs > 0, n_diff / n_pairs, fill)

return mpd
def mean_pairwise_difference_between(ac1, ac2, an1=None, an2=None, fill=np.nan)
Calculate for each variant the mean number of pairwise differences between chromosomes sampled from two different populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. an1 : array_like, int, shape (n_variants,), optional Allele numbers for the first population. If not provided, will be calculated from `ac1`. an2 : array_like, int, shape (n_variants,), optional Allele numbers for the second population. If not provided, will be calculated from `ac2`. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- mpd : ndarray, float, shape (n_variants,) Notes ----- The values returned by this function can be summed over a genome region and divided by the number of accessible bases to estimate nucleotide divergence between two populations, a.k.a. *Dxy*. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1]]) >>> ac1 = h.count_alleles(subpop=[0, 1]) >>> ac2 = h.count_alleles(subpop=[2, 3]) >>> allel.mean_pairwise_difference_between(ac1, ac2) array([0. , 0.5 , 1. , 0.5 , 0. , 1. , 0.75, nan]) See Also -------- sequence_divergence, windowed_divergence
2.635864
2.57271
1.024548
# check inputs
if not isinstance(pos, SortedIndex):
    pos = SortedIndex(pos, copy=False)
ac = asarray_ndim(ac, 2)
is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

# deal with subregion
if start is not None or stop is not None:
    loc = pos.locate_range(start, stop)
    pos = pos[loc]
    ac = ac[loc]
if start is None:
    start = pos[0]
if stop is None:
    stop = pos[-1]

# calculate mean pairwise difference
mpd = mean_pairwise_difference(ac, fill=0)

# sum differences over variants
mpd_sum = np.sum(mpd)

# calculate value per base
if is_accessible is None:
    n_bases = stop - start + 1
else:
    n_bases = np.count_nonzero(is_accessible[start-1:stop])

pi = mpd_sum / n_bases
return pi
def sequence_diversity(pos, ac, start=None, stop=None, is_accessible=None)
Estimate nucleotide diversity within a given region, which is the average proportion of sites (including monomorphic sites not present in the data) that differ between randomly chosen pairs of chromosomes. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- pi : ndarray, float, shape (n_windows,) Nucleotide diversity. Notes ----- If start and/or stop are not provided, uses the difference between the last and the first position as a proxy for the total number of sites, which can overestimate the sequence diversity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> pi = allel.sequence_diversity(pos, ac, start=1, stop=31) >>> pi 0.13978494623655915
3.084503
3.193876
0.965755
# check inputs
if not isinstance(pos, SortedIndex):
    pos = SortedIndex(pos, copy=False)
is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

# calculate mean pairwise difference
mpd = mean_pairwise_difference(ac, fill=0)

# sum differences in windows
mpd_sum, windows, counts = windowed_statistic(
    pos, values=mpd, statistic=np.sum, size=size, start=start,
    stop=stop, step=step, windows=windows, fill=0
)

# calculate value per base
pi, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible,
                       fill=fill)

return pi, windows, n_bases, counts
def windowed_diversity(pos, ac, size=None, start=None, stop=None, step=None, windows=None, is_accessible=None, fill=np.nan)
Estimate nucleotide diversity in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional The value to use where a window is completely inaccessible. Returns ------- pi : ndarray, float, shape (n_windows,) Nucleotide diversity in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. n_bases : ndarray, int, shape (n_windows,) Number of (accessible) bases in each window. counts : ndarray, int, shape (n_windows,) Number of variants in each window. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> pi, windows, n_bases, counts = allel.windowed_diversity( ... pos, ac, size=10, start=1, stop=31 ... ) >>> pi array([0.11666667, 0.21666667, 0.09090909]) >>> windows array([[ 1, 10], [11, 20], [21, 31]]) >>> n_bases array([10, 10, 11]) >>> counts array([3, 4, 2])
3.968067
3.873604
1.024386
# check inputs
pos = SortedIndex(pos, copy=False)
is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)

# locate fixed differences
loc_df = locate_fixed_differences(ac1, ac2)

# count number of fixed differences in windows
n_df, windows, counts = windowed_statistic(
    pos, values=loc_df, statistic=np.count_nonzero, size=size,
    start=start, stop=stop, step=step, windows=windows, fill=0
)

# calculate value per base
df, n_bases = per_base(n_df, windows, is_accessible=is_accessible,
                       fill=fill)

return df, windows, n_bases, counts
def windowed_df(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, is_accessible=None, fill=np.nan)
Calculate the density of fixed differences between two populations in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional The value to use where a window is completely inaccessible. Returns ------- df : ndarray, float, shape (n_windows,) Per-base density of fixed differences in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. n_bases : ndarray, int, shape (n_windows,) Number of (accessible) bases in each window. counts : ndarray, int, shape (n_windows,) Number of variants in each window. See Also -------- allel.model.locate_fixed_differences
4.004254
3.437578
1.164848
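A hedged example of windowed_df on made-up data: variants where the two sub-populations share no alleles count as fixed differences, and the density is reported per (accessible) base in each window.

import allel

g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                         [[0, 0], [0, 0], [0, 0], [0, 0]],
                         [[0, 1], [0, 1], [0, 1], [0, 1]],
                         [[0, 0], [0, 0], [1, 1], [1, 1]]])
ac1 = g.count_alleles(subpop=[0, 1])  # first two samples
ac2 = g.count_alleles(subpop=[2, 3])  # last two samples
pos = [2, 4, 7, 14]
# the variants at positions 2 and 14 are fixed differences
df, windows, n_bases, counts = allel.windowed_df(
    pos, ac1, ac2, size=10, start=1, stop=20)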
# check inputs
if not isinstance(pos, SortedIndex):
    pos = SortedIndex(pos, copy=False)
is_accessible = asarray_ndim(is_accessible, 1, allow_none=True)
if not hasattr(ac, 'count_segregating'):
    ac = AlleleCountsArray(ac, copy=False)

# deal with subregion
if start is not None or stop is not None:
    loc = pos.locate_range(start, stop)
    pos = pos[loc]
    ac = ac[loc]
if start is None:
    start = pos[0]
if stop is None:
    stop = pos[-1]

# count segregating variants
S = ac.count_segregating()

# assume number of chromosomes sampled is constant for all variants
n = ac.sum(axis=1).max()

# (n-1)th harmonic number
a1 = np.sum(1 / np.arange(1, n))

# calculate absolute value
theta_hat_w_abs = S / a1

# calculate value per base
if is_accessible is None:
    n_bases = stop - start + 1
else:
    n_bases = np.count_nonzero(is_accessible[start-1:stop])

theta_hat_w = theta_hat_w_abs / n_bases
return theta_hat_w
def watterson_theta(pos, ac, start=None, stop=None, is_accessible=None)
Calculate the value of Watterson's estimator over a given region. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. Returns ------- theta_hat_w : float Watterson's estimator (theta hat per base). Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> theta_hat_w = allel.watterson_theta(pos, ac, start=1, stop=31) >>> theta_hat_w 0.10557184750733138
3.733095
3.395226
1.099513
# check inputs
if not hasattr(ac, 'count_segregating'):
    ac = AlleleCountsArray(ac, copy=False)

# deal with subregion
if pos is not None and (start is not None or stop is not None):
    if not isinstance(pos, SortedIndex):
        pos = SortedIndex(pos, copy=False)
    loc = pos.locate_range(start, stop)
    ac = ac[loc]

# count segregating variants
S = ac.count_segregating()
if S < min_sites:
    return np.nan

# assume number of chromosomes sampled is constant for all variants
n = ac.sum(axis=1).max()

# (n-1)th harmonic number
a1 = np.sum(1 / np.arange(1, n))

# calculate Watterson's theta (absolute value)
theta_hat_w_abs = S / a1

# calculate mean pairwise difference
mpd = mean_pairwise_difference(ac, fill=0)

# calculate theta_hat pi (sum differences over variants)
theta_hat_pi_abs = np.sum(mpd)

# N.B., both theta estimates are usually divided by the number of
# (accessible) bases but here we want the absolute difference
d = theta_hat_pi_abs - theta_hat_w_abs

# calculate the denominator (standard deviation)
a2 = np.sum(1 / (np.arange(1, n)**2))
b1 = (n + 1) / (3 * (n - 1))
b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
c1 = b1 - (1 / a1)
c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
e1 = c1 / a1
e2 = c2 / (a1**2 + a2)
d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

# finally calculate Tajima's D
D = d / d_stdev
return D
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3)
Calculate the value of Tajima's D over a given region. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. pos : array_like, int, shape (n_items,), optional Variant positions, using 1-based coordinates, in ascending order. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : float Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> allel.tajima_d(ac) 3.1445848780213814 >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> allel.tajima_d(ac, pos=pos, start=7, stop=25) 3.8779735196179366
4.212989
4.201726
1.002681
d = moving_statistic(values=ac, statistic=tajima_d, size=size,
                     start=start, stop=stop, step=step,
                     min_sites=min_sites)
return d
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3)
Calculate the value of Tajima's D in moving windows of `size` variants. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- d : ndarray, float, shape (n_windows,) Tajima's D. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> D = allel.moving_tajima_d(ac, size=4, step=2) >>> D array([0.1676558 , 2.01186954, 5.70029703])
3.304922
5.553347
0.595123
# check input
dac, n = _check_dac_n(dac, n)

# need platform integer for bincount
dac = dac.astype(int, copy=False)

# compute site frequency spectrum
x = n + 1
s = np.bincount(dac, minlength=x)

return s
def sfs(dac, n=None)
Compute the site frequency spectrum given derived allele counts at a set of biallelic variants. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs : ndarray, int, shape (n_chromosomes,) Array where the kth element is the number of variant sites with k derived alleles.
6.229896
5.133232
1.21364
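A small worked example of sfs: with derived allele counts over n = 3 chromosomes, bincount gives one bin per possible count, 0 through n.

import numpy as np
import allel

dac = np.array([1, 2, 1, 3])
s = allel.sfs(dac, n=3)
# bins 0..3: no invariant sites, two singletons, one doubleton,
# one site fixed for the derived allele -> [0, 2, 1, 1]
print(s)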
# check input
ac, n = _check_ac_n(ac, n)

# compute minor allele counts
mac = np.amin(ac, axis=1)

# need platform integer for bincount
mac = mac.astype(int, copy=False)

# compute folded site frequency spectrum
x = n//2 + 1
s = np.bincount(mac, minlength=x)

return s
def sfs_folded(ac, n=None)
Compute the folded site frequency spectrum given reference and alternate allele counts at a set of biallelic variants. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns ------- sfs_folded : ndarray, int, shape (n_chromosomes//2,) Array where the kth element is the number of variant sites with a minor allele count of k.
6.624519
4.877339
1.358224
# compute site frequency spectrum
s = sfs(dac, n=n)

# apply scaling
s = scale_sfs(s)

return s
def sfs_scaled(dac, n=None)
Compute the site frequency spectrum scaled such that a constant value is expected across the spectrum for neutral variation and constant population size. Parameters ---------- dac : array_like, int, shape (n_variants,) Array of derived allele counts. n : int, optional The total number of chromosomes called. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) An array where the value of the kth element is the number of variants with k derived alleles, multiplied by k.
7.391342
7.887751
0.937066
k = np.arange(s.size)
out = s * k
return out
def scale_sfs(s)
Scale a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. Returns ------- sfs_scaled : ndarray, int, shape (n_chromosomes,) Scaled site frequency spectrum.
7.678865
13.736296
0.55902
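Scaling multiplies the kth entry by k; under neutrality and constant population size the expected spectrum is proportional to 1/k, so the scaled spectrum is expected to be flat. Continuing the example above (assuming scale_sfs is exported at the top level like the other spectrum helpers).

import numpy as np
import allel

s = np.array([0, 2, 1, 1])
print(allel.scale_sfs(s))  # k = [0, 1, 2, 3] -> [0, 2, 2, 3]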
# check input
ac, n = _check_ac_n(ac, n)

# compute the site frequency spectrum
s = sfs_folded(ac, n=n)

# apply scaling
s = scale_sfs_folded(s, n)

return s
def sfs_folded_scaled(ac, n=None)
Compute the folded site frequency spectrum scaled such that a constant value is expected across the spectrum for neutral variation and constant population size. Parameters ---------- ac : array_like, int, shape (n_variants, 2) Allele counts array. n : int, optional The total number of chromosomes called. Returns ------- sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,) An array where the value of the kth element is the number of variants with minor allele count k, multiplied by the scaling factor (k * (n - k) / n).
4.279909
3.805521
1.124658
k = np.arange(s.shape[0])
out = s * k * (n - k) / n
return out
def scale_sfs_folded(s, n)
Scale a folded site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes//2,) Folded site frequency spectrum. n : int Number of chromosomes called. Returns ------- sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,) Scaled folded site frequency spectrum.
5.161904
7.664744
0.673461
# check inputs
dac1, n1 = _check_dac_n(dac1, n1)
dac2, n2 = _check_dac_n(dac2, n2)

# compute site frequency spectrum
x = n1 + 1
y = n2 + 1

# need platform integer for bincount
tmp = (dac1 * y + dac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)

return s
def joint_sfs(dac1, dac2, n1=None, n2=None)
Compute the joint site frequency spectrum between two populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs : ndarray, int, shape (m_chromosomes, n_chromosomes) Array where the (i, j)th element is the number of variant sites with i derived alleles in the first population and j derived alleles in the second population.
4.065212
3.823812
1.063131
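A worked sketch of the bincount trick used above: each variant maps to cell (dac1, dac2) via the flat index dac1 * (n2 + 1) + dac2.

import numpy as np
import allel

dac1 = np.array([0, 1])
dac2 = np.array([1, 0])
js = allel.joint_sfs(dac1, dac2, n1=2, n2=2)
# one site with (0 derived in pop1, 1 in pop2), one with (1, 0):
# js[0, 1] == 1 and js[1, 0] == 1, all other cells zero
print(js)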
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)

# compute minor allele counts
mac1 = np.amin(ac1, axis=1)
mac2 = np.amin(ac2, axis=1)

# compute site frequency spectrum
x = n1//2 + 1
y = n2//2 + 1
tmp = (mac1 * y + mac2).astype(int, copy=False)
s = np.bincount(tmp)
s.resize(x, y)

return s
def joint_sfs_folded(ac1, ac2, n1=None, n2=None)
Compute the joint folded site frequency spectrum between two populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, 2) Allele counts for the first population. ac2 : array_like, int, shape (n_variants, 2) Allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded : ndarray, int, shape (n1//2 + 1, n2//2 + 1) Array where the (i, j)th element is the number of variant sites with a minor allele count of i in the first population and j in the second population.
3.360686
2.670726
1.258342
# compute site frequency spectrum
s = joint_sfs(dac1, dac2, n1=n1, n2=n2)

# apply scaling
s = scale_joint_sfs(s)

return s
def joint_sfs_scaled(dac1, dac2, n1=None, n2=None)
Compute the joint site frequency spectrum between two populations, scaled such that a constant value is expected across the spectrum for neutral variation, constant population size and unrelated populations. Parameters ---------- dac1 : array_like, int, shape (n_variants,) Derived allele counts for the first population. dac2 : array_like, int, shape (n_variants,) Derived allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_scaled : ndarray, int, shape (n1 + 1, n2 + 1) Array where the (i, j)th element is the scaled frequency of variant sites with i derived alleles in the first population and j derived alleles in the second population.
3.940327
4.185941
0.941324
i = np.arange(s.shape[0])[:, None]
j = np.arange(s.shape[1])[None, :]
out = (s * i) * j
return out
def scale_joint_sfs(s)
Scale a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n1, n2) Joint site frequency spectrum. Returns ------- joint_sfs_scaled : ndarray, int, shape (n1, n2) Scaled joint site frequency spectrum.
3.428765
3.762452
0.911311
# check inputs
ac1, n1 = _check_ac_n(ac1, n1)
ac2, n2 = _check_ac_n(ac2, n2)

# compute site frequency spectrum
s = joint_sfs_folded(ac1, ac2, n1=n1, n2=n2)

# apply scaling
s = scale_joint_sfs_folded(s, n1, n2)

return s
def joint_sfs_folded_scaled(ac1, ac2, n1=None, n2=None)
Compute the joint folded site frequency spectrum between two populations, scaled such that a constant value is expected across the spectrum for neutral variation, constant population size and unrelated populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, 2) Allele counts for the first population. ac2 : array_like, int, shape (n_variants, 2) Allele counts for the second population. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded_scaled : ndarray, int, shape (n1//2 + 1, n2//2 + 1) Array where the (i, j)th element is the scaled frequency of variant sites with a minor allele count of i in the first population and j in the second population.
2.497306
2.4195
1.032158
out = np.empty_like(s)
for i in range(s.shape[0]):
    for j in range(s.shape[1]):
        out[i, j] = s[i, j] * i * j * (n1 - i) * (n2 - j)
return out
def scale_joint_sfs_folded(s, n1, n2)
Scale a folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (m_chromosomes//2, n_chromosomes//2) Folded joint site frequency spectrum. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded_scaled : ndarray, int, shape (m_chromosomes//2, n_chromosomes//2) Scaled folded joint site frequency spectrum.
2.251354
2.731575
0.824196
# check inputs
s = asarray_ndim(s, 1)
assert s.shape[0] <= n + 1, 'invalid number of chromosomes'

# need to check s has all entries up to n
if s.shape[0] < n + 1:
    sn = np.zeros(n + 1, dtype=s.dtype)
    sn[:s.shape[0]] = s
    s = sn

# fold
nf = (n + 1) // 2
n = nf * 2
o = s[:nf] + s[nf:n][::-1]

return o
def fold_sfs(s, n)
Fold a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum n : int Total number of chromosomes called. Returns ------- sfs_folded : ndarray, int Folded site frequency spectrum
4.089837
3.847441
1.063002
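Folding adds the count of sites with k derived alleles to the count with n - k, since without an outgroup only the minor/major orientation is observable. Continuing the n = 3 spectrum from the sfs example above.

import numpy as np
import allel

s = np.array([0, 2, 1, 1])  # unfolded spectrum, n = 3
# pairs bins (0, 3) and (1, 2): [0 + 1, 2 + 1] -> [1, 3]
print(allel.fold_sfs(s, n=3))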
# check inputs
s = asarray_ndim(s, 2)
assert s.shape[0] <= n1 + 1, 'invalid number of chromosomes'
assert s.shape[1] <= n2 + 1, 'invalid number of chromosomes'

# need to check s has all entries up to m
if s.shape[0] < n1 + 1:
    sm = np.zeros((n1 + 1, s.shape[1]), dtype=s.dtype)
    sm[:s.shape[0]] = s
    s = sm

# need to check s has all entries up to n
if s.shape[1] < n2 + 1:
    sn = np.zeros((s.shape[0], n2 + 1), dtype=s.dtype)
    sn[:, :s.shape[1]] = s
    s = sn

# fold
mf = (n1 + 1) // 2
nf = (n2 + 1) // 2
n1 = mf * 2
n2 = nf * 2
o = (s[:mf, :nf] +                  # top left
     s[mf:n1, :nf][::-1] +          # top right
     s[:mf, nf:n2][:, ::-1] +       # bottom left
     s[mf:n1, nf:n2][::-1, ::-1])   # bottom right

return o
def fold_joint_sfs(s, n1, n2)
Fold a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (m_chromosomes, n_chromosomes) Joint site frequency spectrum. n1, n2 : int, optional The total number of chromosomes called in each population. Returns ------- joint_sfs_folded : ndarray, int Folded joint site frequency spectrum.
2.28234
2.14641
1.063329
import matplotlib.pyplot as plt
import scipy.stats

# check inputs
s = asarray_ndim(s, 1)

# setup axes
if ax is None:
    fig, ax = plt.subplots()

# setup data
if bins is None:
    if clip_endpoints:
        x = np.arange(1, s.shape[0]-1)
        y = s[1:-1]
    else:
        x = np.arange(s.shape[0])
        y = s
else:
    if clip_endpoints:
        y, b, _ = scipy.stats.binned_statistic(
            np.arange(1, s.shape[0]-1),
            values=s[1:-1],
            bins=bins,
            statistic='sum')
    else:
        y, b, _ = scipy.stats.binned_statistic(
            np.arange(s.shape[0]),
            values=s,
            bins=bins,
            statistic='sum')
    # use bin midpoints for plotting
    x = (b[:-1] + b[1:]) / 2
if n:
    # convert allele counts to allele frequencies
    x = x / n
    ax.set_xlabel('derived allele frequency')
else:
    ax.set_xlabel('derived allele count')

# do plotting
if plot_kwargs is None:
    plot_kwargs = dict()
ax.plot(x, y, label=label, **plot_kwargs)

# tidy
ax.set_yscale(yscale)
ax.set_ylabel('site frequency')
ax.autoscale(axis='x', tight=True)

return ax
def plot_sfs(s, yscale='log', bins=None, n=None, clip_endpoints=True, label=None, plot_kwargs=None, ax=None)
Plot a site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
2.296087
2.158731
1.063628
ax = plot_sfs(*args, **kwargs)
n = kwargs.get('n', None)
if n:
    ax.set_xlabel('minor allele frequency')
else:
    ax.set_xlabel('minor allele count')
return ax
def plot_sfs_folded(*args, **kwargs)
Plot a folded site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
3.610062
3.690419
0.978225
kwargs.setdefault('yscale', 'linear')
ax = plot_sfs(*args, **kwargs)
ax.set_ylabel('scaled site frequency')
return ax
def plot_sfs_scaled(*args, **kwargs)
Plot a scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
4.065731
5.296726
0.767593
kwargs.setdefault('yscale', 'linear')
ax = plot_sfs_folded(*args, **kwargs)
ax.set_ylabel('scaled site frequency')
n = kwargs.get('n', None)
if n:
    ax.set_xlabel('minor allele frequency')
else:
    ax.set_xlabel('minor allele count')
return ax
def plot_sfs_folded_scaled(*args, **kwargs)
Plot a folded scaled site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes/2,) Site frequency spectrum. yscale : string, optional Y axis scale. bins : int or array_like, int, optional Allele count bins. n : int, optional Number of chromosomes sampled. If provided, X axis will be plotted as allele frequency, otherwise as allele count. clip_endpoints : bool, optional If True, do not plot first and last values from frequency spectrum. label : string, optional Label for data series in plot. plot_kwargs : dict-like Additional keyword arguments, passed through to ax.plot(). ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. Returns ------- ax : axes The axes on which the plot was drawn.
3.138923
3.187671
0.984707
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm

# check inputs
s = asarray_ndim(s, 2)

# setup axes
if ax is None:
    w = plt.rcParams['figure.figsize'][0]
    fig, ax = plt.subplots(figsize=(w, w))

# set plotting defaults
if imshow_kwargs is None:
    imshow_kwargs = dict()
imshow_kwargs.setdefault('cmap', 'jet')
imshow_kwargs.setdefault('interpolation', 'none')
imshow_kwargs.setdefault('aspect', 'auto')
imshow_kwargs.setdefault('norm', LogNorm())

# plot data
ax.imshow(s.T, **imshow_kwargs)

# tidy
ax.invert_yaxis()
ax.set_xlabel('derived allele count (population 1)')
ax.set_ylabel('derived allele count (population 2)')

return ax
def plot_joint_sfs(s, ax=None, imshow_kwargs=None)
Plot a joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
2.406808
2.303504
1.044847
ax = plot_joint_sfs(*args, **kwargs)
ax.set_xlabel('minor allele count (population 1)')
ax.set_ylabel('minor allele count (population 2)')
return ax
def plot_joint_sfs_folded(*args, **kwargs)
Plot a folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
3.396128
3.799074
0.893936
imshow_kwargs = kwargs.get('imshow_kwargs', dict())
imshow_kwargs.setdefault('norm', None)
kwargs['imshow_kwargs'] = imshow_kwargs
ax = plot_joint_sfs(*args, **kwargs)
return ax
def plot_joint_sfs_scaled(*args, **kwargs)
Plot a scaled joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
2.71855
3.167956
0.85814
imshow_kwargs = kwargs.get('imshow_kwargs', dict())
imshow_kwargs.setdefault('norm', None)
kwargs['imshow_kwargs'] = imshow_kwargs
ax = plot_joint_sfs_folded(*args, **kwargs)
ax.set_xlabel('minor allele count (population 1)')
ax.set_ylabel('minor allele count (population 2)')
return ax
def plot_joint_sfs_folded_scaled(*args, **kwargs)
Plot a scaled folded joint site frequency spectrum. Parameters ---------- s : array_like, int, shape (n_chromosomes_pop1/2, n_chromosomes_pop2/2) Joint site frequency spectrum. ax : axes, optional Axes on which to draw. If not provided, a new figure will be created. imshow_kwargs : dict-like Additional keyword arguments, passed through to ax.imshow(). Returns ------- ax : axes The axes on which the plot was drawn.
2.873175
2.794944
1.02799
if not x.flags.writeable:
    if not x.flags.owndata:
        x = x.copy(order='A')
    x.setflags(write=True)
return x
def memoryview_safe(x)
Make array safe to run in a Cython memoryview-based kernel. These kernels typically break down with the error ``ValueError: buffer source array is read-only`` when running in dask distributed. See Also -------- https://github.com/dask/distributed/issues/1978 https://github.com/cggh/scikit-allel/issues/206
3.219173
3.94538
0.815935
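A quick sketch of the function above: an array that arrives read-only (as deserialized arrays from dask distributed do) comes back with a writeable buffer, and a copy is made only when the array does not own its data.

import numpy as np

x = np.frombuffer(b'\x00\x01\x02\x03', dtype=np.uint8)  # read-only view
y = memoryview_safe(x)
assert y.flags.writeable and not x.flags.writeable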
store_samples = False
if fields is None:
    # add samples by default
    return True, None
if isinstance(fields, str):
    fields = [fields]
else:
    fields = list(fields)
if 'samples' in fields:
    fields.remove('samples')
    store_samples = True
elif '*' in fields:
    store_samples = True
return store_samples, fields
def _prep_fields_param(fields)
Prepare the `fields` parameter, and determine whether or not to store samples.
3.639806
2.857218
1.273899
n_variants = 0
before_all = time.time()
before_chunk = before_all
for chunk, chunk_length, chrom, pos in it:
    after_chunk = time.time()
    elapsed_chunk = after_chunk - before_chunk
    elapsed = after_chunk - before_all
    n_variants += chunk_length
    chrom = text_type(chrom, 'utf8')
    message = (
        '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' %
        (prefix, n_variants, elapsed, elapsed_chunk,
         int(chunk_length // elapsed_chunk))
    )
    if chrom:
        message += '; %s:%s' % (chrom, pos)
    print(message, file=log)
    log.flush()
    yield chunk, chunk_length, chrom, pos
    before_chunk = after_chunk
after_all = time.time()
elapsed = after_all - before_all
print('%s all done (%s rows/s)' %
      (prefix, int(n_variants // elapsed)), file=log)
log.flush()
def _chunk_iter_progress(it, log, prefix)
Wrap a chunk iterator for progress logging.
2.69929
2.620053
1.030242
# samples requested?
# noinspection PyTypeChecker
store_samples, fields = _prep_fields_param(fields)

# setup
fields, samples, headers, it = iter_vcf_chunks(
    input=input, fields=fields, exclude_fields=exclude_fields,
    types=types, numbers=numbers, alt_number=alt_number,
    buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
    region=region, tabix=tabix, samples=samples,
    transformers=transformers
)

# handle field renaming
if rename_fields:
    rename_fields, it = _do_rename(it, fields=fields,
                                   rename_fields=rename_fields,
                                   headers=headers)

# setup progress logging
if log is not None:
    it = _chunk_iter_progress(it, log, prefix='[read_vcf]')

# read all chunks into a list
chunks = [d[0] for d in it]

if chunks:

    # setup output
    output = dict()
    if len(samples) > 0 and store_samples:
        output['samples'] = samples

    # find array keys
    keys = sorted(chunks[0].keys())

    # concatenate chunks
    for k in keys:
        output[k] = np.concatenate([chunk[k] for chunk in chunks], axis=0)

else:
    output = None

return output
def read_vcf(input, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None)
Read data from a VCF file into NumPy arrays. .. versionchanged:: 1.12.0 Now returns None if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string or file-like {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- data : dict[str, ndarray] A dictionary holding arrays, or None if no variants were found.
3.700475
3.749058
0.987041
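A usage sketch for read_vcf; the file name and region are hypothetical.

import allel

callset = allel.read_vcf('example.vcf',
                         fields=['variants/CHROM', 'variants/POS',
                                 'calldata/GT'],
                         region='chr1:1-1000000')
if callset is not None:  # None when no variants match
    pos = callset['variants/POS']
    gt = allel.GenotypeArray(callset['calldata/GT'])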
# guard condition
if not overwrite and os.path.exists(output):
    raise ValueError('file exists at path %r; use overwrite=True to '
                     'replace' % output)

# read all data into memory
data = read_vcf(
    input=input, fields=fields, exclude_fields=exclude_fields,
    rename_fields=rename_fields, types=types, numbers=numbers,
    alt_number=alt_number, buffer_size=buffer_size,
    chunk_length=chunk_length, log=log, fills=fills, region=region,
    tabix=tabix, samples=samples, transformers=transformers
)

if data is None:
    # no data, bail out
    return

# setup save function
if compressed:
    savez = np.savez_compressed
else:
    savez = np.savez

# save as npz
savez(output, **data)
def vcf_to_npz(input, output, compressed=True, overwrite=False, fields=None, exclude_fields=None, rename_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix=True, samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None)
Read data from a VCF file into NumPy arrays and save as a .npz file. .. versionchanged:: 1.12.0 Now will not create any output file if no variants are found in the VCF file or matching the requested region. Parameters ---------- input : string {input} output : string {output} compressed : bool, optional If True (default), save with compression. overwrite : bool, optional {overwrite} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} rename_fields : dict[str -> str], optional {rename_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log}
2.411504
2.457221
0.981395
# setup common keyword args
kwds = dict(fields=fields, exclude_fields=exclude_fields, types=types,
            numbers=numbers, alt_number=alt_number,
            chunk_length=chunk_length, fills=fills, samples=samples,
            region=region)

# setup input stream
stream = _setup_input_stream(input=input, region=region, tabix=tabix,
                             buffer_size=buffer_size)

# setup iterator
fields, samples, headers, it = _iter_vcf_stream(stream, **kwds)

# setup transformers
if transformers is not None:
    # API flexibility
    if not isinstance(transformers, (list, tuple)):
        transformers = [transformers]
    for trans in transformers:
        fields = trans.transform_fields(fields)
    it = _chunk_iter_transform(it, transformers)

return fields, samples, headers, it
def iter_vcf_chunks(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', samples=None, transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH)
Iterate over chunks of data from a VCF file as NumPy arrays. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} samples : list of strings {samples} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} Returns ------- fields : list of strings Normalised names of fields that will be extracted. samples : ndarray Samples for which data will be extracted. headers : VCFHeaders Tuple of metadata extracted from VCF headers. it : iterator Chunk iterator.
2.931031
2.788053
1.051282
import pandas

# samples requested?
# noinspection PyTypeChecker
_, fields = _prep_fields_param(fields)

# setup
fields, _, _, it = iter_vcf_chunks(
    input=input, fields=fields, exclude_fields=exclude_fields,
    types=types, numbers=numbers, alt_number=alt_number,
    buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
    region=region, tabix=tabix, samples=[], transformers=transformers
)

# setup progress logging
if log is not None:
    it = _chunk_iter_progress(it, log, prefix='[vcf_to_dataframe]')

# read all chunks into a list
chunks = [d[0] for d in it]

# setup output
output = None

if chunks:
    # concatenate chunks
    output = pandas.concat([_chunk_to_dataframe(fields, chunk)
                            for chunk in chunks])

return output
def vcf_to_dataframe(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None)
Read data from a VCF file into a pandas DataFrame. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- df : pandas.DataFrame
4.249186
4.578972
0.927978
# samples requested?
# noinspection PyTypeChecker
_, fields = _prep_fields_param(fields)

# setup
fields, _, _, it = iter_vcf_chunks(
    input=input, fields=fields, exclude_fields=exclude_fields,
    types=types, numbers=numbers, alt_number=alt_number,
    buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
    region=region, tabix=tabix, samples=[], transformers=transformers
)

# setup progress logging
if log is not None:
    it = _chunk_iter_progress(it, log, prefix='[vcf_to_csv]')

kwargs['index'] = False
for i, (chunk, _, _, _) in enumerate(it):
    df = _chunk_to_dataframe(fields, chunk)
    if i == 0:
        kwargs['header'] = True
        kwargs['mode'] = 'w'
    else:
        kwargs['header'] = False
        kwargs['mode'] = 'a'
    df.to_csv(output, **kwargs)
def vcf_to_csv(input, output, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None, **kwargs)
r"""Read data from a VCF file and write out to a comma-separated values (CSV) file. Parameters ---------- input : string {input} output : string {output} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} kwargs : keyword arguments All remaining keyword arguments are passed through to pandas.DataFrame.to_csv(). E.g., to write a tab-delimited file, provide `sep='\t'`.
3.455195
3.475999
0.994015
# samples requested?
# noinspection PyTypeChecker
_, fields = _prep_fields_param(fields)

# setup chunk iterator
# N.B., set samples to empty list so we don't get any calldata fields
fields, _, _, it = iter_vcf_chunks(
    input=input, fields=fields, exclude_fields=exclude_fields,
    types=types, numbers=numbers, alt_number=alt_number,
    buffer_size=buffer_size, chunk_length=chunk_length, fills=fills,
    region=region, tabix=tabix, samples=[], transformers=transformers
)

# setup progress logging
if log is not None:
    it = _chunk_iter_progress(it, log, prefix='[vcf_to_recarray]')

# read all chunks into a list
chunks = [d[0] for d in it]

# setup output
output = None

if chunks:
    # concatenate chunks
    output = np.concatenate([_chunk_to_recarray(fields, chunk)
                             for chunk in chunks])

return output
def vcf_to_recarray(input, fields=None, exclude_fields=None, types=None, numbers=None, alt_number=DEFAULT_ALT_NUMBER, fills=None, region=None, tabix='tabix', transformers=None, buffer_size=DEFAULT_BUFFER_SIZE, chunk_length=DEFAULT_CHUNK_LENGTH, log=None)
Read data from a VCF file into a NumPy recarray. Parameters ---------- input : string {input} fields : list of strings, optional {fields} exclude_fields : list of strings, optional {exclude_fields} types : dict, optional {types} numbers : dict, optional {numbers} alt_number : int, optional {alt_number} fills : dict, optional {fills} region : string, optional {region} tabix : string, optional {tabix} transformers : list of transformer objects, optional {transformers} buffer_size : int, optional {buffer_size} chunk_length : int, optional {chunk_length} log : file-like, optional {log} Returns ------- ra : np.rec.array
4.5022
4.734571
0.95092
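A usage sketch mirroring the two functions above, assuming the same placeholder input; the result is a structured NumPy record array with one column per extracted field.

import allel

ra = allel.vcf_to_recarray('example.vcf', fields=['CHROM', 'POS', 'QUAL'])
print(ra.dtype.names)
print(ra[:5])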
# check inputs if isinstance(sequences, np.ndarray): # single sequence sequences = [sequences] names = [names] if len(sequences) != len(names): raise ValueError('must provide the same number of sequences and names') for sequence in sequences: if sequence.dtype != np.dtype('S1'): raise ValueError('expected S1 dtype, found %r' % sequence.dtype) # force binary mode mode = 'ab' if 'a' in mode else 'wb' # write to file with open(path, mode=mode) as fasta: for name, sequence in zip(names, sequences): # force bytes if isinstance(name, text_type): name = name.encode('ascii') header = b'>' + name + b'\n' fasta.write(header) for i in range(0, sequence.size, width): line = sequence[i:i+width].tobytes() + b'\n' fasta.write(line)
def write_fasta(path, sequences, names, mode='w', width=80)
Write nucleotide sequences stored as numpy arrays to a FASTA file. Parameters ---------- path : string File path. sequences : sequence of arrays One or more ndarrays of dtype 'S1' containing the sequences. names : sequence of strings Names of the sequences. mode : string, optional Use 'a' to append to an existing file. width : int, optional Maximum line width.
2.364797
2.287656
1.03372
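A small demonstration of the writer above; np.frombuffer is one way to obtain the required S1 dtype, and the output path is a placeholder.

import numpy as np

# a nucleotide sequence as an array of single bytes (dtype 'S1')
seq = np.frombuffer(b'ACGTACGTACGT', dtype='S1')
# width=5 wraps the sequence over multiple lines in the output
write_fasta('example.fasta', [seq], ['chr1'], width=5)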
# check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # count hets n_het = np.asarray(g.count_het(axis=1)) n_called = np.asarray(g.count_called(axis=1)) # calculate rate of observed heterozygosity, accounting for variants # where all calls are missing with ignore_invalid(): ho = np.where(n_called > 0, n_het / n_called, fill) return ho
def heterozygosity_observed(g, fill=np.nan)
Calculate the rate of observed heterozygosity for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where all calls are missing. Returns ------- ho : ndarray, float, shape (n_variants,) Observed heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.heterozygosity_observed(g) array([0. , 0.33333333, 0. , 0.5 ])
3.741098
3.337521
1.120921
# check inputs af = asarray_ndim(af, 2) # calculate expected heterozygosity out = 1 - np.sum(np.power(af, ploidy), axis=1) # fill values where allele frequencies could not be calculated af_sum = np.sum(af, axis=1) with ignore_invalid(): out[(af_sum < 1) | np.isnan(af_sum)] = fill return out
def heterozygosity_expected(af, ploidy, fill=np.nan)
Calculate the expected rate of heterozygosity for each variant under Hardy-Weinberg equilibrium. Parameters ---------- af : array_like, float, shape (n_variants, n_alleles) Allele frequencies array. ploidy : int Sample ploidy. fill : float, optional Use this value for variants where allele frequencies do not sum to 1. Returns ------- he : ndarray, float, shape (n_variants,) Expected heterozygosity Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> af = g.count_alleles().to_frequencies() >>> allel.heterozygosity_expected(af, ploidy=2) array([0. , 0.5 , 0.66666667, 0.375 ])
3.502588
4.179441
0.838052
# check inputs if not hasattr(g, 'count_het') or not hasattr(g, 'count_called'): g = GenotypeArray(g, copy=False) # calculate observed and expected heterozygosity ho = heterozygosity_observed(g) af = g.count_alleles().to_frequencies() he = heterozygosity_expected(af, ploidy=g.shape[-1], fill=0) # calculate inbreeding coefficient, accounting for variants with no # expected heterozygosity with ignore_invalid(): f = np.where(he > 0, 1 - (ho / he), fill) return f
def inbreeding_coefficient(g, fill=np.nan)
Calculate the inbreeding coefficient for each variant. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. fill : float, optional Use this value for variants where the expected heterozygosity is zero. Returns ------- f : ndarray, float, shape (n_variants,) Inbreeding coefficient. Notes ----- The inbreeding coefficient is calculated as *1 - (Ho/He)* where *Ho* is the observed heterozygosity and *He* is the expected heterozygosity. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [0, 0]], ... [[0, 0], [0, 1], [1, 1]], ... [[0, 0], [1, 1], [2, 2]], ... [[1, 1], [1, 2], [-1, -1]]]) >>> allel.inbreeding_coefficient(g) array([ nan, 0.33333333, 1. , -0.33333333])
4.6792
4.462459
1.04857
# setup g = GenotypeArray(g, dtype='i1', copy=True) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # run the phasing # N.B., a copy has already been made, so no need to make memoryview safe is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # outputs return g
def phase_progeny_by_transmission(g)
Phase progeny genotypes from a trio or cross using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. Returns ------- g : ndarray, int8, shape (n_variants, n_samples, 2) Genotype array with progeny phased where possible. Examples -------- >>> import allel >>> g = allel.GenotypeArray([ ... [[0, 0], [0, 0], [0, 0]], ... [[1, 1], [1, 1], [1, 1]], ... [[0, 0], [1, 1], [0, 1]], ... [[1, 1], [0, 0], [0, 1]], ... [[0, 0], [0, 1], [0, 0]], ... [[0, 0], [0, 1], [0, 1]], ... [[0, 1], [0, 0], [0, 1]], ... [[0, 1], [0, 1], [0, 1]], ... [[0, 1], [1, 2], [0, 1]], ... [[1, 2], [0, 1], [1, 2]], ... [[0, 1], [2, 3], [0, 2]], ... [[2, 3], [0, 1], [1, 3]], ... [[0, 0], [0, 0], [-1, -1]], ... [[0, 0], [0, 0], [1, 1]], ... ], dtype='i1') >>> g = allel.phase_progeny_by_transmission(g) >>> print(g.to_str(row_threshold=None)) 0/0 0/0 0|0 1/1 1/1 1|1 0/0 1/1 0|1 1/1 0/0 1|0 0/0 0/1 0|0 0/0 0/1 0|1 0/1 0/0 1|0 0/1 0/1 0/1 0/1 1/2 0|1 1/2 0/1 2|1 0/1 2/3 0|2 2/3 0/1 3|1 0/0 0/0 ./. 0/0 0/0 1/1 >>> g.is_phased array([[False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, True], [False, False, True], [False, False, True], [False, False, True], [False, False, False], [False, False, False]])
6.725786
6.289349
1.069393
# setup check_type(g, GenotypeArray) check_dtype(g.values, 'i1') check_ploidy(g.ploidy, 2) if g.is_phased is None: raise ValueError('genotype array must first have progeny phased by transmission') check_min_samples(g.n_samples, 3) # run the phasing g._values = memoryview_safe(g.values) g._is_phased = memoryview_safe(g.is_phased) _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size) # outputs return g
def phase_parents_by_transmission(g, window_size)
Phase parent genotypes from a trio or cross, given progeny genotypes already phased by Mendelian transmission. Parameters ---------- g : GenotypeArray Genotype array, with parents as first two columns and progeny as remaining columns, where progeny genotypes are already phased. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. Returns ------- g : GenotypeArray Genotype array with parents phased where possible.
5.058883
4.523187
1.118433
# setup g = np.asarray(g, dtype='i1') g = GenotypeArray(g, copy=copy) g._values = memoryview_safe(g.values) check_ploidy(g.ploidy, 2) check_min_samples(g.n_samples, 3) # phase the progeny is_phased = _opt_phase_progeny_by_transmission(g.values) g.is_phased = np.asarray(is_phased).view(bool) # phase the parents _opt_phase_parents_by_transmission(g.values, is_phased, window_size) return g
def phase_by_transmission(g, window_size, copy=True)
Phase genotypes in a trio or cross where possible using Mendelian transmission. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, 2) Genotype array, with parents as first two columns and progeny as remaining columns. window_size : int Number of previous heterozygous sites to include when phasing each parent. A number somewhere between 10 and 100 may be appropriate, depending on levels of heterozygosity and quality of data. copy : bool, optional If False, attempt to phase genotypes in-place. Note that this is only possible if the input array has int8 dtype, otherwise a copy is always made regardless of this parameter. Returns ------- g : GenotypeArray Genotype array with progeny phased where possible.
4.428724
3.92299
1.128916
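A hedged sketch of the convenience wrapper above, reusing a cut-down version of the trio data from the phase_progeny_by_transmission example; the window_size value here is illustrative, not a recommendation.

import allel

g = allel.GenotypeArray([
    [[0, 0], [0, 0], [0, 0]],
    [[1, 1], [1, 1], [1, 1]],
    [[0, 0], [1, 1], [0, 1]],
    [[1, 1], [0, 0], [0, 1]],
], dtype='i1')
g = allel.phase_by_transmission(g, window_size=10)
print(g.is_phased)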
if blen is None: if hasattr(data, 'chunklen'): # bcolz carray return data.chunklen elif hasattr(data, 'chunks') and \ hasattr(data, 'shape') and \ hasattr(data.chunks, '__len__') and \ hasattr(data.shape, '__len__') and \ len(data.chunks) == len(data.shape): # something like h5py dataset return data.chunks[0] else: # fall back to something simple, ~1Mb chunks row = np.asarray(data[0]) return max(1, (2**20) // row.nbytes) else: return blen
def get_blen_array(data, blen=None)
Try to guess a reasonable block length to use for block-wise iteration over `data`.
3.518659
3.238396
1.086544
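To make the fallback branch concrete: a plain ndarray has neither a bcolz-style chunklen nor h5py-style chunks, so the block length is derived from roughly 1Mb divided by the size of one row. A sketch using the function above:

import numpy as np

data = np.zeros((100000, 100), dtype='f8')  # one row is 800 bytes
blen = get_blen_array(data)
print(blen)  # (2**20) // 800 == 1310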
# need a file name even though nothing is ever written fn = tempfile.mktemp() # file creation args kwargs['mode'] = 'w' kwargs['driver'] = 'core' kwargs['backing_store'] = False # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
def h5fmem(**kwargs)
Create an in-memory HDF5 file.
4.620164
4.296307
1.075381
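A sketch of what the 'core' driver with backing_store=False provides: a fully functional h5py File object that never touches disk. Requires h5py to be installed.

import numpy as np

h5f = h5fmem()  # in-memory HDF5 file; nothing is written to disk
h5f.create_dataset('calldata', data=np.arange(10))
print(h5f['calldata'][:])
h5f.close()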
# create temporary file name suffix = kwargs.pop('suffix', '.h5') prefix = kwargs.pop('prefix', 'scikit_allel_') tempdir = kwargs.pop('dir', None) fn = tempfile.mktemp(suffix=suffix, prefix=prefix, dir=tempdir) atexit.register(os.remove, fn) # file creation args kwargs['mode'] = 'w' # open HDF5 file h5f = h5py.File(fn, **kwargs) return h5f
def h5ftmp(**kwargs)
Create an HDF5 file backed by a temporary file.
3.292353
3.023727
1.088839
# setup blen = _util.get_blen_array(data, blen) if stop is None: stop = len(data) else: stop = min(stop, len(data)) length = stop - start if length < 0: raise ValueError('invalid stop/start') # copy block-wise for bi in range(start, stop, blen): bj = min(bi+blen, stop) bl = bj - bi arr[offset:offset+bl] = data[bi:bj] offset += bl
def store(data, arr, start=0, stop=None, offset=0, blen=None)
Copy `data` block-wise into `arr`.
2.913813
2.688041
1.083991
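A small demonstration of the store helper above, copying a NumPy source block-wise into a slice of a preallocated target; any array-like supporting slice assignment should behave the same way.

import numpy as np

src = np.arange(5)
dst = np.zeros(10, dtype=src.dtype)
store(src, dst, offset=2)  # copies into dst[2:7]
print(dst)  # [0 0 0 1 2 3 4 0 0 0]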
# setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) if stop is None: stop = len(data) else: stop = min(stop, len(data)) length = stop - start if length < 0: raise ValueError('invalid stop/start') # copy block-wise out = None for i in range(start, stop, blen): j = min(i+blen, stop) block = data[i:j] if out is None: out = getattr(storage, create)(block, expectedlen=length, **kwargs) else: out.append(block) return out
def copy(data, start=0, stop=None, blen=None, storage=None, create='array', **kwargs)
Copy `data` block-wise into a new array.
2.711944
2.716707
0.998247
# setup names, columns = _util.check_table_like(tbl) storage = _util.get_storage(storage) blen = _util.get_blen_table(tbl, blen) if stop is None: stop = len(columns[0]) else: stop = min(stop, len(columns[0])) length = stop - start if length < 0: raise ValueError('invalid stop/start') # copy block-wise out = None for i in range(start, stop, blen): j = min(i+blen, stop) res = [c[i:j] for c in columns] if out is None: out = getattr(storage, create)(res, names=names, expectedlen=length, **kwargs) else: out.append(res) return out
def copy_table(tbl, start=0, stop=None, blen=None, storage=None, create='table', **kwargs)
Copy `tbl` block-wise into a new table.
3.011893
2.905209
1.036721
# setup storage = _util.get_storage(storage) if isinstance(data, tuple): blen = max(_util.get_blen_array(d, blen) for d in data) else: blen = _util.get_blen_array(data, blen) if isinstance(data, tuple): _util.check_equal_length(*data) length = len(data[0]) else: length = len(data) # block-wise iteration out = None for i in range(0, length, blen): j = min(i+blen, length) # obtain blocks if isinstance(data, tuple): blocks = [d[i:j] for d in data] else: blocks = [data[i:j]] # map res = f(*blocks) # store if out is None: out = getattr(storage, create)(res, expectedlen=length, **kwargs) else: out.append(res) return out
def map_blocks(data, f, blen=None, storage=None, create='array', **kwargs)
Apply function `f` block-wise over `data`.
2.595205
2.581947
1.005135
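A sketch of block-wise mapping with the default storage, showing both the single-input and tuple-of-inputs forms handled by the function above; blen is set small only to force multiple blocks.

import numpy as np

x = np.arange(10)
y = np.ones(10)

# single input: f receives one block at a time
doubled = map_blocks(x, lambda block: block * 2, blen=4)

# tuple input: aligned blocks from each array are passed together
total = map_blocks((x, y), lambda a, b: a + b, blen=4)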
# setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) length = len(data) # normalise axis arg if isinstance(axis, int): axis = (axis,) # deal with 'out' kwarg if supplied, can arise if a chunked array is # passed as an argument to numpy.sum(), see also # https://github.com/cggh/scikit-allel/issues/66 kwarg_out = kwargs.pop('out', None) if kwarg_out is not None: raise ValueError('keyword argument "out" is not supported') if axis is None or 0 in axis: # two-step reduction out = None for i in range(0, length, blen): j = min(i+blen, length) block = data[i:j] if mapper: block = mapper(block) res = reducer(block, axis=axis) if out is None: out = res else: out = block_reducer(out, res) if np.isscalar(out): return out elif len(out.shape) == 0: return out[()] else: return getattr(storage, create)(out, **kwargs) else: # first dimension is preserved, no need to reduce blocks out = None for i in range(0, length, blen): j = min(i+blen, length) block = data[i:j] if mapper: block = mapper(block) r = reducer(block, axis=axis) if out is None: out = getattr(storage, create)(r, expectedlen=length, **kwargs) else: out.append(r) return out
def reduce_axis(data, reducer, block_reducer, mapper=None, axis=None, blen=None, storage=None, create='array', **kwargs)
Apply an operation to `data` that reduces over one or more axes.
2.985666
3.046807
0.979933
return reduce_axis(data, axis=axis, reducer=np.amax, block_reducer=np.maximum, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
def amax(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs)
Compute the maximum value.
2.778565
3.030637
0.916825
return reduce_axis(data, axis=axis, reducer=np.amin, block_reducer=np.minimum, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
def amin(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs)
Compute the minimum value.
2.787075
3.065241
0.909251
return reduce_axis(data, axis=axis, reducer=np.sum, block_reducer=np.add, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
def asum(data, axis=None, mapper=None, blen=None, storage=None, create='array', **kwargs)
Compute the sum.
2.815374
2.927104
0.961829
return reduce_axis(data, reducer=np.count_nonzero, block_reducer=np.add, mapper=mapper, blen=blen, storage=storage, create=create, **kwargs)
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array', **kwargs)
Count the number of non-zero elements.
3.668085
3.769788
0.973021
# setup if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) length = len(data) nnz = count_nonzero(condition) if axis == 0: _util.check_equal_length(data, condition) # block iteration out = None for i in range(0, length, blen): j = min(i+blen, length) bcond = np.asarray(condition[i:j]) # don't access any data unless we have to if np.any(bcond): block = np.asarray(data[i:j]) res = np.compress(bcond, block, axis=0) if out is None: out = getattr(storage, create)(res, expectedlen=nnz, **kwargs) else: out.append(res) return out elif axis == 1: # block iteration out = None condition = np.asanyarray(condition) for i in range(0, length, blen): j = min(i+blen, length) block = np.asarray(data[i:j]) res = np.compress(condition, block, axis=1) if out is None: out = getattr(storage, create)(res, expectedlen=length, **kwargs) else: out.append(res) return out else: raise NotImplementedError('axis not supported: %s' % axis)
def compress(condition, data, axis=0, out=None, blen=None, storage=None, create='array', **kwargs)
Return selected slices of an array along given axis.
2.761816
2.769284
0.997303
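A minimal sketch of the two axes handled above; note that for axis=0 the implementation only materialises blocks whose condition slice contains at least one True.

import numpy as np

data = np.arange(20).reshape(10, 2)
condition = data[:, 0] > 10
rows = compress(condition, data, axis=0)      # select rows
cols = compress([True, False], data, axis=1)  # select columns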
# setup if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') length = len(data) if axis == 0: # check that indices are strictly increasing indices = np.asanyarray(indices) if np.any(indices[1:] <= indices[:-1]): raise NotImplementedError( 'indices must be strictly increasing' ) # implement via compress() condition = np.zeros((length,), dtype=bool) condition[indices] = True return compress(condition, data, axis=0, blen=blen, storage=storage, create=create, **kwargs) elif axis == 1: # setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) # block iteration out = None for i in range(0, length, blen): j = min(i+blen, length) block = data[i:j] res = np.take(block, indices, axis=1, mode=mode) if out is None: out = getattr(storage, create)(res, expectedlen=length, **kwargs) else: out.append(res) return out else: raise NotImplementedError('axis not supported: %s' % axis)
def take(data, indices, axis=0, out=None, mode='raise', blen=None, storage=None, create='array', **kwargs)
Take elements from an array along an axis.
3.112096
3.131107
0.993928
# setup if axis is not None and axis != 0: raise NotImplementedError('only axis 0 is supported') if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') storage = _util.get_storage(storage) names, columns = _util.check_table_like(tbl) blen = _util.get_blen_table(tbl, blen) _util.check_equal_length(columns[0], condition) length = len(columns[0]) nnz = count_nonzero(condition) # block iteration out = None for i in range(0, length, blen): j = min(i+blen, length) bcond = condition[i:j] # don't access any data unless we have to if np.any(bcond): bcolumns = [c[i:j] for c in columns] res = [np.compress(bcond, c, axis=0) for c in bcolumns] if out is None: out = getattr(storage, create)(res, names=names, expectedlen=nnz, **kwargs) else: out.append(res) return out
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None, create='table', **kwargs)
Return selected rows of a table.
3.54845
3.540191
1.002333
# setup if axis is not None and axis != 0: raise NotImplementedError('only axis 0 is supported') if out is not None: # argument is only there for numpy API compatibility raise NotImplementedError('out argument is not supported') if mode is not None and mode != 'raise': raise NotImplementedError('only mode=raise is supported') names, columns = _util.check_table_like(tbl) length = len(columns[0]) # check that indices are strictly increasing indices = np.asanyarray(indices) if np.any(indices[1:] <= indices[:-1]): raise NotImplementedError( 'indices must be strictly increasing' ) # implement via compress() condition = np.zeros((length,), dtype=bool) condition[indices] = True return compress_table(condition, tbl, blen=blen, storage=storage, create=create, **kwargs)
def take_table(tbl, indices, axis=None, out=None, mode='raise', blen=None, storage=None, create='table', **kwargs)
Return selected rows of a table.
3.601137
3.54166
1.016793
# TODO refactor sel0 and sel1 normalization with ndarray.subset # setup storage = _util.get_storage(storage) blen = _util.get_blen_array(data, blen) length = len(data) if sel0 is not None: sel0 = np.asanyarray(sel0) if sel1 is not None: sel1 = np.asanyarray(sel1) # ensure boolean array for dim 0 if sel0 is not None and sel0.dtype.kind != 'b': # assume indices, convert to boolean condition tmp = np.zeros(length, dtype=bool) tmp[sel0] = True sel0 = tmp # ensure indices for dim 1 if sel1 is not None and sel1.dtype.kind == 'b': # assume boolean condition, convert to indices sel1, = np.nonzero(sel1) # shortcuts if sel0 is None and sel1 is None: return copy(data, blen=blen, storage=storage, create=create, **kwargs) elif sel1 is None: return compress(sel0, data, axis=0, blen=blen, storage=storage, create=create, **kwargs) elif sel0 is None: return take(data, sel1, axis=1, blen=blen, storage=storage, create=create, **kwargs) # build output sel0_nnz = count_nonzero(sel0) out = None for i in range(0, length, blen): j = min(i+blen, length) bsel0 = sel0[i:j] # don't access data unless we have to if np.any(bsel0): block = data[i:j] res = _numpy_subset(block, bsel0, sel1) if out is None: out = getattr(storage, create)(res, expectedlen=sel0_nnz, **kwargs) else: out.append(res) return out
def subset(data, sel0=None, sel1=None, blen=None, storage=None, create='array', **kwargs)
Return selected rows and columns of an array.
2.801528
2.830121
0.989897
# setup storage = _util.get_storage(storage) if not isinstance(tup, (tuple, list)): raise ValueError('expected tuple or list, found %r' % tup) if len(tup) < 2: raise ValueError('expected two or more tables to stack') # build output expectedlen = sum(len(t) for t in tup) out = None tnames = None for tdata in tup: tblen = _util.get_blen_table(tdata, blen) tnames, tcolumns = _util.check_table_like(tdata, names=tnames) tlen = len(tcolumns[0]) for i in range(0, tlen, tblen): j = min(i+tblen, tlen) bcolumns = [c[i:j] for c in tcolumns] if out is None: out = getattr(storage, create)(bcolumns, names=tnames, expectedlen=expectedlen, **kwargs) else: out.append(bcolumns) return out
def concatenate_table(tup, blen=None, storage=None, create='table', **kwargs)
Stack tables in sequence vertically (row-wise).
3.427943
3.367682
1.017894
# setup storage = _util.get_storage(storage) if not isinstance(tup, (tuple, list)): raise ValueError('expected tuple or list, found %r' % tup) if len(tup) < 2: raise ValueError('expected two or more arrays') if axis == 0: # build output expectedlen = sum(len(a) for a in tup) out = None for a in tup: ablen = _util.get_blen_array(a, blen) for i in range(0, len(a), ablen): j = min(i+ablen, len(a)) block = a[i:j] if out is None: out = getattr(storage, create)(block, expectedlen=expectedlen, **kwargs) else: out.append(block) else: def f(*blocks): return np.concatenate(blocks, axis=axis) out = map_blocks(tup, f, blen=blen, storage=storage, create=create, **kwargs) return out
def concatenate(tup, axis=0, blen=None, storage=None, create='array', **kwargs)
Concatenate arrays.
3.034012
3.029184
1.001594
# normalise scalars if hasattr(other, 'shape') and len(other.shape) == 0: other = other[()] if np.isscalar(other): def f(block): return op(block, other) return map_blocks(data, f, blen=blen, storage=storage, create=create, **kwargs) elif len(data) == len(other): def f(a, b): return op(a, b) return map_blocks((data, other), f, blen=blen, storage=storage, create=create, **kwargs) else: raise NotImplementedError('argument type not supported')
def binary_op(data, op, other, blen=None, storage=None, create='array', **kwargs)
Compute a binary operation block-wise over `data`.
2.582013
2.5679
1.005496
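A sketch of the dispatch above: scalars (including 0-d arrays) take the broadcast branch, while an equal-length sequence takes the paired branch.

import operator
import numpy as np

x = np.arange(5)
plus_ten = binary_op(x, operator.add, 10)       # scalar branch
products = binary_op(x, operator.mul, x[::-1])  # equal-length branch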
# setup storage = _util.get_storage(storage) names, columns = _util.check_table_like(tbl) length = len(columns[0]) if vm_kwargs is None: vm_kwargs = dict() # setup vm if vm == 'numexpr': import numexpr evaluate = numexpr.evaluate elif vm == 'python': # noinspection PyUnusedLocal def evaluate(expr, local_dict=None, **kw): # takes no keyword arguments return eval(expr, dict(), local_dict) else: raise ValueError('expected vm either "numexpr" or "python"') # compile expression and get required columns variables = _get_expression_variables(expression, vm) required_columns = {v: columns[names.index(v)] for v in variables} # determine block size for evaluation blen = _util.get_blen_table(required_columns, blen=blen) # build output out = None for i in range(0, length, blen): j = min(i+blen, length) blocals = {v: c[i:j] for v, c in required_columns.items()} res = evaluate(expression, local_dict=blocals, **vm_kwargs) if out is None: out = getattr(storage, create)(res, expectedlen=length, **kwargs) else: out.append(res) return out
def eval_table(tbl, expression, vm='python', blen=None, storage=None, create='array', vm_kwargs=None, **kwargs)
Evaluate `expression` against columns of a table.
3.12753
3.148541
0.993327
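A hedged sketch of expression evaluation over table columns, assuming a plain dict of equal-length arrays is accepted as table-like by check_table_like; the vm='python' path avoids the numexpr dependency.

import numpy as np

tbl = {'dp': np.array([10, 30, 5, 60]),
       'qual': np.array([40.0, 9.0, 50.0, 99.0])}
keep = eval_table(tbl, '(dp > 8) & (qual > 20)', vm='python')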
ref = asarray_ndim(ref, 1) alt = asarray_ndim(alt, 1, 2) alleles = asarray_ndim(alleles, 1, 2) check_dim0_aligned(ref, alt, alleles) # reshape for convenience ref = ref[:, None] if alt.ndim == 1: alt = alt[:, None] if alleles.ndim == 1: alleles = alleles[:, None] source_alleles = np.append(ref, alt, axis=1) # setup output array out = np.empty(source_alleles.shape, dtype=dtype) out.fill(-1) # find matches for ai in range(source_alleles.shape[1]): match = source_alleles[:, ai, None] == alleles match_i, match_j = match.nonzero() out[match_i, ai] = match_j return out
def create_allele_mapping(ref, alt, alleles, dtype='i1')
Create an array mapping variant alleles into a different allele index system. Parameters ---------- ref : array_like, S1, shape (n_variants,) Reference alleles. alt : array_like, S1, shape (n_variants, n_alt_alleles) Alternate alleles. alleles : array_like, S1, shape (n_variants, n_alleles) Alleles defining the new allele indexing. dtype : dtype, optional Output dtype. Returns ------- mapping : ndarray, int8, shape (n_variants, n_alt_alleles + 1) Examples -------- Example with biallelic variants:: >>> import allel >>> ref = [b'A', b'C', b'T', b'G'] >>> alt = [b'T', b'G', b'C', b'A'] >>> alleles = [[b'A', b'T'], # no transformation ... [b'G', b'C'], # swap ... [b'T', b'A'], # 1 missing ... [b'A', b'C']] # 1 missing >>> mapping = allel.create_allele_mapping(ref, alt, alleles) >>> mapping array([[ 0, 1], [ 1, 0], [ 0, -1], [-1, 0]], dtype=int8) Example with multiallelic variants:: >>> ref = [b'A', b'C', b'T'] >>> alt = [[b'T', b'G'], ... [b'A', b'T'], ... [b'G', b'.']] >>> alleles = [[b'A', b'T'], ... [b'C', b'T'], ... [b'G', b'A']] >>> mapping = allel.create_allele_mapping(ref, alt, alleles) >>> mapping array([[ 0, 1, -1], [ 0, -1, 1], [-1, 0, -1]], dtype=int8) See Also -------- GenotypeArray.map_alleles, HaplotypeArray.map_alleles, AlleleCountsArray.map_alleles
2.529257
2.780985
0.909482
# check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # stack allele counts for convenience pac = np.dstack([ac1, ac2]) # count numbers of alleles called in each population pan = np.sum(pac, axis=1) # count the numbers of populations with each allele npa = np.sum(pac > 0, axis=2) # locate variants with allele calls in both populations non_missing = np.all(pan > 0, axis=1) # locate variants where all alleles are only found in a single population no_shared_alleles = np.all(npa <= 1, axis=1) return non_missing & no_shared_alleles
def locate_fixed_differences(ac1, ac2)
Locate variants with no shared alleles between two populations. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. Returns ------- loc : ndarray, bool, shape (n_variants,) See Also -------- allel.stats.diversity.windowed_df Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 1], [0, 1], [1, 1], [1, 1]], ... [[0, 0], [0, 0], [1, 1], [2, 2]], ... [[0, 0], [-1, -1], [1, 1], [-1, -1]]]) >>> ac1 = g.count_alleles(subpop=[0, 1]) >>> ac2 = g.count_alleles(subpop=[2, 3]) >>> loc_df = allel.locate_fixed_differences(ac1, ac2) >>> loc_df array([ True, False, False, True, True])
3.947079
3.796102
1.039772
# check inputs acs = [asarray_ndim(ac, 2) for ac in acs] check_dim0_aligned(*acs) acs = ensure_dim1_aligned(*acs) # stack allele counts for convenience pac = np.dstack(acs) # count the numbers of populations with each allele npa = np.sum(pac > 0, axis=2) # locate alleles found only in a single population loc_pa = npa == 1 return loc_pa
def locate_private_alleles(*acs)
Locate alleles that are found only in a single population. Parameters ---------- *acs : array_like, int, shape (n_variants, n_alleles) Allele counts arrays from each population. Returns ------- loc : ndarray, bool, shape (n_variants, n_alleles) Boolean array where elements are True if allele is private to a single population. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 1], [0, 1], [1, 1], [1, 1]], ... [[0, 0], [0, 0], [1, 1], [2, 2]], ... [[0, 0], [-1, -1], [1, 1], [-1, -1]]]) >>> ac1 = g.count_alleles(subpop=[0, 1]) >>> ac2 = g.count_alleles(subpop=[2]) >>> ac3 = g.count_alleles(subpop=[3]) >>> loc_private_alleles = allel.locate_private_alleles(ac1, ac2, ac3) >>> loc_private_alleles array([[ True, False, False], [False, False, False], [ True, False, False], [ True, True, True], [ True, True, False]]) >>> loc_private_variants = np.any(loc_private_alleles, axis=1) >>> loc_private_variants array([ True, False, True, True, True])
5.555701
5.637597
0.985473
# flake8: noqa # check inputs ac1 = asarray_ndim(ac1, 2) ac2 = asarray_ndim(ac2, 2) check_dim0_aligned(ac1, ac2) ac1, ac2 = ensure_dim1_aligned(ac1, ac2) # calculate these once only an1 = np.sum(ac1, axis=1) an2 = np.sum(ac2, axis=1) # calculate average diversity (a.k.a. heterozygosity) within each # population within = (mean_pairwise_difference(ac1, an1, fill=fill) + mean_pairwise_difference(ac2, an2, fill=fill)) / 2 # calculate divergence (a.k.a. heterozygosity) between the two populations between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill) # define numerator and denominator for Fst calculations num = between - within den = between return num, den
def hudson_fst(ac1, ac2, fill=np.nan)
Calculate the numerator and denominator for Fst estimation using the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. fill : float Use this value where there are no pairs to compare (e.g., all allele calls are missing). Returns ------- num : ndarray, float, shape (n_variants,) Divergence between the two populations minus average of diversity within each population. den : ndarray, float, shape (n_variants,) Divergence between the two populations. Examples -------- Calculate numerator and denominator for Fst estimation:: >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]], ... [[0, 1], [0, 1], [0, 1], [0, 1]], ... [[0, 0], [0, 0], [0, 0], [0, 0]], ... [[0, 1], [1, 2], [1, 1], [2, 2]], ... [[0, 0], [1, 1], [0, 1], [-1, -1]]]) >>> subpops = [[0, 1], [2, 3]] >>> ac1 = g.count_alleles(subpop=subpops[0]) >>> ac2 = g.count_alleles(subpop=subpops[1]) >>> num, den = allel.hudson_fst(ac1, ac2) >>> num array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333]) >>> den array([1. , 0.5 , 0. , 0.625, 0.5 ]) Estimate Fst for each variant individually:: >>> fst = num / den >>> fst array([ 1. , -0.33333333, nan, -0.2 , -0.66666667]) Estimate Fst averaging over variants:: >>> fst = np.sum(num) / np.sum(den) >>> fst 0.1428571428571429
3.251853
3.03062
1.072999
from allel.stats.admixture import patterson_f2, h_hat num = patterson_f2(aca, acb) den = num + h_hat(aca) + h_hat(acb) return num, den
def patterson_fst(aca, acb)
Estimator of differentiation between populations A and B based on the F2 parameter. Parameters ---------- aca : array_like, int, shape (n_variants, 2) Allele counts for population A. acb : array_like, int, shape (n_variants, 2) Allele counts for population B. Returns ------- num : ndarray, shape (n_variants,), float Numerator. den : ndarray, shape (n_variants,), float Denominator. Notes ----- See Patterson (2012), Appendix A. TODO check if this is numerically equivalent to Hudson's estimator.
5.599524
4.80169
1.166157
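A hedged usage sketch in the style of the hudson_fst example above; as documented, this estimator expects biallelic allele counts of shape (n_variants, 2).

import numpy as np
import allel

g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
                         [[0, 1], [0, 1], [0, 1], [0, 1]],
                         [[0, 0], [0, 0], [0, 1], [0, 1]]])
ac1 = g.count_alleles(subpop=[0, 1])
ac2 = g.count_alleles(subpop=[2, 3])
num, den = allel.patterson_fst(ac1, ac2)
fst = np.sum(num) / np.sum(den)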
# compute values per-variant a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # define the statistic to compute within each window def average_fst(wa, wb, wc): return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc)) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(a, b, c), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, max_allele=None)
Estimate average Fst in windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window.
3.098462
2.88474
1.074087
# compute values per-variant num, den = hudson_fst(ac1, ac2) # define the statistic to compute within each window def average_fst(wn, wd): return np.nansum(wn) / np.nansum(wd) # calculate average Fst in windows fst, windows, counts = windowed_statistic(pos, values=(num, den), statistic=average_fst, size=size, start=start, stop=stop, step=step, windows=windows, fill=fill) return fst, windows, counts
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan)
Estimate average Fst in windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where there are no variants within a window. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) Number of variants in each window.
4.6987
3.975374
1.181952
# calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # compute the numerator and denominator in moving windows num = moving_statistic(a, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den = moving_statistic(a + b + c, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num / den return fst
def moving_weir_cockerham_fst(g, subpops, size, start=0, stop=None, step=None, max_allele=None)
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Weir and Cockerham (1984). Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. max_allele : int, optional The highest allele index to consider. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
3.489551
3.22355
1.082518
# calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
def moving_hudson_fst(ac1, ac2, size, start=0, stop=None, step=None)
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Hudson (1992) elaborated by Bhatia et al. (2013). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
3.578233
3.259155
1.097902
# calculate per-variant values num, den = patterson_fst(ac1, ac2) # compute the numerator and denominator in moving windows num_sum = moving_statistic(num, statistic=np.nansum, size=size, start=start, stop=stop, step=step) den_sum = moving_statistic(den, statistic=np.nansum, size=size, start=start, stop=stop, step=step) # calculate fst in each window fst = num_sum / den_sum return fst
def moving_patterson_fst(ac1, ac2, size, start=0, stop=None, step=None)
Estimate average Fst in moving windows over a single chromosome/contig, following the method of Patterson (2012). Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. size : int The window size (number of variants). start : int, optional The index at which to start. stop : int, optional The index at which to stop. step : int, optional The number of variants between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. Returns ------- fst : ndarray, float, shape (n_windows,) Average Fst in each window.
3.463118
3.10234
1.116292
# calculate per-variant values a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele) # calculate overall estimate a_sum = np.nansum(a) b_sum = np.nansum(b) c_sum = np.nansum(c) fst = a_sum / (a_sum + b_sum + c_sum) # compute the numerator and denominator within each block num_bsum = moving_statistic(a, statistic=np.nansum, size=blen) den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
def average_weir_cockerham_fst(g, subpops, blen, max_allele=None)
Estimate average Fst and standard error using the block-jackknife. Parameters ---------- g : array_like, int, shape (n_variants, n_samples, ploidy) Genotype array. subpops : sequence of sequences of ints Sample indices for each subpopulation. blen : int Block size (number of variants). max_allele : int, optional The highest allele index to consider. Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
3.414571
2.815782
1.212655
# calculate per-variant values num, den = hudson_fst(ac1, ac2, fill=np.nan) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
def average_hudson_fst(ac1, ac2, blen)
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
4.479762
3.410528
1.31351
# calculate per-variant values num, den = patterson_fst(ac1, ac2) # calculate overall estimate fst = np.nansum(num) / np.nansum(den) # compute the numerator and denominator within each block num_bsum = moving_statistic(num, statistic=np.nansum, size=blen) den_bsum = moving_statistic(den, statistic=np.nansum, size=blen) # calculate the statistic values in each block vb = num_bsum / den_bsum # estimate standard error _, se, vj = jackknife((num_bsum, den_bsum), statistic=lambda n, d: np.sum(n) / np.sum(d)) return fst, se, vb, vj
def average_patterson_fst(ac1, ac2, blen)
Estimate average Fst between two populations and standard error using the block-jackknife. Parameters ---------- ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array from the second population. blen : int Block size (number of variants). Returns ------- fst : float Estimated value of the statistic using all data. se : float Estimated standard error. vb : ndarray, float, shape (n_blocks,) Value of the statistic in each block. vj : ndarray, float, shape (n_blocks,) Values of the statistic from block-jackknife resampling.
4.430239
3.310229
1.338348
# check inputs gn = asarray_ndim(gn, 2, dtype='i1') gn = memoryview_safe(gn) # compute correlation coefficients r = gn_pairwise_corrcoef_int8(gn) # convenience for singletons if r.size == 1: r = r[0] return r
def rogers_huff_r(gn)
Estimate the linkage disequilibrium parameter *r* for each pair of variants using the method of Rogers and Huff (2008). Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (n_variants * (n_variants - 1) // 2,) Matrix in condensed form. Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [1, 1], [0, 0]], ... [[0, 0], [1, 1], [0, 0]], ... [[1, 1], [0, 0], [1, 1]], ... [[0, 0], [0, 1], [-1, -1]]], dtype='i1') >>> gn = g.to_n_alt(fill=-1) >>> gn array([[ 0, 2, 0], [ 0, 2, 0], [ 2, 0, 2], [ 0, 1, -1]], dtype=int8) >>> r = allel.rogers_huff_r(gn) >>> r # doctest: +ELLIPSIS array([ 1. , -1.0000001, 1. , -1.0000001, 1. , -1. ], dtype=float32) >>> r ** 2 # doctest: +ELLIPSIS array([1. , 1.0000002, 1. , 1.0000002, 1. , 1. ], dtype=float32) >>> from scipy.spatial.distance import squareform >>> squareform(r ** 2) array([[0. , 1. , 1.0000002, 1. ], [1. , 0. , 1.0000002, 1. ], [1.0000002, 1.0000002, 0. , 1. ], [1. , 1. , 1. , 0. ]], dtype=float32)
8.890898
9.089623
0.978137
# check inputs gna = asarray_ndim(gna, 2, dtype='i1') gnb = asarray_ndim(gnb, 2, dtype='i1') gna = memoryview_safe(gna) gnb = memoryview_safe(gnb) # compute correlation coefficients r = gn_pairwise2_corrcoef_int8(gna, gnb) # convenience for singletons if r.size == 1: r = r[0, 0] return r
def rogers_huff_r_between(gna, gnb)
Estimate the linkage disequilibrium parameter *r* for each pair of variants between the two input arrays, using the method of Rogers and Huff (2008). Parameters ---------- gna, gnb : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). Returns ------- r : ndarray, float, shape (m_variants, n_variants ) Matrix in rectangular form.
4.228588
4.119401
1.026506
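A sketch pairing this function with the genotype coding shown in the rogers_huff_r example above; r[i, j] relates variant i of the first array to variant j of the second.

import numpy as np
import allel

gna = np.array([[0, 2, 0],
                [0, 2, 0]], dtype='i1')
gnb = np.array([[2, 0, 2],
                [0, 1, 2]], dtype='i1')
r = allel.rogers_huff_r_between(gna, gnb)  # shape (2, 2)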
# check inputs if not hasattr(gn, 'shape') or not hasattr(gn, 'dtype'): gn = np.asarray(gn, dtype='i1') if gn.ndim != 2: raise ValueError('gn must have two dimensions') # setup output loc = np.ones(gn.shape[0], dtype='u1') # compute in chunks to avoid loading big arrays into memory blen = get_blen_array(gn, blen) blen = max(blen, 10*size) # avoid too small chunks n_variants = gn.shape[0] for i in range(0, n_variants, blen): # N.B., ensure overlap with next window j = min(n_variants, i+blen+size) gnb = np.asarray(gn[i:j], dtype='i1') gnb = memoryview_safe(gnb) locb = loc[i:j] gn_locate_unlinked_int8(gnb, locb, size, step, threshold) return loc.astype('b1')
def locate_unlinked(gn, size=100, step=20, threshold=.1, blen=None)
Locate variants in approximate linkage equilibrium, where r**2 is below the given `threshold`. Parameters ---------- gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int Window size (number of variants). step : int Number of variants to advance to the next window. threshold : float Maximum value of r**2 to include variants. blen : int, optional Block length to use for chunked computation. Returns ------- loc : ndarray, bool, shape (n_variants) Boolean array where True items locate variants in approximate linkage equilibrium. Notes ----- The value of r**2 between each pair of variants is calculated using the method of Rogers and Huff (2008).
4.437623
3.851341
1.152228
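A small LD-pruning sketch using the function above; the window size, step, and threshold here are illustrative only.

import numpy as np
import allel

gn = np.array([[0, 2, 0, 1],
               [0, 2, 0, 1],
               [2, 0, 2, 1],
               [0, 1, 2, 0]], dtype='i1')
loc = allel.locate_unlinked(gn, size=2, step=1, threshold=0.5)
gn_pruned = gn[loc]  # keep variants in approximate linkage equilibrium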
# define the statistic function if isinstance(percentile, (list, tuple)): fill = [fill for _ in percentile] def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return [np.percentile(r_squared, p) for p in percentile] else: def statistic(gnw): r_squared = rogers_huff_r(gnw) ** 2 return np.percentile(r_squared, percentile) return windowed_statistic(pos, gn, statistic, size, start=start, stop=stop, step=step, windows=windows, fill=fill)
def windowed_r_squared(pos, gn, size=None, start=None, stop=None, step=None, windows=None, fill=np.nan, percentile=50)
Summarise linkage disequilibrium in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) The item positions in ascending order, using 1-based coordinates. gn : array_like, int8, shape (n_variants, n_samples) Diploid genotypes at biallelic variants, coded as the number of alternate alleles per call (i.e., 0 = hom ref, 1 = het, 2 = hom alt). size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. fill : object, optional The value to use where a window is empty, i.e., contains no items. percentile : int or sequence of ints, optional The percentile or percentiles to calculate within each window. Returns ------- out : ndarray, shape (n_windows,) The value of the statistic for each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. counts : ndarray, int, shape (n_windows,) The number of items in each window. Notes ----- Linkage disequilibrium (r**2) is calculated using the method of Rogers and Huff (2008). See Also -------- allel.stats.window.windowed_statistic
2.915216
2.901306
1.004795