code | signature | docstring | loss_without_docstring | loss_with_docstring | factor
string | string | string | float64 | float64 | float64
---|---|---|---|---|---
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
# Set diagonal to 0
netin = set_diagonal(netin, 0)
if axis == 'graphlet' and netinfo['nettype'][-1] == 'u':
triu = np.triu_indices(netinfo['netshape'][0], k=1)
netin = netin[triu[0], triu[1], :]
netin = netin.transpose()
if sign == 'both':
net_sorted = np.argsort(np.abs(netin), axis=-1)
elif sign == 'pos':
net_sorted = np.argsort(netin, axis=-1)
elif sign == 'neg':
net_sorted = np.argsort(-1*netin, axis=-1)
else:
raise ValueError('Unknown value for parameter: sign')
# Predefine
netout = np.zeros(netinfo['netshape'])
if axis == 'time':
# These for loops can probably be removed for speed
for i in range(netinfo['netshape'][0]):
for j in range(netinfo['netshape'][1]):
netout[i, j, net_sorted[i, j, -int(round(net_sorted.shape[-1] * level)):]] = 1
elif axis == 'graphlet':
netout_tmp = np.zeros(netin.shape)
for i in range(netout_tmp.shape[0]):
netout_tmp[i, net_sorted[i, -int(round(net_sorted.shape[-1] * level)):]] = 1
netout_tmp = netout_tmp.transpose()
netout[triu[0], triu[1], :] = netout_tmp
netout[triu[1], triu[0], :] = netout_tmp
netout = set_diagonal(netout, 0)
# If input is contact, output contact
if netinfo['inputtype'] == 'C':
netinfo['nettype'] = 'b' + netinfo['nettype'][1]
netout = graphlet2contact(netout, netinfo)
netout.pop('inputtype')
netout.pop('values')
netout['diagonal'] = 0
return netout | def binarize_percent(netin, level, sign='pos', axis='time') | Binarizes a network proportionally. When axis='time', the top values for each edge time series are kept; when axis='graphlet', the top values within each time point are kept.
Parameters
----------
netin : array or dict
network (graphlet or contact representation),
level : float
Percent to keep (expressed as decimal, e.g. 0.1 = top 10%)
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str, default='time'
Specify which dimension thresholding is applied against. Can be 'time' (takes top % for each edge time-series) or 'graphlet' (takes top % for each graphlet)
Returns
-------
netout : array or dict (depending on input)
Binarized network | 2.999741 | 2.840442 | 1.056082 |
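A minimal usage sketch (hedged: it assumes binarize_percent is importable, e.g. via teneto.utils, and uses a random network purely for illustration):

import numpy as np
from teneto.utils import binarize_percent

np.random.seed(2019)
G = np.random.rand(5, 5, 20)              # weighted network: 5 nodes, 20 time points
G = (G + G.transpose(1, 0, 2)) / 2        # symmetrise so the network is undirected
G_bin = binarize_percent(G, level=0.1, sign='pos', axis='time')
# G_bin is binary: for each edge time series, the top 10% of time points are set to 1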
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
trajectory = rdp(netin, level)
contacts = []
# Use the trajectory points as threshold
for n in range(trajectory['index'].shape[0]):
if sign == 'pos':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] > 0]
elif sign == 'neg':
sel = trajectory['trajectory_points'][n][trajectory['trajectory']
[n][trajectory['trajectory_points'][n]] < 0]
else:
sel = trajectory['trajectory_points'][n]
i_ind = np.repeat(trajectory['index'][n, 0], len(sel))
j_ind = np.repeat(trajectory['index'][n, 1], len(sel))
contacts.append(np.array([i_ind, j_ind, sel]).transpose())
contacts = np.concatenate(contacts)
# Create output dictionary
netout = dict(netinfo)
netout['contacts'] = contacts
netout['nettype'] = 'b' + netout['nettype'][1]
netout['dimord'] = 'node,node,time'
netout['timetype'] = 'discrete'
netout['diagonal'] = 0
# If input is graphlet, output graphlet
if netinfo['inputtype'] == 'G':
netout = contact2graphlet(netout)
else:
netout.pop('inputtype')
return netout | def binarize_rdp(netin, level, sign='pos', axis='time') | Binarizes a network based on RDP compression.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
level : float
Delta parameter, which is the tolerated error in the RDP compression.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
Returns
-------
netout : array or dict (depending on input)
Binarized network | 3.885113 | 3.87973 | 1.001388 |
if threshold_type == 'percent':
netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
raise ValueError('Unknown value for parameter: threshold_type.')
return netout | def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time') | Binarizes a network, returning the network. General wrapper function for different binarization functions.
Parameters
----------
netin : array or dict
Network (graphlet or contact representation),
threshold_type : str
Type of threshold used for binarization. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : float
Parameter dependent on threshold_type.
If 'rdp', it is the delta (i.e. error allowed in compression).
If 'percent', it is the percentage to keep (e.g. 0.1, means keep 10% of signal).
If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str
Threshold over specified axis. Valid for 'percent' and 'rdp'. Can be 'time' or 'graphlet'.
Returns
-------
netout : array or dict (depending on input)
Binarized network | 2.25078 | 2.055175 | 1.095177 |
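A hedged sketch of calling the wrapper with the different threshold types (G is assumed to be a weighted graphlet array, as in the earlier example):

# Keep the strongest 10% of each edge time series
G_bin = binarize(G, threshold_type='percent', threshold_level=0.1)
# Keep values above an amplitude of 0.5 (with the default sign='pos')
G_bin = binarize(G, threshold_type='magnitude', threshold_level=0.5)
# Binarize using RDP compression with a tolerated error (delta) of 0.1
G_bin = binarize(G, threshold_type='rdp', threshold_level=0.1)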
inputtype = checkInput(netIn)
# Convert TN to G representation
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
G = netIn.df_to_array()
netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
G = contact2graphlet(netIn)
netInfo = dict(netIn)
netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
TN = TemporalNetwork(from_array=netIn)
# Get network type if not set yet
elif inputtype == 'G' and 'G' in allowedformats:
netInfo = {}
netInfo['netshape'] = netIn.shape
netInfo['nettype'] = gen_nettype(netIn)
G = netIn
elif inputtype == 'C' and outputformat == 'C':
pass
else:
raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
TN.network['i'] = TN.network['i'].astype(int)
TN.network['j'] = TN.network['j'].astype(int)
TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
C = graphlet2contact(G, netInfo)
if outputformat == 'G':
return G, netInfo
elif outputformat == 'C':
return C
elif outputformat == 'TN':
return TN | def process_input(netIn, allowedformats, outputformat='G') | Takes input network and checks what the input is.
Parameters
----------
netIn : array, dict, or TemporalNetwork
Network (graphlet, contact or object)
allowedformats : str
Which formats of network objects are allowed. Options: 'C', 'TN', 'G'.
outputformat : str, default='G'
Target output format. Options: 'C', 'G' or 'TN'.
Returns
-------
C : dict
OR
G : array
Graphlet representation.
netInfo : dict
Metainformation about network.
OR
tnet : object
object of TemporalNetwork class | 2.646709 | 2.436064 | 1.086469 |
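A hedged example of the typical 'G' output path (checkInput and gen_nettype are helpers not shown here; for a symmetric binary array the inferred nettype would presumably be 'bu'):

import numpy as np

G = np.zeros([3, 3, 5])
G[0, 1, [0, 2]] = 1
G[1, 0, [0, 2]] = 1
G, netinfo = process_input(G, ['C', 'G'])
# netinfo would contain {'netshape': (3, 3, 5), 'nettype': 'bu', 'inputtype': 'G'}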
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
new_communityID[communityID == n] = i
if len(cid_shape) > 1:
new_communityID = new_communityID.reshape(cid_shape)
return new_communityID | def clean_community_indexes(communityID) | Takes input of community assignments. Returns reindexed community assignment by using smallest numbers possible.
Parameters
----------
communityID : array-like
list or array of integers. Output from community detection algorithms.
Returns
-------
new_communityID : array
cleaned list going from 0 to len(np.unique(communityID))-1
Note
-----
Behaviour of the function entails that the lowest community integer in communityID will receive the lowest integer in new_communityID. | 2.007231 | 2.093854 | 0.95863 |
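A small worked example of the reindexing behaviour:

import numpy as np

communityID = np.array([5, 5, 2, 9, 2])
clean_community_indexes(communityID)
# np.unique gives [2, 5, 9], so 2 -> 0, 5 -> 1, 9 -> 2
# result: array([1., 1., 0., 2., 0.])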
d = collections.OrderedDict()
for c in C['contacts']:
ct = tuple(c)
if ct in d:
d[ct] += 1
else:
d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
new_values.append(value)
new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out | def multiple_contacts_get_values(C) | Given a contact representation with repeated contacts, this function removes the duplicates and counts the repetitions as values.
Parameters
----------
C : dict
contact representation with multiple repeated contacts.
Returns
-------
C_out : dict
Contact representation with duplicate contacts removed and the number of duplicates is now in the 'values' field. | 2.388001 | 2.190151 | 1.090336 |
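A toy example of the duplicate-counting behaviour (the input dict is trimmed to the only field the function reads):

C = {'contacts': [[0, 1, 2], [0, 1, 2], [1, 2, 3]]}
C_out = multiple_contacts_get_values(C)
# C_out['contacts'] -> [(0, 1, 2), (1, 2, 3)]
# C_out['values']   -> [2, 1]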
if len(df) > 0:
idx = np.array(list(map(list, df.values)))
G = np.zeros([netshape[0], netshape[0], netshape[1]])
if idx.shape[1] == 3:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2]]])
idx = idx.astype(int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
elif idx.shape[1] == 4:
if nettype[-1] == 'u':
idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
weights = idx[:, 3]
idx = np.array(idx[:, :3], dtype=int)
G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
G = np.zeros([netshape[0], netshape[0], netshape[1]])
return G | def df_to_array(df, netshape, nettype) | Returns a numpy array (snapshot representation) from the dataframe contact list
Parameters:
df : pandas df
pandas df with columns, i,j,t.
netshape : tuple
network shape, format: (node, time)
nettype : str
'wu', 'wd', 'bu', 'bd'
Returns:
--------
G : array
(node,node,time) array for the network | 1.946345 | 1.890199 | 1.029704 |
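A minimal sketch of converting a contact-list dataframe into a (node,node,time) array (for undirected nettypes the edges are mirrored automatically):

import numpy as np
import pandas as pd

df = pd.DataFrame({'i': [0, 1], 'j': [1, 2], 't': [0, 1]})
G = df_to_array(df, netshape=(3, 2), nettype='bu')
# G.shape == (3, 3, 2)
# G[0, 1, 0] == G[1, 0, 0] == 1 and G[1, 2, 1] == G[2, 1, 1] == 1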
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
print('Default distance function specified. As network is binary, using Hamming')
distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
distance_func_name = 'euclidean'
print(
'Default distance function specified. '
'As network is weighted, using Euclidean')
return distance_func_name | def check_distance_funciton_input(distance_func_name, netinfo) | Function checks distance_func_name; if it is specified as 'default', it selects a default distance function based on the type of the network.
Parameters
----------
distance_func_name : str
distance function name.
netinfo : dict
the output of utils.process_input
Returns
-------
distance_func_name : str
distance function name. | 3.215411 | 3.008744 | 1.068689 |
path = tenetopath[0] + '/data/parcellation/' + parcellation_name + '.csv'
parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
return parc | def load_parcellation_coords(parcellation_name) | Loads coordinates of included parcellations.
Parameters
----------
parcellation_name : str
options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.
Returns
-------
parc : array
parcellation coordinates | 4.322772 | 5.015762 | 0.861837 |
if isinstance(parcellation, str):
parcin = ''
if '+' in parcellation:
parcin = parcellation
parcellation = parcellation.split('+')[0]
if '+OH' in parcin:
subcortical = True
else:
subcortical = None
if '+SUIT' in parcin:
cerebellar = True
else:
cerebellar = None
if not parc_type or not parc_params:
path = tenetopath[0] + '/data/parcellation_defaults/defaults.json'
with open(path) as data_file:
defaults = json.load(data_file)
if not parc_type:
parc_type = defaults[parcellation]['type']
print('Using default parcellation type')
if not parc_params:
parc_params = defaults[parcellation]['params']
print('Using default parameters')
if parc_type == 'sphere':
parcellation = load_parcellation_coords(parcellation)
seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
data = seed.fit_transform(data_path)
elif parc_type == 'region':
path = tenetopath[0] + '/data/parcellation/' + parcellation + '.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data = region.fit_transform(data_path)
else:
raise ValueError('Unknown parc_type specified')
if subcortical:
subatlas = fetch_atlas_harvard_oxford('sub-maxprob-thr0-2mm')['maps']
region = NiftiLabelsMasker(subatlas, **parc_params)
data_sub = region.fit_transform(data_path)
data = np.hstack([data, data_sub])
if cerebellar:
path = tenetopath[0] + '/data/parcellation/Cerebellum-SUIT_space-MNI152NLin2009cAsym.nii.gz'
region = NiftiLabelsMasker(path, **parc_params)
data_cerebellar = region.fit_transform(data_path)
data = np.hstack([data, data_cerebellar])
return data | def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None) | Performs a parcellation which reduces voxel space to regions of interest (brain data).
Parameters
----------
data_path : str
Path to .nii image.
parcellation : str
Specify which parcellation that you would like to use. For MNI: 'gordon2014_333', 'power2012_264', For TAL: 'shen2013_278'.
It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by adding:
'+OH' (for the Harvard-Oxford subcortical atlas) and '+SUIT' (for the SUIT cerebellar atlas).
e.g.: 'gordon2014_333+OH+SUIT'
parc_type : str
Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
Returns
-------
data : array
Data after the parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication. | 2.531412 | 2.223121 | 1.138675 |
steps = (1.0/(N-1)) * (stop - start)
if np.isscalar(steps):
return steps*np.arange(N) + start
else:
return steps[:, None]*np.arange(N) + start[:, None] | def create_traj_ranges(start, stop, N) | Fills in the trajectory range.
# Adapted from https://stackoverflow.com/a/40624614 | 3.039685 | 2.983643 | 1.018783 |
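Two short examples of the broadcasting behaviour, one scalar and one vectorised (illustrative only):

import numpy as np

create_traj_ranges(0, 10, 6)
# array([ 0.,  2.,  4.,  6.,  8., 10.])
create_traj_ranges(np.array([0, 1]), np.array([4, 5]), 5)
# each row is its own trajectory:
# array([[0., 1., 2., 3., 4.],
#        [1., 2., 3., 4., 5.]])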
if not calc:
calc = ''
else:
calc = '_' + calc
if not community:
community = ''
else:
community = 'community'
if 'community' in calc and 'community' in community:
community = ''
if calc == 'community_avg' or calc == 'community_pairs':
community = ''
dimord_dict = {
'temporal_closeness_centrality': 'node',
'temporal_degree_centrality': 'node',
'temporal_degree_centrality_avg': 'node',
'temporal_degree_centrality_time': 'node,time',
'temporal_efficiency': 'global',
'temporal_efficiency_global': 'global',
'temporal_efficiency_node': 'node',
'temporal_efficiency_to': 'node',
'sid_global': 'global,time',
'community_pairs': 'community,community,time',
'community_avg': 'community,time',
'sid': 'community,community,time',
'reachability_latency_global': 'global',
'reachability_latency': 'global',
'reachability_latency_node': 'node',
'fluctuability': 'node',
'fluctuability_global': 'global',
'bursty_coeff': 'edge,edge',
'bursty_coeff_edge': 'edge,edge',
'bursty_coeff_node': 'node',
'bursty_coeff_meanEdgePerNode': 'node',
'volatility_global': 'time',
}
if measure + calc + community in dimord_dict:
return dimord_dict[measure + calc + community]
else:
print('WARNING: get_dimord() returned unknown dimension labels')
return 'unknown' | def get_dimord(measure, calc=None, community=None) | Get the dimension order of a network measure.
Parameters
----------
measure : str
Name of function in teneto.networkmeasures.
calc : str, default=None
Calc parameter for the function
community : bool, default=None
If not None, the measure is assumed to have been calculated with community assignments.
Returns
-------
dimord : str
Dimension order. So "node,node,time" would define the dimensions of the network measure. | 3.66602 | 3.49872 | 1.047818 |
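A few hedged lookups illustrating how measure, calc and community are concatenated into the dictionary key:

get_dimord('volatility', calc='global')   # key 'volatility_global' -> 'time'
get_dimord('bursty_coeff', calc='node')   # key 'bursty_coeff_node' -> 'node'
get_dimord('not_a_measure')               # no match -> prints a warning and returns 'unknown'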
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + \
((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + \
((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N*tnet.T)-tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N*tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet | def create_supraadjacency_matrix(tnet, intersliceweight=1) | Returns a supraadjacency matrix from a temporal network structure
Parameters
--------
tnet : TemporalNetwork
Temporal network (any network type)
intersliceweight : int
Weight that links the same node from adjacent time-points
Returns
--------
supranet : dataframe
Supraadjacency matrix | 2.527046 | 2.362498 | 1.06965 |
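A small worked example of the node-time flattening the function uses (comment-only sketch; the numbers follow directly from the code above):

# With N = 3 nodes and T = 4 time points:
#   an edge (i=0, j=2, t=1) becomes the supra-edge (0 + 3*1, 2 + 3*1) = (3, 5)
#   interslice edges connect supra-nodes 0..(N*T - N - 1) to N..(N*T - 1),
#   i.e. each node to itself at the next time point, weighted by intersliceweight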
if t is not None:
df = get_network_when(df, t=t)
if 'weight' in df.columns:
nxobj = nx.from_pandas_edgelist(
df, source='i', target='j', edge_attr='weight')
else:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj | def tnet_to_nx(df, t=None) | Creates undirected networkx object | 2.394759 | 2.319137 | 1.032608 |
r
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
# Divide resolution by the number of timepoints
resolution = resolution / tnet.T
supranet = create_supraadjacency_matrix(
tnet, intersliceweight=intersliceweight)
if negativeedge == 'ignore':
supranet = supranet[supranet['weight'] > 0]
nxsupra = tnet_to_nx(supranet)
np.random.seed(randomseed)
while True:
comtmp = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T) for n in range(n_iter)}
for j in as_completed(job):
comtmp.append(j.result())
comtmp = np.stack(comtmp)
comtmp = comtmp.transpose()
comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
if n_iter == 1:
break
nxsupra_old = nxsupra
nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
# If there was no consensus, there are no communities possible, return
if nxsupra is None:
break
if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N*tnet.T)) == nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N*tnet.T))).all():
break
communities = comtmp[:, :, 0]
if temporal_consensus:
communities = make_temporal_consensus(communities)
return communities | def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1) | r"""
Louvain clustering for a temporal network.
Parameters
-----------
tnet : array, dict, TemporalNetwork
Input network
resolution : int
resolution of Louvain clustering ($\gamma$)
intersliceweight : int
interslice weight of multilayer clustering ($\omega$). Must be positive.
n_iter : int
Number of iterations to run louvain for
randomseed : int
Set for reproducibility
negativeedge : str
If there are negative edges, what should be done with them.
Options: 'ignore' (i.e. set to 0). More options to be added.
consensus_threshold : float (default 0.5)
When creating the consensus matrix (averaged over iterations), only keep values where the consensus reaches this proportion.
Returns
-------
communities : array (node,time)
node,time array of community assignment
Notes
-------
References
---------- | 3.314098 | 3.313821 | 1.000083 |
r
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
for j in range(i+1, com_membership.shape[0]):
con = np.sum((com_membership[i, :] - com_membership[j, :])
== 0, axis=-1) / com_membership.shape[-1]
twhere = np.where(con > th)[0]
D += list(zip(*[np.repeat(i, len(twhere)).tolist(), np.repeat(j,
len(twhere)).tolist(), twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
D = TemporalNetwork(from_df=D)
D = create_supraadjacency_matrix(D, intersliceweight=0)
Dnx = tnet_to_nx(D)
else:
Dnx = None
return Dnx | def make_consensus_matrix(com_membership, th=0.5) | r"""
Makes the consensus matrix
.
Parameters
----------
com_membership : array
Shape should be node, time, iteration.
th : float
threshold to cancel noisy edges
Returns
-------
Dnx : networkx Graph or None
consensus network built from the thresholded consensus matrix | 3.630805 | 3.608872 | 1.006077 |
r
com_membership = np.array(com_membership)
# reindex the first time point's community labels so they start from 0
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
# loop over all timepoints, greedily matching each community (largest first) to the previous time point via Jaccard distance
for t in range(1, com_membership.shape[1]):
ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
ct = ct[np.argsort(counts_t)[::-1]]
c1back = np.unique(com_membership[:, t-1])
new_index = np.zeros(com_membership.shape[0])
for n in ct:
if len(c1back) > 0:
d = np.ones(int(c1back.max())+1)
for m in c1back:
v1 = np.zeros(com_membership.shape[0])
v2 = np.zeros(com_membership.shape[0])
v1[com_membership[:, t] == n] = 1
v2[com_membership[:, t-1] == m] = 1
d[int(m)] = jaccard(v1, v2)
bestval = np.argmin(d)
else:
bestval = new_index.max() + 1
new_index[com_membership[:, t] == n] = bestval
c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
com_membership[:, t] = new_index
return com_membership | def make_temporal_consensus(com_membership) | r"""
Matches community labels across time-points.
Jaccard matching is done in a greedy fashion, matching the largest community at t with the communities at t-1.
Parameters
----------
com_membership : array
Shape should be node, time.
Returns
-------
com_membership : array
community labels relabelled for consistency across time points using Jaccard distance | 3.279249 | 3.015183 | 1.087579 |
# Preallocate
flex = np.zeros(communities.shape[0])
# Go from the second time point to last, compare with time-point before
for t in range(1, communities.shape[1]):
flex[communities[:, t] != communities[:, t-1]] += 1
# Normalize
flex = flex / (communities.shape[1] - 1)
return flex | def flexibility(communities) | Amount a node changes community
Parameters
----------
communities : array
Community array of shape (node,time)
Returns
--------
flex : array
Array of length node containing the flexibility of each node.
Notes
-----
Flexibility calculates the number of times a node switches its community label during a time series. It is normalized by the number of possible changes which could occur. It is important to make sure that the different community labels across time points are not arbitrary.
References
-----------
Bassett, DS, Wymbs N, Porter MA, Mucha P, Carlson JM, Grafton ST. Dynamic reconfiguration of human brain networks during learning. PNAS, 2011, 108(18):7641-6. | 4.733977 | 3.730659 | 1.268938 |
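A worked example of the normalised switch count (three nodes, four time points; illustrative only):

import numpy as np

communities = np.array([[0, 0, 1, 1],    # one switch out of three possible transitions -> 1/3
                        [0, 0, 0, 0],    # never switches -> 0
                        [1, 0, 1, 0]])   # switches at every transition -> 1
flexibility(communities)
# array([0.33333333, 0.        , 1.        ])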
if '/' in fname:
split = fname.split('/')
dirnames = '/'.join(split[:-1]) + '/'
fname = split[-1]
else:
dirnames = ''
tags = [tag for tag in fname.split('_') if '-' in tag]
fname_head = '_'.join(tags)
fileformat = '.' + '.'.join(fname.split('.')[1:])
return dirnames + fname_head, fileformat | def drop_bids_suffix(fname) | Given a filename sub-01_run-01_preproc.nii.gz, it will return ['sub-01_run-01', '.nii.gz']
Parameters
----------
fname : str
BIDS filename with suffix. Directories, if present, are preserved in the returned name.
Returns
-------
fname_head : str
BIDS filename with the suffix removed
fileformat : str
The file format (text after suffix)
Note
------
This assumes that there are no periods in the filename | 3.282015 | 3.044714 | 1.077939 |
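A quick sketch of the expected behaviour, including a path prefix (which the function preserves):

drop_bids_suffix('path/to/sub-01_run-01_preproc.nii.gz')
# -> ('path/to/sub-01_run-01', '.nii.gz')
drop_bids_suffix('sub-01_task-rest_bold.tsv')
# -> ('sub-01_task-rest', '.tsv')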
if index_col:
index_col = 0
else:
index_col = None
if header:
header = 0
else:
header = None
df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')
if return_meta:
json_fname = fname.replace('tsv', 'json')
meta = pd.read_json(json_fname)
return df, meta
else:
return df | def load_tabular_file(fname, return_meta=False, header=True, index_col=True) | Given a file name, loads it as a pandas data frame.
Parameters
----------
fname : str
file name and path. Must be tsv.
return_meta : bool (default False)
if true, also loads the accompanying json sidecar (same name, .json extension) and returns it as meta information.
header : bool (default True)
if there is a header in the tsv file, true will use first row in file.
index_col : bool (default True)
if there is an index column in the tsv file, true will use the first column in the file.
Returns
-------
df : pandas
The loaded file
info : pandas, if return_meta=True
Meta information from the json file (if return_meta=True) | 1.917804 | 1.945934 | 0.985544 |
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
for f in allowedfileformats:
fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
with open(fname) as fs:
sidecar = json.load(fs)
else:
sidecar = {}
if 'filestatus' not in sidecar:
sidecar['filestatus'] = {}
sidecar['filestatus']['reject'] = False
sidecar['filestatus']['reason'] = []
return sidecar | def get_sidecar(fname, allowedfileformats='default') | Loads sidecar or creates one | 2.457534 | 2.412616 | 1.018618 |
relfun = []
threshold = []
for ec in exclusion_criteria:
if ec[0:2] == '>=':
relfun.append(np.greater_equal)
threshold.append(float(ec[2:]))
elif ec[0:2] == '<=':
relfun.append(np.less_equal)
threshold.append(float(ec[2:]))
elif ec[0] == '>':
relfun.append(np.greater)
threshold.append(float(ec[1:]))
elif ec[0] == '<':
relfun.append(np.less)
threshold.append(float(ec[1:]))
else:
raise ValueError('exclusion criteria must begin with >, <, >= or <=')
return relfun, threshold | def process_exclusion_criteria(exclusion_criteria) | Parses an exclusion critera string to get the function and threshold.
Parameters
----------
exclusion_criteria : list
list of strings where each string is of the format [relation][threshold]. E.g. \'<0.5\' or \'>=1\'
Returns
-------
relfun : list
list of numpy functions for the exclusion criteria
threshold : list
list of floats for threshold for each relfun | 2.13075 | 1.827252 | 1.166095 |
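A hedged sketch of how the parsed functions and thresholds are used downstream (e.g. in set_exclusion_file):

relfun, threshold = process_exclusion_criteria(['>0.2', '<=50'])
relfun[0](0.5, threshold[0])   # np.greater(0.5, 0.2) -> True, i.e. criterion met
relfun[1](60, threshold[1])    # np.less_equal(60, 50) -> False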
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
netshape = pathmat.shape
edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))
reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
for t_ind in range(0, netshape[2]):
paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
if calc == 'global':
reach_lat = np.nansum(reach_lat)
reach_lat = reach_lat / ((netshape[0]) * netshape[2])
elif calc == 'nodes':
reach_lat = np.nansum(reach_lat, axis=1)
reach_lat = reach_lat / (netshape[2])
return reach_lat | def reachability_latency(tnet=None, paths=None, rratio=1, calc='global') | Reachability latency. This is the r-th longest temporal path.
Parameters
---------
tnet : array or dict
Can either be a network (graphlet or contact), binary undirected only, or a paths dictionary (the output of teneto.networkmeasures.shortest_temporal_path).
rratio: float (default: 1)
reachability ratio that the latency is calculated in relation to.
Value must be over 0 and up to 1.
1 (default) - all nodes must be reached.
Other values (e.g. 0.5) imply that only that fraction of nodes must be reached.
This is rounded to the nearest integer.
E.g. if there are 6 nodes [1,2,3,4,5,6], it will be node 4 (due to rounding upwards).
calc : str
what to calculate. Alternatives: 'global' entire network; 'nodes': for each node.
Returns
--------
reach_lat : array
Reachability latency
Notes
------
Reachability latency calculates the time it takes for temporal paths to reach the specified fraction of nodes. | 3.028404 | 2.779521 | 1.089542 |
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Rcoeff = np.zeros(len(staticcommunities))
for i, statcom in enumerate(staticcommunities):
Rcoeff[i] = np.mean(alleg[i, staticcommunities == statcom])
return Rcoeff | def recruitment(temporalcommunities, staticcommunities) | Calculates recruitment coefficient for each node. Recruitment coefficient is the average probability of nodes from the
same static communities being in the same temporal communities at other time-points or during different tasks.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Rcoeff : array
recruitment coefficient for each node
References:
-----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett. A Functional
Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec 2;11(12):e1004533. | 3.982296 | 3.488743 | 1.14147 |
# make sure the static and temporal communities have the same number of nodes
if staticcommunities.shape[0] != temporalcommunities.shape[0]:
raise ValueError(
'Temporal and static communities have different dimensions')
alleg = allegiance(temporalcommunities)
Icoeff = np.zeros(len(staticcommunities))
# calc integration for each node
for i, statcom in enumerate(staticcommunities):
Icoeff[i] = np.mean(alleg[i, staticcommunities != statcom])
return Icoeff | def integration(temporalcommunities, staticcommunities) | Calculates the integration coefficient for each node. Measures the average probability
that a node is in the same community as nodes from other systems.
Parameters:
------------
temporalcommunities : array
temporal communities vector (node,time)
staticcommunities : array
Static communities vector for each node
Returns:
-------
Icoeff : array
integration coefficient for each node
References:
----------
Danielle S. Bassett, Muzhi Yang, Nicholas F. Wymbs, Scott T. Grafton.
Learning-Induced Autonomy of Sensorimotor Systems. Nat Neurosci. 2015 May;18(5):744-51.
Marcelo Mattar, Michael W. Cole, Sharon Thompson-Schill, Danielle S. Bassett.
A Functional Cartography of Cognitive Systems. PLoS Comput Biol. 2015 Dec
2;11(12):e1004533. | 4.464949 | 3.614507 | 1.235286 |
# Process input
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
print('WARNING: assuming connections to be binary when computing intercontacttimes')
# For each edge, take the time points when it is active and compute the
# differences between consecutive activations (t[1:] - t[:-1]).
# The result for each edge pair is stored as a numpy array.
contacts = np.array([[None] * tnet.netshape[0]] * tnet.netshape[0])
if tnet.nettype[1] == 'u':
for i in range(0, tnet.netshape[0]):
for j in range(i + 1, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
contacts[j, i] = np.array(edge_on_diff)
else:
contacts[i, j] = []
contacts[j, i] = []
elif tnet.nettype[1] == 'd':
for i in range(0, tnet.netshape[0]):
for j in range(0, tnet.netshape[0]):
edge_on = tnet.get_network_when(i=i, j=j)['t'].values
if len(edge_on) > 0:
edge_on_diff = edge_on[1:] - edge_on[:-1]
contacts[i, j] = np.array(edge_on_diff)
else:
contacts[i, j] = []
out = {}
out['intercontacttimes'] = contacts
out['nettype'] = tnet.nettype
return out | def intercontacttimes(tnet) | Calculates the intercontacttimes of each edge in a network.
Parameters
-----------
tnet : array, dict
Temporal network (graphlet or contact). Nettype: 'bu', 'bd'
Returns
---------
contacts : dict
Intercontact times as numpy array in dictionary. contacts['intercontacttimes']
Notes
------
The inter-contact times are calculated as the time between consecutive "active" edges (where active means
that the value is 1 in a binary network).
Examples
--------
This example goes through how inter-contact times are calculated.
>>> import teneto
>>> import numpy as np
Make a network with 2 nodes and 4 time-points with 4 edges spaced out.
>>> G = np.zeros([2,2,10])
>>> edge_on = [1,3,5,9]
>>> G[0,1,edge_on] = 1
The network visualised below make it clear what the inter-contact times are between the two nodes:
.. plot::
import teneto
import numpy as np
import matplotlib.pyplot as plt
G = np.zeros([2,2,10])
edge_on = [1,3,5,9]
G[0,1,edge_on] = 1
fig, ax = plt.subplots(1, figsize=(4,2))
teneto.plot.slice_plot(G, ax=ax, cmap='Pastel2')
ax.set_ylim(-0.25, 1.25)
plt.tight_layout()
fig.show()
Calculating the inter-contact times of these edges becomes: 2,2,4 between nodes 0 and 1.
>>> ict = teneto.networkmeasures.intercontacttimes(G)
The function returns a dictionary with the icts in the key: intercontacttimes. This is of the size NxN.
So the icts between nodes 0 and 1 are found by:
>>> ict['intercontacttimes'][0,1]
array([2, 2, 4]) | 3.052711 | 2.980393 | 1.024264 |
# Create report directory
if not os.path.exists(sdir):
os.makedirs(sdir)
# Add a slash to the file directory if not included, to avoid DirNameFileName
# instead of DirName/FileName being created
if sdir[-1] != '/':
sdir += '/'
report_html = '<html><body>'
if 'method' in report.keys():
report_html += "<h1>Method: " + report['method'] + "</h1><p>"
for i in report[report['method']]:
if i == 'taper_window':
fig, ax = plt.subplots(1)
ax.plot(report[report['method']]['taper_window'],
report[report['method']]['taper'])
ax.set_xlabel('Window (time). 0 in middle of window.')
ax.set_title(
'Taper from ' + report[report['method']]['distribution'] + ' distribution (PDF).')
fig.savefig(sdir + 'taper.png')
report_html += "<img src='./taper.png' width=500>" + "<p>"
else:
report_html += "- <b>" + i + "</b>: " + \
str(report[report['method']][i]) + "<br>"
if 'postprocess' in report.keys():
report_html += "<p><h2>Postprocessing:</h2><p>"
report_html += "<b>Pipeline: </b>"
for i in report['postprocess']:
report_html += " " + i + ","
for i in report['postprocess']:
report_html += "<p><h3>" + i + "</h3><p>"
for j in report[i]:
if j == 'lambda':
report_html += "- <b>" + j + "</b>: " + "<br>"
lambda_val = np.array(report['boxcox']['lambda'])
fig, ax = plt.subplots(1)
ax.hist(lambda_val[:, -1])
ax.set_xlabel('lambda')
ax.set_ylabel('frequency')
ax.set_title('Histogram of lambda parameter')
fig.savefig(sdir + 'boxcox_lambda.png')
report_html += "<img src='./boxcox_lambda.png' width=500>" + "<p>"
report_html += "Data located in " + sdir + "boxcox_lambda.csv <p>"
np.savetxt(sdir + "boxcox_lambda.csv",
lambda_val, delimiter=",")
else:
report_html += "- <b>" + j + "</b>: " + \
str(report[i][j]) + "<br>"
report_html += '</body></html>'
with open(sdir + report_name, 'w') as file:
file.write(report_html)
file.close() | def gen_report(report, sdir='./', report_name='report.html') | Generates report of derivation and postprocess steps in teneto.derive | 2.728611 | 2.705617 | 1.008499 |
if init == 1:
self.history = []
self.history.append([fname, fargs]) | def add_history(self, fname, fargs, init=0) | Adds a processing step to TenetoBIDS.history. | 3.481344 | 3.045114 | 1.143256 |
mods = [(m.__name__, m.__version__)
for m in sys.modules.values() if m if hasattr(m, '__version__')]
with open(dirname + '/requirements.txt', 'w') as f:
for m in mods:
m = list(m)
if not isinstance(m[1], str):
m[1] = m[1].decode("utf-8")
f.writelines(m[0] + ' == ' + m[1] + '\n')
with open(dirname + '/TenetoBIDShistory.py', 'w') as f:
f.writelines('import teneto\n')
for func, args in self.history:
f.writelines(func + '(**' + str(args) + ')\n')
with open(dirname + '/tenetoinfo.json', 'w') as f:
json.dump(self.tenetoinfo, f) | def export_history(self, dirname) | Exports TenetoBIDShistory.py, tenetoinfo.json, requirements.txt (modules currently imported) to dirname
Parameters
---------
dirname : str
directory to export entire TenetoBIDS history. | 3.08671 | 2.335047 | 1.321905 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
confound_files = self.get_selected_files(quiet=1, pipeline='confound')
if confound_files:
confounds_exist = True
else:
confounds_exist = False
if not confound_corr_report:
confounds_exist = False
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._derive_temporalnetwork, f, i, tag, params,
confounds_exist, confound_files) for i, f in enumerate(files) if f}
for j in as_completed(job):
j.result()
if update_pipeline == True:
if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('tvc')
self.set_bids_suffix('tvcconn') | def derive_temporalnetwork(self, params, update_pipeline=True, tag=None, njobs=1, confound_corr_report=True) | Derive time-varying connectivity on the selected files.
Parameters
----------
params : dict.
See teneto.timeseries.derive_temporalnetwork for the structure of the param dictionary. Assumes dimord is time,node (output of other TenetoBIDS functions)
update_pipeline : bool
If true, the object updates the selected files with those derived here.
njobs : int
How many parallel jobs to run
confound_corr_report : bool
If true, histograms and summary statistics of TVC and confounds are plotted in a report directory.
tag : str
any additional tag that will be placed in the saved file name. Will be placed as 'desc-[tag]'
Returns
-------
dfc : files
saved in .../derivatives/teneto/sub-xxx/tvc/..._tvc.npy | 3.936603 | 3.678212 | 1.070249 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
files = self.get_selected_files(quiet=1)
R_group = []
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files}
for j in as_completed(job):
R_group.append(j.result())
if returngroup:
# Fisher transform -> mean -> inverse Fisher transform
R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0))
return np.array(R_group) | def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None) | Makes connectivity matrix for each of the subjects.
Parameters
----------
returngroup : bool, default=False
If true, returns the group average connectivity matrix.
njobs : int
How many parallel jobs to run
file_idx : bool
Default False, true if to ignore index column in loaded file.
file_hdr : bool
Default False, true if to ignore header row in loaded file.
Returns
-------
Saves data in derivatives/teneto_<version>/.../fc/
R_group : array
if returngroup is true, the average connectivity matrix is returned. | 3.989188 | 4.038682 | 0.987745 |
file_name = f.split('/')[-1].split('.')[0]
if tag != '':
tag = '_' + tag
if suffix:
file_name, _ = drop_bids_suffix(file_name)
save_name = file_name + tag
save_name += '_' + suffix
else:
save_name = file_name + tag
paths_post_pipeline = f.split(self.pipeline)
if self.pipeline_subdir:
paths_post_pipeline = paths_post_pipeline[1].split(self.pipeline_subdir)[
0]
else:
paths_post_pipeline = paths_post_pipeline[1].split(file_name)[0]
base_dir = self.BIDS_dir + '/derivatives/' + 'teneto_' + \
teneto.__version__ + '/' + paths_post_pipeline + '/'
save_dir = base_dir + '/' + save_directory + '/'
if not os.path.exists(save_dir):
# A case has happened where this has been done in parallel and an error was raised. So do try/except
try:
os.makedirs(save_dir)
except:
# Wait 2 seconds so that the error does not try and save something in the directory before it is created
time.sleep(2)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json'):
try:
with open(self.BIDS_dir + '/derivatives/' + 'teneto_' + teneto.__version__ + '/dataset_description.json', 'w') as fs:
json.dump(self.tenetoinfo, fs)
except:
# Same as above, just in case parallel does duplication
time.sleep(2)
return save_name, save_dir, base_dir | def _save_namepaths_bids_derivatives(self, f, tag, save_directory, suffix=None) | Creates output directory and output name
Parameters
---------
f : str
input files, includes the file bids_suffix
tag : str
what should be added to f in the output file.
save_directory : str
additional directory that the output file should go in
suffix : str
add new suffix to data
Returns
-------
save_name : str
previous filename with new tag
save_dir : str
directory where it will be saved
base_dir : str
subject-specific base directory (i.e. derivatives/teneto/func[/anythingelse/]) | 3.698952 | 3.384067 | 1.093049 |
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives(quiet)
else:
if tag == 'sub':
datapath = self.BIDS_dir + '/derivatives/' + self.pipeline + '/'
tag_alternatives = [
f.split('sub-')[1] for f in os.listdir(datapath) if os.path.isdir(datapath + f) and 'sub-' in f]
elif tag == 'ses':
tag_alternatives = []
for sub in self.bids_tags['sub']:
tag_alternatives += [f.split('ses-')[1] for f in os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/' + 'sub-' + sub) if 'ses' in f]
tag_alternatives = set(tag_alternatives)
else:
files = self.get_selected_files(quiet=1)
tag_alternatives = []
for f in files:
f = f.split('.')[0]
f = f.split('/')[-1]
tag_alternatives += [t.split('-')[1]
for t in f.split('_') if t.split('-')[0] == tag]
tag_alternatives = set(tag_alternatives)
if quiet == 0:
print(tag + ' alternatives: ' + ', '.join(tag_alternatives))
return list(tag_alternatives) | def get_tags(self, tag, quiet=1) | Returns which tag alternatives can be identified in the BIDS derivatives structure. | 2.570588 | 2.368181 | 1.085469 |
if not os.path.exists(self.BIDS_dir + '/derivatives/'):
print('Derivative directory not found. Is the data preprocessed?')
else:
pipeline_alternatives = os.listdir(self.BIDS_dir + '/derivatives/')
if quiet == 0:
print('Derivative alternatives: ' +
', '.join(pipeline_alternatives))
return list(pipeline_alternatives) | def get_pipeline_alternatives(self, quiet=0) | The pipeline are the different outputs that are placed in the ./derivatives directory.
get_pipeline_alternatives gets those which are found in the specified BIDS directory structure. | 3.615895 | 3.293552 | 1.097871 |
if not self.pipeline:
print('Please set pipeline first.')
self.get_pipeline_alternatives()
else:
pipeline_subdir_alternatives = []
for s in self.bids_tags['sub']:
derdir_files = os.listdir(
self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/')
pipeline_subdir_alternatives += [
f for f in derdir_files if os.path.isdir(self.BIDS_dir + '/derivatives/' + self.pipeline + '/sub-' + s + '/func/' + f)]
pipeline_subdir_alternatives = set(pipeline_subdir_alternatives)
if quiet == 0:
print('Pipeline_subdir alternatives: ' +
', '.join(pipeline_subdir_alternatives))
return list(pipeline_subdir_alternatives) | def get_pipeline_subdir_alternatives(self, quiet=0) | Note
-----
This function currently returns the wrong folders and will be fixed in the future.
This function should return ./derivatives/pipeline/sub-xx/[ses-yy/][func/]/pipeline_subdir
But it does not care about ses-yy at the moment. | 2.786347 | 2.677234 | 1.040756 |
self.add_history(inspect.stack()[0][3], locals(), 1)
if isinstance(confound, str):
confound = [confound]
if isinstance(exclusion_criteria, str):
exclusion_criteria = [exclusion_criteria]
if isinstance(confound_stat, str):
confound_stat = [confound_stat]
if len(exclusion_criteria) != len(confound):
raise ValueError(
'Same number of confound names and exclusion criteria must be given')
if len(confound_stat) != len(confound):
raise ValueError(
'Same number of confound names and confound stats must be given')
relex, crit = process_exclusion_criteria(exclusion_criteria)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
bad_files = []
bs = 0
foundconfound = []
foundreason = []
for s, cfile in enumerate(confound_files):
df = load_tabular_file(cfile, index_col=None)
found_bad_subject = False
for i, _ in enumerate(confound):
if confound_stat[i] == 'median':
if relex[i](df[confound[i]].median(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'mean':
if relex[i](df[confound[i]].mean(), crit[i]):
found_bad_subject = True
elif confound_stat[i] == 'std':
if relex[i](df[confound[i]].std(), crit[i]):
found_bad_subject = True
if found_bad_subject:
foundconfound.append(confound[i])
foundreason.append(exclusion_criteria[i])
if found_bad_subject:
bad_files.append(files[s])
bs += 1
self.set_bad_files(
bad_files, reason='excluded file (confound over specified stat threshold)')
for i, f in enumerate(bad_files):
sidecar = get_sidecar(f)
sidecar['file_exclusion'] = {}
sidecar['confound'] = foundconfound[i]
sidecar['threshold'] = foundreason[i]
for af in ['.tsv', '.nii.gz']:
f = f.split(af)[0]
f += '.json'
with open(f, 'w') as fs:
json.dump(sidecar, fs)
print('Removed ' + str(bs) + ' files from inclusion.') | def set_exclusion_file(self, confound, exclusion_criteria, confound_stat='mean') | Excludes subjects given a certain exclusion criteria.
Parameters
----------
confound : str or list
string or list of confound name(s) from confound files
exclusion_criteria : str or list
for each confound, an exclusion criterion should be expressed as a string. It starts with >, <, >= or <=, followed by the numerical threshold. Ex. '>0.2' entails that every subject whose average confound value is greater than 0.2 will be rejected.
confound_stat : str or list
Can be median, mean, or std. How the confound data is aggregated (if there is a measure per time-point, it is averaged over all time points). If multiple confounds are specified, this has to be a list.
Returns
--------
calls TenetoBIDS.set_bad_files with the files meeting the exclusion criteria. | 2.879331 | 2.792995 | 1.030912 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
parc_name = parcellation.split('_')[0].lower()
# Check confounds have been specified
if not self.confounds and removeconfounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first.')
# Check confounds have been specified
if update_pipeline == False and removeconfounds:
raise ValueError(
'Pipeline must be updated in order to remove confounds within this function.')
# In theory these should be the same. So at the moment, it goes through each element and checks they are matched.
# A matching algorithm may be needed if cases arise where this isn't the case
files = self.get_selected_files(quiet=1)
# Load network communities, if possible.
self.set_network_communities(parcellation, netn=yeonetworkn)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if not parc_params:
parc_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_make_parcellation, f, i, tag, parcellation,
parc_name, parc_type, parc_params) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
if update_pipeline == True:
if not self.confound_pipeline and len(self.get_selected_files(quiet=1, pipeline='confound')) > 0:
self.set_confound_pipeline(self.pipeline)
self.set_pipeline('teneto_' + teneto.__version__)
self.set_pipeline_subdir('parcellation')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]})
self.set_bids_suffix('roi')
if removeconfounds:
self.removeconfounds(
clean_params=clean_params, transpose=None, njobs=njobs) | def make_parcellation(self, parcellation, parc_type=None, parc_params=None, network='defaults', update_pipeline=True, removeconfounds=False, tag=None, njobs=None, clean_params=None, yeonetworkn=None) | Reduces the data from voxel to parcellation space. Files get saved in a teneto folder in the derivatives with a roi tag at the end.
Parameters
-----------
parcellation : str
specify which parcellation that you would like to use. For MNI: 'power2012_264', 'gordon2014_333'. TAL: 'shen2013_278'
parc_type : str
can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation will be used.
parc_params : dict
**kwargs for nilearn functions
network : str
if "defaults", it selects static parcellation, _if available_ (other options will be made available soon).
removeconfounds : bool
if true, regresses out confounds that are specfied in self.set_confounds with linear regression.
update_pipeline : bool
TenetoBIDS gets updated with the parcellated files being selected.
tag : str or list
any additional tag that must be in file name. After the tag there must either be a underscore or period (following bids).
clean_params : dict
**kwargs for nilearn function nilearn.signal.clean
yeonetworkn : int (7 or 17)
Only relevant for when parcellation is schaeffer2018. Use 7 or 17 template networks
njobs : n
number of processes to run. Overrides TenetoBIDS.njobs
Returns
-------
Files are saved in ./BIDS_dir/derivatives/teneto_<version>/.../parcellation/.
To load these files call TenetoBIDS.load_parcellation.
NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication. | 5.05477 | 4.756377 | 1.062735 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if community_type == 'temporal':
files = self.get_selected_files(quiet=True)
# Run check to make sure files are tvc input
for f in files:
if 'tvc' not in f:
raise ValueError(
'tvc tag not found in filename. TVC data must be used in communitydetection (perhaps run TenetoBIDS.derive first?).')
elif community_type == 'static':
files = self.get_selected_files(
quiet=True, pipeline='functionalconnectivity')
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(self._run_communitydetection, f, community_detection_params, community_type, file_hdr,
file_idx, tag) for i, f in enumerate(files) if all([t + '_' in f or t + '.' in f for t in tag])}
for j in as_completed(job):
j.result() | def communitydetection(self, community_detection_params, community_type='temporal', tag=None, file_hdr=False, file_idx=False, njobs=None) | Calls temporal_louvain_with_consensus on connectivity data
Parameters
----------
community_detection_params : dict
kwargs for detection. See teneto.communitydetection.louvain.temporal_louvain_with_consensus
community_type : str
Either 'temporal' or 'static'. If temporal, community is made per time-point for each timepoint.
file_idx : bool (default false)
if true, index column present in data and this will be ignored
file_hdr : bool (default false)
if true, header row present in data and this will be ignored
njobs : int
number of processes to run. Overrides TenetoBIDS.njobs
Note
----
All non-positive edges are made to zero.
Returns
-------
List of communities for each subject. Saved in BIDS_dir/derivatives/teneto/communitydetection/ | 5.442759 | 5.180504 | 1.050623 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
if not self.confounds and not confounds:
raise ValueError(
'Specified confounds are not found. Make sure that you have run self.set_confounds([\'Confound1\',\'Confound2\']) first or pass confounds as input to function.')
if not tag:
tag = ''
else:
tag = 'desc-' + tag
if confounds:
self.set_confounds(confounds)
files = sorted(self.get_selected_files(quiet=1))
confound_files = sorted(
self.get_selected_files(quiet=1, pipeline='confound'))
files, confound_files = confound_matching(files, confound_files)
if not clean_params:
clean_params = {}
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_removeconfounds, f, confound_files[i], clean_params, transpose, overwrite, tag) for i, f in enumerate(files)}
for j in as_completed(job):
j.result()
self.set_pipeline('teneto_' + teneto.__version__)
self.set_bids_suffix('roi')
if tag:
self.set_bids_tags({'desc': tag.split('-')[1]}) | def removeconfounds(self, confounds=None, clean_params=None, transpose=None, njobs=None, update_pipeline=True, overwrite=True, tag=None) | Removes specified confounds using nilearn.signal.clean
Parameters
----------
confounds : list
List of confounds. Can be prespecified in set_confounds
clean_params : dict
Dictionary of kawgs to pass to nilearn.signal.clean
transpose : bool (default False)
Default removeconfounds works on time,node dimensions. Pass transpose=True to transpose pre and post confound removal.
njobs : int
Number of jobs. Otherwise tenetoBIDS.njobs is run.
update_pipeline : bool
update pipeline with '_clean' tag for new files created
overwrite : bool
tag : str
Returns
-------
Says all TenetBIDS.get_selected_files with confounds removed with _rmconfounds at the end.
Note
----
There may be some issues regarding loading non-cleaned data through the TenetoBIDS functions instead of the cleaned data. This depends on when you clean the data. | 4.260266 | 3.968511 | 1.073518 |
if not njobs:
njobs = self.njobs
self.add_history(inspect.stack()[0][3], locals(), 1)
# measure can be string or list
if isinstance(measure, str):
measure = [measure]
# measure_params can be dictionaary or list of dictionaries
if isinstance(measure_params, dict):
measure_params = [measure_params]
if measure_params and len(measure) != len(measure_params):
raise ValueError('Number of identified measure_params (' + str(len(measure_params)) +
') differs from number of identified measures (' + str(len(measure)) + '). Leave blank dictionary if default methods are wanted')
files = self.get_selected_files(quiet=1)
if not tag:
tag = ''
else:
tag = 'desc-' + tag
with ProcessPoolExecutor(max_workers=njobs) as executor:
job = {executor.submit(
self._run_networkmeasures, f, tag, measure, measure_params) for f in files}
for j in as_completed(job):
j.result() | def networkmeasures(self, measure=None, measure_params=None, tag=None, njobs=None) | Calculates a network measure
For available funcitons see: teneto.networkmeasures
Parameters
----------
measure : str or list
Name of function(s) from teneto.networkmeasures that will be run.
measure_params : dict or list of dictionaries
Containing kwargs for the argument in measure.
See note regarding Communities key.
tag : str
Add additional tag to saved filenames.
Note
----
In measure_params, the communities key can equal 'template', 'static', or 'temporal'.
These options must be precalculated. If template, Teneto tries to load default for parcellation. If static, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-static....npy. If temporal, loads static communities
in BIDS_dir/teneto_<version>/sub-.../func/communities/..._communitytype-temporal....npy
Returns
-------
Saves in ./BIDS_dir/derivatives/teneto/sub-NAME/func//temporalnetwork/MEASURE/
Load the measure with tenetoBIDS.load_network_measure | 3.711886 | 3.895005 | 0.952986 |
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + confound_pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.confound_pipeline = confound_pipeline | def set_confound_pipeline(self, confound_pipeline) | There may be times when the pipeline is updated (e.g. teneto) but you want the confounds from the preprocessing pipeline (e.g. fmriprep).
To do this, you set the confound_pipeline to be the preprocessing pipeline where the confound files are.
Parameters
----------
confound_pipeline : str
Directory in the BIDS_dir where the confounds file is. | 8.540937 | 8.295675 | 1.029565 |
self.add_history(inspect.stack()[0][3], locals(), 1)
self.bids_suffix = bids_suffix | def set_bids_suffix(self, bids_suffix) | The last analysis step is the final tag that is present in files. | 6.845271 | 6.459279 | 1.059758 |
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
print('Specified derivative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.pipeline = pipeline | def set_pipeline(self, pipeline) | Specify the pipeline. See get_pipeline_alternatives to see what are available. Input should be a string. | 10.255918 | 7.598383 | 1.34975 |
print('--- DATASET INFORMATION ---')
print('--- Subjects ---')
if self.raw_data_exists:
if self.BIDS.get_subjects():
print('Number of subjects (in dataset): ' +
str(len(self.BIDS.get_subjects())))
print('Subjects (in dataset): ' +
', '.join(self.BIDS.get_subjects()))
else:
print('NO SUBJECTS FOUND (is the BIDS directory specified correctly?)')
print('Number of subjects (selected): ' +
str(len(self.bids_tags['sub'])))
print('Subjects (selected): ' + ', '.join(self.bids_tags['sub']))
if isinstance(self.bad_subjects, list):
print('Bad subjects: ' + ', '.join(self.bad_subjects))
else:
print('Bad subjects: 0')
print('--- Tasks ---')
if self.raw_data_exists:
if self.BIDS.get_tasks():
print('Number of tasks (in dataset): ' +
str(len(self.BIDS.get_tasks())))
print('Tasks (in dataset): ' + ', '.join(self.BIDS.get_tasks()))
if 'task' in self.bids_tags:
print('Number of tasks (selected): ' +
str(len(self.bids_tags['task'])))
print('Tasks (selected): ' + ', '.join(self.bids_tags['task']))
else:
print('No task names found')
print('--- Runs ---')
if self.raw_data_exists:
if self.BIDS.get_runs():
print('Number of runs (in dataset): ' +
str(len(self.BIDS.get_runs())))
print('Runs (in dataset): ' + ', '.join(self.BIDS.get_runs()))
if 'run' in self.bids_tags:
print('Number of runs (selected): ' +
str(len(self.bids_tags['run'])))
print('Runs (selected): ' + ', '.join(self.bids_tags['run']))
else:
print('No run names found')
print('--- Sessions ---')
if self.raw_data_exists:
if self.BIDS.get_sessions():
print('Number of sessions (in dataset): ' +
str(len(self.BIDS.get_sessions())))
print('Sessions (in dataset): ' +
', '.join(self.BIDS.get_sessions()))
if 'ses' in self.bids_tags:
print('Number of sessions (selected): ' +
str(len(self.bids_tags['ses'])))
print('Sessions (selected): ' + ', '.join(self.bids_tags['ses']))
else:
print('No session names found')
print('--- PREPROCESSED DATA (Pipelines/Derivatives) ---')
if not self.pipeline:
print('Derivative pipeline not set. To set, run TN.set_pipeline()')
else:
print('Pipeline: ' + self.pipeline)
if self.pipeline_subdir:
print('Pipeline subdirectories: ' + self.pipeline_subdir)
selected_files = self.get_selected_files(quiet=1)
if selected_files:
print('--- SELECTED DATA ---')
print('Number of selected files: ' + str(len(selected_files)))
print('\n - '.join(selected_files)) | def print_dataset_summary(self) | Prints information about the BIDS data and the files currently selected. | 1.947178 | 1.886012 | 1.032431 |
if fname[-4:] != '.pkl':
fname += '.pkl'
with open(fname, 'rb') as f:
tnet = pickle.load(f)
if reload_object:
reloadnet = teneto.TenetoBIDS(tnet.BIDS_dir, pipeline=tnet.pipeline, pipeline_subdir=tnet.pipeline_subdir, bids_tags=tnet.bids_tags, bids_suffix=tnet.bids_suffix,
bad_subjects=tnet.bad_subjects, confound_pipeline=tnet.confound_pipeline, raw_data_exists=tnet.raw_data_exists, njobs=tnet.njobs)
reloadnet.history = tnet.history
tnet = reloadnet
return tnet | def load_frompickle(cls, fname, reload_object=False) | Load a saved TenetoBIDS instance from a pickle file.
Parameters
----------
fname : str
path to pickle object (output of TenetoBIDS.save_aspickle)
reload_object : bool (default False)
reloads object by calling teneto.TenetoBIDS (some information lost, for development)
Returns
-------
self :
TenetoBIDS instance | 4.056008 | 3.802735 | 1.066603 |
if datatype == 'temporalnetwork' and not measure:
raise ValueError(
'When datatype is temporalnetwork, \'measure\' must also be specified.')
self.add_history(inspect.stack()[0][3], locals(), 1)
data_list = []
trialinfo_list = []
for s in self.bids_tags['sub']:
# Define base folder
base_path, file_list, datainfo = self._get_filelist(
datatype, s, tag, measure=measure)
if base_path:
for f in file_list:
# Include only if all analysis step tags are present
# Get all BIDS tags. i.e. in 'sub-AAA', get 'sub' as key and 'AAA' as item.
# Ignore if tsv file is empty
try:
filetags = get_bids_tag(f, 'all')
data_list.append(load_tabular_file(base_path + f))
# Only return trialinfo if datatype is trlinfo
if datainfo == 'trlinfo':
trialinfo_list.append(
pd.DataFrame(filetags, index=[0]))
except pd.errors.EmptyDataError:
pass
# If group data and length of output is one, don't make it a list
if datatype == 'group' and len(data_list) == 1:
data_list = data_list[0]
if measure:
data_list = {measure: data_list}
setattr(self, datatype + '_data_', data_list)
if trialinfo_list:
out_trialinfo = pd.concat(trialinfo_list)
out_trialinfo.reset_index(inplace=True, drop=True)
setattr(self, datatype + '_trialinfo_', out_trialinfo) | def load_data(self, datatype='tvc', tag=None, measure='') | Function loads time-varying connectivity estimates created by the TenetoBIDS.derive function.
The default grabs all data (in numpy arrays) in the teneto/../func/tvc/ directory.
Data is placed in teneto.tvc_data_
Parameters
----------
datatype : str
\'tvc\', \'parcellation\', \'participant\', \'temporalnetwork\'
tag : str or list
any additional tag that must be in the file name. After the tag there must either be an underscore or a period (following BIDS).
measure : str
Required when datatype is temporalnetwork. A networkmeasure that should be loaded.
Returns
-------
tvc_data_ : numpy array
Containing the parcellation data. Each file is appended to the first dimension of the numpy array.
tvc_trialinfo_ : pandas data frame
Containing the subject info (all BIDS tags) in the numpy array. | 5.143754 | 4.640587 | 1.108427 |
'''
Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
:close: array
temporal closeness centrality (nodal measure)
'''
if tnet is not None and paths is not None:
raise ValueError('Only network or path input allowed.')
if tnet is None and paths is None:
raise ValueError('No input.')
# if shortest paths are not calculated, calculate them
if tnet is not None:
paths = shortest_temporal_path(tnet)
pathmat = np.zeros([paths[['from', 'to']].max().max(
)+1, paths[['from', 'to']].max().max()+1, paths[['t_start']].max().max()+1]) * np.nan
pathmat[paths['from'].values, paths['to'].values,
paths['t_start'].values] = paths['temporal-distance']
closeness = np.nansum(1 / np.nanmean(pathmat, axis=2),
axis=1) / (pathmat.shape[1] - 1)
return closeness | def temporal_closeness_centrality(tnet=None, paths=None) | Returns temporal closeness centrality per node.
Parameters
-----------
Input should be *either* tnet or paths.
data : array or dict
Temporal network input (graphlet or contact). nettype: 'bu', 'bd'.
paths : pandas dataframe
Output of TenetoBIDS.networkmeasure.shortest_temporal_paths
Returns
--------
:close: array
temporal closeness centrality (nodal measure) | 6.580522 | 2.403835 | 2.73751 |
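# Usage sketch (not part of the source table): assuming teneto is installed and exposes
# this function as teneto.networkmeasures.temporal_closeness_centrality, a small binary
# undirected temporal network (node, node, time) can be passed directly via `tnet`.
import numpy as np
import teneto

tnet = np.zeros((3, 3, 4))
tnet[0, 1, 0] = tnet[1, 0, 0] = 1   # edge 0-1 active at t=0
tnet[1, 2, 2] = tnet[2, 1, 2] = 1   # edge 1-2 active at t=2

close = teneto.networkmeasures.temporal_closeness_centrality(tnet=tnet)
print(close)  # one closeness value per node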
if isinstance(reducer, str):
reducer = REDUCER_DICT[reducer]
flat_dict = {}
def _flatten(d, parent=None):
for key, value in six.viewitems(d):
flat_key = reducer(parent, key)
if isinstance(value, Mapping):
_flatten(value, flat_key)
else:
if inverse:
flat_key, value = value, flat_key
if flat_key in flat_dict:
raise ValueError("duplicated key '{}'".format(flat_key))
flat_dict[flat_key] = value
_flatten(d)
return flat_dict | def flatten(d, reducer='tuple', inverse=False) | Flatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be flattened.
reducer: {'tuple', 'path', function} (default: 'tuple')
The key joining method. If a function is given, the function will be
used to reduce.
'tuple': The resulting key will be tuple of the original keys
'path': Use ``os.path.join`` to join keys.
inverse: bool (default: False)
Whether you want to invert the resulting key and value.
Returns
-------
flat_dict: dict | 2.311543 | 2.538382 | 0.910636 |
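# Usage sketch (not part of the source table): assuming this `flatten` is the one
# shipped by the flatten-dict package, keys are joined as tuples by default or as
# os.path-style paths with reducer='path'.
from flatten_dict import flatten

nested = {'a': {'b': 1, 'c': {'d': 2}}}
print(flatten(nested))                  # {('a', 'b'): 1, ('a', 'c', 'd'): 2}
print(flatten(nested, reducer='path'))  # {'a/b': 1, 'a/c/d': 2} on POSIX systems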
assert keys
key = keys[0]
if len(keys) == 1:
if key in d:
raise ValueError("duplicated key '{}'".format(key))
d[key] = value
return
d = d.setdefault(key, {})
nested_set_dict(d, keys[1:], value) | def nested_set_dict(d, keys, value) | Set a value to a sequence of nested keys
Parameters
----------
d: Mapping
keys: Sequence[str]
value: Any | 2.346478 | 2.699527 | 0.869218 |
if isinstance(splitter, str):
splitter = SPLITTER_DICT[splitter]
unflattened_dict = {}
for flat_key, value in six.viewitems(d):
if inverse:
flat_key, value = value, flat_key
key_tuple = splitter(flat_key)
nested_set_dict(unflattened_dict, key_tuple, value)
return unflattened_dict | def unflatten(d, splitter='tuple', inverse=False) | Unflatten dict-like object.
Parameters
----------
d: dict-like object
The dict that will be unflattened.
splitter: {'tuple', 'path', function} (default: 'tuple')
The key splitting method. If a function is given, the function will be
used to split.
'tuple': Use each element in the tuple key as the key of the unflattened dict.
'path': Use ``pathlib.Path.parts`` to split keys.
inverse: bool (default: False)
Whether you want to invert the key and value before unflattening.
Returns
-------
unflattened_dict: dict | 2.80591 | 3.133884 | 0.895346 |
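# Round-trip sketch (not part of the source table): unflatten reverses flatten when
# the same key-joining convention is used on both sides (tuple keys here).
from flatten_dict import flatten, unflatten

nested = {'a': {'b': 1, 'c': {'d': 2}}}
flat = flatten(nested)             # {('a', 'b'): 1, ('a', 'c', 'd'): 2}
assert unflatten(flat) == nested   # tuple splitter is the default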
if not HAS_MATPLOTLIB:
raise ImportError("matplotlib package is required for plotting "
"supports.")
fig, ax = plt.subplots()
plot_pianoroll(ax, track.pianoroll, track.is_drum, beat_resolution,
downbeats, preset=preset, cmap=cmap, xtick=xtick,
ytick=ytick, xticklabel=xticklabel, yticklabel=yticklabel,
tick_loc=tick_loc, tick_direction=tick_direction,
label=label, grid=grid, grid_linestyle=grid_linestyle,
grid_linewidth=grid_linewidth)
if filename is not None:
plt.savefig(filename)
return fig, ax | def plot_track(track, filename=None, beat_resolution=None, downbeats=None,
preset='default', cmap='Blues', xtick='auto', ytick='octave',
xticklabel=True, yticklabel='auto', tick_loc=None,
tick_direction='in', label='both', grid='both',
grid_linestyle=':', grid_linewidth=.5) | Plot the pianoroll or save a plot of the pianoroll.
Parameters
----------
filename :
The filename to which the plot is saved. If None, save nothing.
beat_resolution : int
The number of time steps used to represent a beat. Required and only
effective when `xtick` is 'beat'.
downbeats : list
An array that indicates whether the time step contains a downbeat
(i.e., the first time step of a bar).
preset : {'default', 'plain', 'frame'}
A string that indicates the preset theme to use.
- In 'default' preset, the ticks, grid and labels are on.
- In 'frame' preset, the ticks and grid are both off.
- In 'plain' preset, the x- and y-axis are both off.
cmap : `matplotlib.colors.Colormap`
The colormap to use in :func:`matplotlib.pyplot.imshow`. Defaults to
'Blues'. Only effective when `pianoroll` is 2D.
xtick : {'auto', 'beat', 'step', 'off'}
A string that indicates what to use as ticks along the x-axis. If
'auto' is given, automatically set to 'beat' if `beat_resolution` is
also given and set to 'step', otherwise. Defaults to 'auto'.
ytick : {'octave', 'pitch', 'off'}
A string that indicates what to use as ticks along the y-axis.
Defaults to 'octave'.
xticklabel : bool
Whether to add tick labels along the x-axis. Only effective when
`xtick` is not 'off'.
yticklabel : {'auto', 'name', 'number', 'off'}
If 'name', use octave name and pitch name (key name when `is_drum`
is True) as tick labels along the y-axis. If 'number', use pitch
number. If 'auto', set to 'name' when `ytick` is 'octave' and
'number' when `ytick` is 'pitch'. Defaults to 'auto'. Only effective
when `ytick` is not 'off'.
tick_loc : tuple or list
The locations to put the ticks. Availables elements are 'bottom',
'top', 'left' and 'right'. Defaults to ('bottom', 'left').
tick_direction : {'in', 'out', 'inout'}
A string that indicates where to put the ticks. Defaults to 'in'.
Only effective when one of `xtick` and `ytick` is on.
label : {'x', 'y', 'both', 'off'}
A string that indicates whether to add labels to the x-axis and
y-axis. Defaults to 'both'.
grid : {'x', 'y', 'both', 'off'}
A string that indicates whether to add grids to the x-axis, y-axis,
both or neither. Defaults to 'both'.
grid_linestyle : str
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linestyle'
argument.
grid_linewidth : float
Will be passed to :meth:`matplotlib.axes.Axes.grid` as 'linewidth'
argument.
Returns
-------
fig : `matplotlib.figure.Figure` object
A :class:`matplotlib.figure.Figure` object.
ax : `matplotlib.axes.Axes` object
A :class:`matplotlib.axes.Axes` object. | 1.866401 | 1.99786 | 0.9342 |
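# Usage sketch (not part of the source table): assuming the pypianoroll 0.5.x API,
# where Track and plot_track are importable from the package root.
import numpy as np
from pypianoroll import Track, plot_track

pianoroll = np.zeros((96, 128), dtype=np.uint8)
pianoroll[:24, 60] = 100   # hold C4 for one beat (24 time steps)
track = Track(pianoroll=pianoroll, program=0, is_drum=False, name='piano')
fig, ax = plot_track(track, filename='track.png', beat_resolution=24, xtick='beat')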
if track is not None:
if not isinstance(track, Track):
raise TypeError("`track` must be a pypianoroll.Track instance.")
track.check_validity()
else:
track = Track(pianoroll, program, is_drum, name)
self.tracks.append(track) | def append_track(self, track=None, pianoroll=None, program=0, is_drum=False,
name='unknown') | Append a multitrack.Track instance to the track list or create a new
multitrack.Track object and append it to the track list.
Parameters
----------
track : pianoroll.Track
A :class:`pypianoroll.Track` instance to be appended to the track
list.
pianoroll : np.ndarray, shape=(n_time_steps, 128)
A pianoroll matrix. The first and second dimension represents time
and pitch, respectively. Available datatypes are bool, int and
float. Only effective when `track` is None.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
Only effective when `track` is None.
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False. Only effective when `track` is None.
name : str
The name of the track. Defaults to 'unknown'. Only effective when
`track` is None.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set | 2.723031 | 2.831453 | 0.961708 |
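# Usage sketch (not part of the source table): assuming the pypianoroll 0.5.x
# Multitrack constructor; one track is appended as a Track instance and another is
# built from a raw pianoroll array.
import numpy as np
from pypianoroll import Multitrack, Track

roll = np.zeros((96, 128), dtype=np.uint8)
roll[:24, 60] = 80

multitrack = Multitrack(tracks=[], tempo=120.0, beat_resolution=24, name='demo')
multitrack.append_track(track=Track(pianoroll=roll, program=0, name='piano'))
multitrack.append_track(pianoroll=roll, program=0, is_drum=True, name='drums')
print(len(multitrack.tracks))  # 2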
# tracks
for track in self.tracks:
if not isinstance(track, Track):
raise TypeError("`tracks` must be a list of "
"`pypianoroll.Track` instances.")
track.check_validity()
# tempo
if not isinstance(self.tempo, np.ndarray):
raise TypeError("`tempo` must be int or a numpy array.")
elif not np.issubdtype(self.tempo.dtype, np.number):
raise TypeError("Data type of `tempo` must be a subdtype of "
"np.number.")
elif self.tempo.ndim != 1:
raise ValueError("`tempo` must be a 1D numpy array.")
if np.any(self.tempo <= 0.0):
raise ValueError("`tempo` should contain only positive numbers.")
# downbeat
if self.downbeat is not None:
if not isinstance(self.downbeat, np.ndarray):
raise TypeError("`downbeat` must be a numpy array.")
if not np.issubdtype(self.downbeat.dtype, np.bool_):
raise TypeError("Data type of `downbeat` must be bool.")
if self.downbeat.ndim != 1:
raise ValueError("`downbeat` must be a 1D numpy array.")
# beat_resolution
if not isinstance(self.beat_resolution, int):
raise TypeError("`beat_resolution` must be int.")
if self.beat_resolution < 1:
raise ValueError("`beat_resolution` must be a positive integer.")
# name
if not isinstance(self.name, string_types):
raise TypeError("`name` must be a string.") | def check_validity(self) | Raise an error if any invalid attribute found.
Raises
------
TypeError
If an attribute has an invalid type.
ValueError
If an attribute has an invalid value (of the correct type). | 1.885514 | 1.867777 | 1.009497 |
for track in self.tracks:
track.clip(lower, upper) | def clip(self, lower=0, upper=127) | Clip the pianorolls of all tracks by the given lower and upper bounds.
Parameters
----------
lower : int or float
The lower bound to clip the pianorolls. Defaults to 0.
upper : int or float
The upper bound to clip the pianorolls. Defaults to 127. | 5.518245 | 4.178946 | 1.320487 |
active_length = 0
for track in self.tracks:
now_length = track.get_active_length()
if active_length < track.get_active_length():
active_length = now_length
return active_length | def get_active_length(self) | Return the maximum active length (i.e., without trailing silence) among
the pianorolls of all tracks. The unit is time step.
Returns
-------
active_length : int
The maximum active length (i.e., without trailing silence) among the
pianorolls of all tracks. The unit is time step. | 3.03312 | 3.09052 | 0.981427 |
lowest, highest = self.tracks[0].get_active_pitch_range()
if len(self.tracks) > 1:
for track in self.tracks[1:]:
low, high = track.get_active_pitch_range()
if low < lowest:
lowest = low
if high > highest:
highest = high
return lowest, highest | def get_active_pitch_range(self) | Return the active pitch range of the pianorolls of all tracks as a tuple
(lowest, highest).
Returns
-------
lowest : int
The lowest active pitch among the pianorolls of all tracks.
highest : int
The highest active pitch among the pianorolls of all tracks.
if self.downbeat is None:
return []
downbeat_steps = np.nonzero(self.downbeat)[0].tolist()
return downbeat_steps | def get_downbeat_steps(self) | Return the indices of time steps that contain downbeats.
Returns
-------
downbeat_steps : list
The indices of time steps that contain downbeats. | 3.369263 | 3.294505 | 1.022692 |
empty_track_indices = [idx for idx, track in enumerate(self.tracks)
if not np.any(track.pianoroll)]
return empty_track_indices | def get_empty_tracks(self) | Return the indices of tracks with empty pianorolls.
Returns
-------
empty_track_indices : list
The indices of tracks with empty pianorolls. | 4.312263 | 3.439944 | 1.253585 |
max_length = 0
for track in self.tracks:
if max_length < track.pianoroll.shape[0]:
max_length = track.pianoroll.shape[0]
return max_length | def get_max_length(self) | Return the maximum length of the pianorolls along the time axis (in
time step).
Returns
-------
max_length : int
The maximum length of the pianorolls along the time axis (in time
step). | 2.647152 | 2.661066 | 0.994771 |
stacked = self.get_stacked_pianoroll()
if mode == 'any':
merged = np.any(stacked, axis=2)
elif mode == 'sum':
merged = np.sum(stacked, axis=2)
elif mode == 'max':
merged = np.max(stacked, axis=2)
else:
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
return merged | def get_merged_pianoroll(self, mode='sum') | Return the merged pianoroll.
Parameters
----------
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
track axis. Default to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of all the
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among all the
pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the pianorolls has nonzero value at that pixel;
False if all pianorolls are inactive (zero-valued) at that pixel.
Returns
-------
merged : np.ndarray, shape=(n_time_steps, 128)
The merged pianoroll. | 2.118396 | 2.18024 | 0.971634 |
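# Worked sketch of the three merge modes on a toy stacked pianoroll (pure NumPy,
# mirroring the reductions used above rather than calling the class method).
import numpy as np

stacked = np.zeros((4, 128, 2), dtype=int)  # (time, pitch, track)
stacked[0, 60, 0] = 40                       # track 0 plays pitch 60 at t=0
stacked[0, 60, 1] = 90                       # track 1 plays the same pitch, louder

print(np.sum(stacked, axis=2)[0, 60])  # 130  ('sum' mode)
print(np.max(stacked, axis=2)[0, 60])  # 90   ('max' mode)
print(np.any(stacked, axis=2)[0, 60])  # True ('any' mode)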
multitrack = deepcopy(self)
multitrack.pad_to_same()
stacked = np.stack([track.pianoroll for track in multitrack.tracks], -1)
return stacked | def get_stacked_pianoroll(self) | Return a stacked multitrack pianoroll. The shape of the return array is
(n_time_steps, 128, n_tracks).
Returns
-------
stacked : np.ndarray, shape=(n_time_steps, 128, n_tracks)
The stacked pianoroll. | 4.018862 | 4.567359 | 0.879909 |
def reconstruct_sparse(target_dict, name):
return csc_matrix((target_dict[name+'_csc_data'],
target_dict[name+'_csc_indices'],
target_dict[name+'_csc_indptr']),
shape=target_dict[name+'_csc_shape']).toarray()
with np.load(filename) as loaded:
if 'info.json' not in loaded:
raise ValueError("Cannot find 'info.json' in the npz file.")
info_dict = json.loads(loaded['info.json'].decode('utf-8'))
self.name = info_dict['name']
self.beat_resolution = info_dict['beat_resolution']
self.tempo = loaded['tempo']
if 'downbeat' in loaded.files:
self.downbeat = loaded['downbeat']
else:
self.downbeat = None
idx = 0
self.tracks = []
while str(idx) in info_dict:
pianoroll = reconstruct_sparse(
loaded, 'pianoroll_{}'.format(idx))
track = Track(pianoroll, info_dict[str(idx)]['program'],
info_dict[str(idx)]['is_drum'],
info_dict[str(idx)]['name'])
self.tracks.append(track)
idx += 1
self.check_validity() | def load(self, filename) | Load a npz file. Supports only files previously saved by
:meth:`pypianoroll.Multitrack.save`.
Notes
-----
Attribute values will all be overwritten.
Parameters
----------
filename : str
The name of the npz file to be loaded. | 2.775418 | 2.725425 | 1.018343 |
if mode not in ('max', 'sum', 'any'):
raise ValueError("`mode` must be one of {'max', 'sum', 'any'}.")
merged = self[track_indices].get_merged_pianoroll(mode)
merged_track = Track(merged, program, is_drum, name)
self.append_track(merged_track)
if remove_merged:
self.remove_tracks(track_indices) | def merge_tracks(self, track_indices=None, mode='sum', program=0,
is_drum=False, name='merged', remove_merged=False) | Merge pianorolls of the tracks specified by `track_indices`. The merged
track will have program number as given by `program` and drum indicator
as given by `is_drum`. The merged track will be appended at the end of
the track list.
Parameters
----------
track_indices : list
The indices of tracks to be merged. Defaults to all the tracks.
mode : {'sum', 'max', 'any'}
A string that indicates the merging strategy to apply along the
track axis. Default to 'sum'.
- In 'sum' mode, the merged pianoroll is the sum of the collected
pianorolls. Note that for binarized pianorolls, integer summation
is performed.
- In 'max' mode, for each pixel, the maximum value among the
collected pianorolls is assigned to the merged pianoroll.
- In 'any' mode, the value of a pixel in the merged pianoroll is
True if any of the collected pianorolls has nonzero value at that
pixel; False if all the collected pianorolls are inactive
(zero-valued) at that pixel.
program: int
A program number according to General MIDI specification [1].
Available values are 0 to 127. Defaults to 0 (Acoustic Grand Piano).
is_drum : bool
A boolean number that indicates whether it is a percussion track.
Defaults to False.
name : str
A name to be assigned to the merged track. Defaults to 'merged'.
remove_merged : bool
True to remove the source tracks from the track list. False to keep
them. Defaults to False.
References
----------
[1] https://www.midi.org/specifications/item/gm-level-1-sound-set | 2.741085 | 2.752048 | 0.996016 |
max_length = self.get_max_length()
for track in self.tracks:
if track.pianoroll.shape[0] < max_length:
track.pad(max_length - track.pianoroll.shape[0]) | def pad_to_same(self) | Pad shorter pianorolls with zeros at the end along the time axis to
make the resulting pianoroll lengths the same as the maximum pianoroll
length among all the tracks. | 2.965192 | 2.088281 | 1.41992 |
pm = pretty_midi.PrettyMIDI(filename)
self.parse_pretty_midi(pm, **kwargs) | def parse_midi(self, filename, **kwargs) | Parse a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to be parsed.
**kwargs:
See :meth:`pypianoroll.Multitrack.parse_pretty_midi` for full
documentation. | 3.80752 | 4.499307 | 0.846246 |
if isinstance(track_indices, int):
track_indices = [track_indices]
self.tracks = [track for idx, track in enumerate(self.tracks)
if idx not in track_indices] | def remove_tracks(self, track_indices) | Remove tracks specified by `track_indices`.
Parameters
----------
track_indices : list
The indices of the tracks to be removed. | 2.08517 | 2.51969 | 0.82755 |
def update_sparse(target_dict, sparse_matrix, name):
csc = csc_matrix(sparse_matrix)
target_dict[name+'_csc_data'] = csc.data
target_dict[name+'_csc_indices'] = csc.indices
target_dict[name+'_csc_indptr'] = csc.indptr
target_dict[name+'_csc_shape'] = csc.shape
self.check_validity()
array_dict = {'tempo': self.tempo}
info_dict = {'beat_resolution': self.beat_resolution,
'name': self.name}
if self.downbeat is not None:
array_dict['downbeat'] = self.downbeat
for idx, track in enumerate(self.tracks):
update_sparse(array_dict, track.pianoroll,
'pianoroll_{}'.format(idx))
info_dict[str(idx)] = {'program': track.program,
'is_drum': track.is_drum,
'name': track.name}
if not filename.endswith('.npz'):
filename += '.npz'
if compressed:
np.savez_compressed(filename, **array_dict)
else:
np.savez(filename, **array_dict)
compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
with zipfile.ZipFile(filename, 'a') as zip_file:
zip_file.writestr('info.json', json.dumps(info_dict), compression) | def save(self, filename, compressed=True) | Save the multitrack pianoroll to a (compressed) npz file, which can be
later loaded by :meth:`pypianoroll.Multitrack.load`.
Notes
-----
To reduce the file size, the pianorolls are first converted to instances
of scipy.sparse.csc_matrix, whose component arrays are then collected
and saved to a npz file.
Parameters
----------
filename : str
The name of the npz file to which the mulitrack pianoroll is saved.
compressed : bool
True to save to a compressed npz file. False to save to an
uncompressed npz file. Defaults to True. | 2.42716 | 2.164958 | 1.121112 |
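# Round-trip sketch (not part of the source table): assuming the pypianoroll 0.5.x
# API, a Multitrack is saved to a compressed npz and read back with the `load`
# method shown above.
import numpy as np
from pypianoroll import Multitrack, Track

roll = np.zeros((96, 128), dtype=np.uint8)
roll[:24, 60] = 100
mt = Multitrack(tracks=[Track(pianoroll=roll, name='piano')],
                tempo=120.0, beat_resolution=24, name='demo')
mt.save('demo.npz')          # sparse CSC arrays plus an 'info.json' inside the npz

restored = Multitrack()      # default-constructed object, then overwritten by load()
restored.load('demo.npz')
print(restored.name, len(restored.tracks))  # demo 1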
self.check_validity()
pm = pretty_midi.PrettyMIDI(initial_tempo=self.tempo[0])
# TODO: Add downbeat support -> time signature change events
# TODO: Add tempo support -> tempo change events
if constant_tempo is None:
constant_tempo = self.tempo[0]
time_step_size = 60. / constant_tempo / self.beat_resolution
for track in self.tracks:
instrument = pretty_midi.Instrument(
program=track.program, is_drum=track.is_drum, name=track.name)
copied = track.copy()
if copied.is_binarized():
copied.assign_constant(constant_velocity)
copied.clip()
clipped = copied.pianoroll.astype(np.uint8)
binarized = (clipped > 0)
padded = np.pad(binarized, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded.astype(np.int8), axis=0)
positives = np.nonzero((diff > 0).T)
pitches = positives[0]
note_ons = positives[1]
note_on_times = time_step_size * note_ons
note_offs = np.nonzero((diff < 0).T)[1]
note_off_times = time_step_size * note_offs
for idx, pitch in enumerate(pitches):
velocity = np.mean(clipped[note_ons[idx]:note_offs[idx], pitch])
note = pretty_midi.Note(
velocity=int(velocity), pitch=pitch,
start=note_on_times[idx], end=note_off_times[idx])
instrument.notes.append(note)
instrument.notes.sort(key=lambda x: x.start)
pm.instruments.append(instrument)
return pm | def to_pretty_midi(self, constant_tempo=None, constant_velocity=100) | Convert to a :class:`pretty_midi.PrettyMIDI` instance.
Notes
-----
- Only constant tempo is supported by now.
- The velocities of the converted pianorolls are clipped to [0, 127],
i.e. values below 0 and values beyond 127 are replaced by 127 and 0,
respectively.
- Adjacent nonzero values of the same pitch will be considered a single
note with their mean as its velocity.
Parameters
----------
constant_tempo : int
The constant tempo value of the output object. Defaults to use the
first element of `tempo`.
constant_velocity : int
The constant velocity to be assigned to binarized tracks. Defaults
to 100.
Returns
-------
pm : `pretty_midi.PrettyMIDI` object
The converted :class:`pretty_midi.PrettyMIDI` instance. | 2.652399 | 2.666023 | 0.99489 |
for track in self.tracks:
if not track.is_drum:
track.transpose(semitone) | def transpose(self, semitone) | Transpose the pianorolls of all tracks by a number of semitones, where
positive values are for higher key, while negative values are for lower
key. The drum tracks are ignored.
Parameters
----------
semitone : int
The number of semitones to transpose the pianorolls. | 4.447332 | 3.724059 | 1.194216 |
active_length = self.get_active_length()
for track in self.tracks:
track.pianoroll = track.pianoroll[:active_length] | def trim_trailing_silence(self) | Trim the trailing silences of the pianorolls of all tracks. Trailing
silences are considered globally. | 4.846605 | 3.522719 | 1.375813 |
if not filename.endswith(('.mid', '.midi', '.MID', '.MIDI')):
filename = filename + '.mid'
pm = self.to_pretty_midi()
pm.write(filename) | def write(self, filename) | Write the multitrack pianoroll to a MIDI file.
Parameters
----------
filename : str
The name of the MIDI file to which the multitrack pianoroll is
written. | 4.215076 | 3.44287 | 1.224292 |
if not isinstance(arr, np.ndarray):
raise TypeError("`arr` must be of np.ndarray type")
if not (np.issubdtype(arr.dtype, np.bool_)
or np.issubdtype(arr.dtype, np.number)):
return False
if arr.ndim != 2:
return False
if arr.shape[1] != 128:
return False
return True | def check_pianoroll(arr) | Return True if the array is a standard piano-roll matrix. Otherwise,
return False. Raise TypeError if the input object is not a numpy array. | 2.148167 | 2.172539 | 0.988782 |
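# Quick sketch of what `check_pianoroll` accepts (assuming the function above is in
# scope): a 2D bool/numeric array whose second axis has length 128.
import numpy as np

print(check_pianoroll(np.zeros((96, 128))))     # True
print(check_pianoroll(np.zeros((96, 64))))      # False: second axis is not 128
print(check_pianoroll(np.zeros((4, 96, 128))))  # False: not 2-dimensional
# check_pianoroll('not an array')               # would raise TypeError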
_check_supported(obj)
copied = deepcopy(obj)
copied.binarize(threshold)
return copied | def binarize(obj, threshold=0) | Return a copy of the object with binarized piano-roll(s).
Parameters
----------
threshold : int or float
Threshold to binarize the piano-roll(s). Default to zero. | 7.253647 | 11.036232 | 0.657258 |
_check_supported(obj)
copied = deepcopy(obj)
copied.clip(lower, upper)
return copied | def clip(obj, lower=0, upper=127) | Return a copy of the object with piano-roll(s) clipped by a lower bound
and an upper bound specified by `lower` and `upper`, respectively.
Parameters
----------
lower : int or float
The lower bound to clip the piano-roll. Default to 0.
upper : int or float
The upper bound to clip the piano-roll. Default to 127. | 6.137272 | 9.723185 | 0.6312 |
_check_supported(obj)
copied = deepcopy(obj)
copied.pad(pad_length)
return copied | def pad(obj, pad_length) | Return a copy of the object with piano-roll padded with zeros at the end
along the time axis.
Parameters
----------
pad_length : int
The length to pad along the time axis with zeros. | 6.623208 | 10.187099 | 0.650156 |
_check_supported(obj)
copied = deepcopy(obj)
copied.pad_to_multiple(factor)
return copied | def pad_to_multiple(obj, factor) | Return a copy of the object with its piano-roll padded with zeros at the
end along the time axis with the minimal length that make the length of
the resulting piano-roll a multiple of `factor`.
Parameters
----------
factor : int
The value which the length of the resulting piano-roll will be
a multiple of. | 6.376031 | 10.097955 | 0.631418 |
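# Worked sketch of the padding arithmetic behind `pad_to_multiple`: a 30-step
# pianoroll is padded so its length becomes the next multiple of 24 (pure NumPy).
import numpy as np

pianoroll = np.ones((30, 128))
factor = 24
remainder = pianoroll.shape[0] % factor        # 6
pad_width = ((0, factor - remainder), (0, 0))  # pad 18 steps at the end
padded = np.pad(pianoroll, pad_width, 'constant')
print(padded.shape)  # (48, 128)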
if not isinstance(obj, Multitrack):
raise TypeError("Support only `pypianoroll.Multitrack` class objects")
copied = deepcopy(obj)
copied.pad_to_same()
return copied | def pad_to_same(obj) | Return a copy of the object with shorter piano-rolls padded with zeros
at the end along the time axis to the length of the piano-roll with the
maximal length. | 7.100032 | 5.867205 | 1.210122 |
if not filepath.endswith(('.mid', '.midi', '.MID', '.MIDI')):
raise ValueError("Only MIDI files are supported")
return Multitrack(filepath, beat_resolution=beat_resolution, name=name) | def parse(filepath, beat_resolution=24, name='unknown') | Return a :class:`pypianoroll.Multitrack` object loaded from a MIDI
(.mid, .midi, .MID, .MIDI) file.
Parameters
----------
filepath : str
The file path to the MIDI file. | 3.290745 | 2.869472 | 1.146812 |
if not isinstance(obj, Multitrack):
raise TypeError("Support only `pypianoroll.Multitrack` class objects")
obj.save(filepath, compressed) | def save(filepath, obj, compressed=True) | Save the object to a .npz file.
Parameters
----------
filepath : str
The path to save the file.
obj: `pypianoroll.Multitrack` objects
The object to be saved. | 7.594944 | 5.497353 | 1.381564 |
_check_supported(obj)
copied = deepcopy(obj)
copied.transpose(semitone)
return copied | def transpose(obj, semitone) | Return a copy of the object with piano-roll(s) transposed by `semitones`
semitones.
Parameters
----------
semitone : int
Number of semitones to transpose the piano-roll(s). | 6.022088 | 10.241092 | 0.588032 |
_check_supported(obj)
copied = deepcopy(obj)
length = copied.get_active_length()
copied.pianoroll = copied.pianoroll[:length]
return copied | def trim_trailing_silence(obj) | Return a copy of the object with trimmed trailing silence of the
piano-roll(s). | 7.972359 | 6.668567 | 1.195513 |
if not isinstance(obj, Multitrack):
raise TypeError("Support only `pypianoroll.Multitrack` class objects")
obj.write(filepath) | def write(obj, filepath) | Write the object to a MIDI file.
Parameters
----------
filepath : str
The path to write the MIDI file. | 7.763184 | 7.183427 | 1.080707 |
if not isinstance(pianoroll, np.ndarray):
raise TypeError("`pianoroll` must be of np.ndarray type.")
if not (np.issubdtype(pianoroll.dtype, np.bool_)
or np.issubdtype(pianoroll.dtype, np.number)):
raise TypeError("The data type of `pianoroll` must be np.bool_ or a "
"subdtype of np.number.")
if pianoroll.ndim != 2:
raise ValueError("`pianoroll` must have exactly two dimensions.")
if pianoroll.shape[1] != 128:
raise ValueError("The length of the second axis of `pianoroll` must be "
"128.") | def _validate_pianoroll(pianoroll) | Raise an error if the input array is not a standard pianoroll. | 1.910924 | 1.894665 | 1.008581 |
_validate_pianoroll(pianoroll)
reshaped = pianoroll[:, :120].reshape(-1, 12, 10)
reshaped[..., :8] += pianoroll[:, 120:].reshape(-1, 1, 8)
return np.sum(reshaped, 1) | def _to_chroma(pianoroll) | Return the unnormalized chroma features of a pianoroll. | 2.94459 | 2.857388 | 1.030518 |
_validate_pianoroll(pianoroll)
reshaped = pianoroll.reshape(-1, beat_resolution * pianoroll.shape[1])
n_empty_beats = len(reshaped) - np.count_nonzero(reshaped.any(1))  # beats with no active note
return n_empty_beats / len(reshaped) | def empty_beat_rate(pianoroll, beat_resolution) | Return the ratio of empty beats to the total number of beats in a
pianoroll. | 2.961781 | 2.79248 | 1.060628 |
_validate_pianoroll(pianoroll)
chroma = _to_chroma(pianoroll)
return np.count_nonzero(np.any(chroma, 0)) | def n_pitche_classes_used(pianoroll) | Return the number of unique pitch classes used in a pianoroll. | 3.500182 | 3.410502 | 1.026295 |
_validate_pianoroll(pianoroll)
if np.issubdtype(pianoroll.dtype, np.bool_):
pianoroll = pianoroll.astype(np.uint8)
padded = np.pad(pianoroll, ((1, 1), (0, 0)), 'constant')
diff = np.diff(padded, axis=0).reshape(-1)
onsets = (diff > 0).nonzero()[0]
offsets = (diff < 0).nonzero()[0]
n_qualified_notes = np.count_nonzero(offsets - onsets >= threshold)
return n_qualified_notes / len(onsets) | def qualified_note_rate(pianoroll, threshold=2) | Return the ratio of the number of the qualified notes (notes longer than
`threshold` (in time step)) to the total number of notes in a pianoroll. | 2.257025 | 2.245966 | 1.004924 |
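# Worked sketch of the idea behind `qualified_note_rate` (pure NumPy, computed per
# pitch on a toy pianoroll rather than mirroring the vectorized code above).
import numpy as np

roll = np.zeros((8, 128), dtype=np.uint8)
roll[0:1, 60] = 100   # 1-step note -> not qualified when threshold is 2
roll[3:6, 60] = 100   # 3-step note -> qualified

threshold, durations = 2, []
for pitch in range(128):
    padded = np.pad((roll[:, pitch] > 0).astype(np.int8), 1, 'constant')
    change = np.diff(padded)
    durations.extend(np.flatnonzero(change < 0) - np.flatnonzero(change > 0))
print(sum(d >= threshold for d in durations) / len(durations))  # 0.5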
_validate_pianoroll(pianoroll)
n_poly = np.count_nonzero(np.count_nonzero(pianoroll, 1) > threshold)
return n_poly / len(pianoroll) | def polyphonic_rate(pianoroll, threshold=2) | Return the ratio of the number of time steps where the number of pitches
being played is larger than `threshold` to the total number of time steps
in a pianoroll. | 2.696493 | 2.875043 | 0.937897 |
if beat_resolution not in (4, 6, 8, 9, 12, 16, 18, 24):
raise ValueError("Unsupported beat resolution. Only 4, 6, 8 ,9, 12, "
"16, 18, 42 are supported.")
_validate_pianoroll(pianoroll)
def _drum_pattern_mask(res, tol):
if res == 24:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 4)
elif res == 12:
drum_pattern_mask = np.tile([1., tol, tol], 4)
elif res == 6:
drum_pattern_mask = np.tile([1., tol, tol], 2)
elif res == 18:
drum_pattern_mask = np.tile([1., tol, 0., 0., 0., tol], 3)
elif res == 9:
drum_pattern_mask = np.tile([1., tol, tol], 3)
elif res == 16:
drum_pattern_mask = np.tile([1., tol, 0., tol], 4)
elif res == 8:
drum_pattern_mask = np.tile([1., tol], 4)
elif res == 4:
drum_pattern_mask = np.tile([1., tol], 2)
return drum_pattern_mask
drum_pattern_mask = _drum_pattern_mask(beat_resolution, tolerance)
n_in_pattern = np.sum(drum_pattern_mask * np.count_nonzero(pianoroll, 1))
return n_in_pattern / np.count_nonzero(pianoroll) | def drum_in_pattern_rate(pianoroll, beat_resolution, tolerance=0.1) | Return the ratio of the number of drum notes that lie on the drum
pattern (i.e., at certain time steps) to the total number of drum notes. | 1.908838 | 1.893038 | 1.008347 |
if not isinstance(key, int):
raise TypeError("`key` must an integer.")
if key > 11 or key < 0:
raise ValueError("`key` must be in an integer in between 0 and 11.")
if kind not in ('major', 'minor'):
raise ValueError("`kind` must be one of 'major' or 'minor'.")
_validate_pianoroll(pianoroll)
def _scale_mask(key, kind):
if kind == 'major':
a_scale_mask = np.array([0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1], bool)
else:
a_scale_mask = np.array([1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], bool)
return np.roll(a_scale_mask, key)
chroma = _to_chroma(pianoroll)
scale_mask = _scale_mask(key, kind)
n_in_scale = np.sum(scale_mask.reshape(-1, 12) * chroma)
return n_in_scale / np.count_nonzero(pianoroll) | def in_scale_rate(pianoroll, key=3, kind='major') | Return the ratio of the number of nonzero entries that lie in a specific
scale to the total number of nonzero entries in a pianoroll. Default to C
major scale. | 2.141032 | 2.103438 | 1.017873 |
_validate_pianoroll(pianoroll_1)
_validate_pianoroll(pianoroll_2)
assert len(pianoroll_1) == len(pianoroll_2), (
"Input pianorolls must have the same length.")
def _tonal_matrix(r1, r2, r3):
tonal_matrix = np.empty((6, 12))
tonal_matrix[0] = r1 * np.sin(np.arange(12) * (7. / 6.) * np.pi)
tonal_matrix[1] = r1 * np.cos(np.arange(12) * (7. / 6.) * np.pi)
tonal_matrix[2] = r2 * np.sin(np.arange(12) * (3. / 2.) * np.pi)
tonal_matrix[3] = r2 * np.cos(np.arange(12) * (3. / 2.) * np.pi)
tonal_matrix[4] = r3 * np.sin(np.arange(12) * (2. / 3.) * np.pi)
tonal_matrix[5] = r3 * np.cos(np.arange(12) * (2. / 3.) * np.pi)
return tonal_matrix
def _to_tonal_space(pianoroll, tonal_matrix):
beat_chroma = _to_chroma(pianoroll).reshape(-1, beat_resolution, 12)
beat_chroma = beat_chroma / np.sum(beat_chroma, 2, keepdims=True)
return np.matmul(tonal_matrix, beat_chroma.T).T
tonal_matrix = _tonal_matrix(r1, r2, r3)
mapped_1 = _to_tonal_space(pianoroll_1, tonal_matrix)
mapped_2 = _to_tonal_space(pianoroll_2, tonal_matrix)
return np.linalg.norm(mapped_1 - mapped_2) | def tonal_distance(pianoroll_1, pianoroll_2, beat_resolution, r1=1.0, r2=1.0,
r3=0.5) | Return the tonal distance [1] between the two input pianorolls.
[1] Christopher Harte, Mark Sandler, and Martin Gasser. Detecting
harmonic change in musical audio. In Proc. ACM Workshop on Audio and
Music Computing Multimedia, 2006. | 1.6112 | 1.627604 | 0.989921 |
if not self.is_binarized():
self.pianoroll[self.pianoroll.nonzero()] = value
return
if dtype is None:
if isinstance(value, int):
dtype = int
elif isinstance(value, float):
dtype = float
nonzero = self.pianoroll.nonzero()
self.pianoroll = np.zeros(self.pianoroll.shape, dtype)
self.pianoroll[nonzero] = value | def assign_constant(self, value, dtype=None) | Assign a constant value to all nonzeros in the pianoroll. If the
pianoroll is not binarized, its data type will be preserved. If the
pianoroll is binarized, it will be casted to the type of `value`.
Arguments
---------
value : int or float
The constant value to be assigned to all the nonzeros in the
pianoroll. | 2.642281 | 2.187109 | 1.208116 |
if not self.is_binarized():
self.pianoroll = (self.pianoroll > threshold) | def binarize(self, threshold=0) | Binarize the pianoroll.
Parameters
----------
threshold : int or float
A threshold used to binarize the pianorolls. Defaults to zero. | 5.594303 | 5.025837 | 1.113109 |
# pianoroll
if not isinstance(self.pianoroll, np.ndarray):
raise TypeError("`pianoroll` must be a numpy array.")
if not (np.issubdtype(self.pianoroll.dtype, np.bool_)
or np.issubdtype(self.pianoroll.dtype, np.number)):
raise TypeError("The data type of `pianoroll` must be np.bool_ or "
"a subdtype of np.number.")
if self.pianoroll.ndim != 2:
raise ValueError("`pianoroll` must have exactly two dimensions.")
if self.pianoroll.shape[1] != 128:
raise ValueError("The length of the second axis of `pianoroll` "
"must be 128.")
# program
if not isinstance(self.program, int):
raise TypeError("`program` must be int.")
if self.program < 0 or self.program > 127:
raise ValueError("`program` must be in between 0 to 127.")
# is_drum
if not isinstance(self.is_drum, bool):
raise TypeError("`is_drum` must be bool.")
# name
if not isinstance(self.name, string_types):
raise TypeError("`name` must be a string.") | def check_validity(self) | Raise error if any invalid attribute found. | 1.781616 | 1.74328 | 1.021991 |
self.pianoroll = self.pianoroll.clip(lower, upper) | def clip(self, lower=0, upper=127) | Clip the pianoroll by the given lower and upper bounds.
Parameters
----------
lower : int or float
The lower bound to clip the pianoroll. Defaults to 0.
upper : int or float
The upper bound to clip the pianoroll. Defaults to 127. | 4.324906 | 3.536804 | 1.222829 |
nonzero_steps = np.any(self.pianoroll, axis=1)
inv_last_nonzero_step = np.argmax(np.flip(nonzero_steps, axis=0))
active_length = self.pianoroll.shape[0] - inv_last_nonzero_step
return active_length | def get_active_length(self) | Return the active length (i.e., without trailing silence) of the
pianoroll. The unit is time step.
Returns
-------
active_length : int
The active length (i.e., without trailing silence) of the pianoroll. | 3.980884 | 3.492857 | 1.139721 |
if self.pianoroll.shape[1] < 1:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
lowest = 0
highest = 127
while lowest < highest:
if np.any(self.pianoroll[:, lowest]):
break
lowest += 1
if lowest == highest:
raise ValueError("Cannot compute the active pitch range for an "
"empty pianoroll")
while not np.any(self.pianoroll[:, highest]):
highest -= 1
return lowest, highest | def get_active_pitch_range(self) | Return the active pitch range as a tuple (lowest, highest).
Returns
-------
lowest : int
The lowest active pitch in the pianoroll.
highest : int
The highest active pitch in the pianoroll. | 2.558522 | 2.321702 | 1.102003 |
is_binarized = np.issubdtype(self.pianoroll.dtype, np.bool_)
return is_binarized | def is_binarized(self) | Return True if the pianoroll is already binarized. Otherwise, return
False.
Returns
-------
is_binarized : bool
True if the pianoroll is already binarized; otherwise, False. | 4.700046 | 3.541803 | 1.327021 |
self.pianoroll = np.pad(
self.pianoroll, ((0, pad_length), (0, 0)), 'constant') | def pad(self, pad_length) | Pad the pianoroll with zeros at the end along the time axis.
Parameters
----------
pad_length : int
The length to pad with zeros along the time axis. | 3.21232 | 2.930999 | 1.095982 |
remainder = self.pianoroll.shape[0] % factor
if remainder:
pad_width = ((0, (factor - remainder)), (0, 0))
self.pianoroll = np.pad(self.pianoroll, pad_width, 'constant') | def pad_to_multiple(self, factor) | Pad the pianoroll with zeros at the end along the time axis with the
minimum length that makes the resulting pianoroll length a multiple of
`factor`.
Parameters
----------
factor : int
The value which the length of the resulting pianoroll will be
a multiple of. | 2.626986 | 2.893454 | 0.907907 |