Note that the OrbitPlot function chooses reasonable limits for the axes for you. There are various ways to customize the plot. Have a look at the arguments used in the following examples, which are pretty much self-explanatory (if in doubt, check the documentation!).
fig = rebound.OrbitPlot(sim, unitlabel="[AU]", color=True, trails=True, periastron=True) fig = rebound.OrbitPlot(sim, unitlabel="[AU]", periastron=True, lw=2)
ipython_examples/OrbitPlot.ipynb
dchandan/rebound
gpl-3.0
<a id='import_data'></a> Import the data The data consist of a large matrix with r rows and c columns. Each row is labeled with two pieces of information: 1) which disease the row belongs to, and 2) which GO term the row belongs to. The values in each row represent the similarity of the focal (row) datapoint to the other datapoints. Each row has at least one entry equal to 1.0. We can think of each row as coordinates in c-dimensional space.
# load the dataframe using pandas cluster_focal_df = pd.read_csv('cluster_diff_test_nodes_5d.csv', sep='\t', index_col='index') # drop this column because we don't need it cluster_focal_df = cluster_focal_df.drop('focal_mean', axis=1) # add a column that is the mean of values in each row, and sort by it cluster_focal_mean = cluster_focal_df.mean(axis=1) cluster_focal_df['total_mean'] = cluster_focal_mean cluster_focal_df = cluster_focal_df.sort_values('total_mean', ascending=False)
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='plot_heatmap'></a> Plot the raw data as a heatmap
# plot the heatmap plt.figure(figsize=(15,15)) plt.matshow(cluster_focal_df, fignum=False, cmap='jet', vmin=0, vmax=1, aspect='auto') #plt.yticks(range(len(cluster_focal_df)), list(cluster_focal_df.index), fontsize=8) plt.xticks(range(len(cluster_focal_df.columns)), list(cluster_focal_df.columns), rotation=90, fontsize=10) plt.grid(False)
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='parse_rlabels'></a> Parse the row labels Here we include helper functions that are useful for parsing row labels from DataFrame indices and mapping these labels to colors. NOTE: these functions are specific to the example dataset used here.
def build_row_colors(nodes_df,cmap = matplotlib.cm.nipy_spectral,find_col_colors = True): ''' Simple helper function for plotting to return row_colors and col_colors for sns.clustermap. - disease names will be extracted from df indices and columns and used for plotting - cmap defines the desired colormap (can be any matplotlib colormap) ''' # make the list of disease naes nodes_index = list(nodes_df.index) dname_list = [] for idx_temp in nodes_index: idx_ = idx_temp.find('_') dname_temp = idx_temp[:idx_] dname_list.append(dname_temp) dname_list = pd.Series(dname_list) # make the row colors (one color per disease) num_diseases = len(np.unique(dname_list)) dnames = list(np.unique(dname_list)) #list(dname_list.unique()) cmap_idx_dict = dict(zip(dnames,[int(round(i/float(num_diseases)*220.)+25) for i in range(num_diseases)])) rcolors=[] for dfocal in dname_list: #color_list = [sns.color_palette('Set2',num_diseases)[cmap_idx]]*(num_dfocal) color_temp = cmap(cmap_idx_dict[dfocal]) rcolors.append(color_temp) # now find the column colors if find_col_colors: dnames_split = [split_dname(d) for d in dnames] # loop over columns to find which disease it is colnames = list(nodes_df.columns) dname_col_list = [0]*len(colnames) for i in range(len(colnames)): col = colnames[i] for d in dnames_split: # is disease d in column col? idx_match = col.find(d[0:5]) if idx_match>-1: dname_col_list[i]=d if type(dname_col_list[i]) != str: dname_col_list[i]='unknown' cmap_col_idx_dict = dict(zip(dnames_split,[int(round(i/float(num_diseases)*256.)) for i in range(num_diseases)])) cmap_col_idx_dict['unknown'] = 255 print(cmap_col_idx_dict) ccolors=[] for dfocal in dname_col_list: #color_list = [sns.color_palette('Set2',num_diseases)[cmap_idx]]*(num_dfocal) color_temp = cmap(cmap_col_idx_dict[dfocal]) ccolors.append(color_temp) return rcolors,ccolors,dname_col_list,dname_list else: return rcolors,dname_col_list,dname_list def split_dname(dtemp): ''' Helper function to split disease name into words separated by underscores ''' dkeep=dtemp icount = 0 # don't look at the first letter for i in range(1,len(dtemp)): icount+=1 c = dtemp[i] if c.isupper(): dkeep = dkeep[0:icount]+'_'+dkeep[icount:] icount+=1 # add another to icount to account for new underscore return dkeep def get_reduced_labels(nodes_df,num_common_bigrams=25): ''' Reduce the cluster labels to common bigrams ''' cluster_labels = list(nodes_df.index) # shuffle cluster_labels to get rid of local structure np.random.shuffle(cluster_labels) # build up a list of the most common words word_list = [] for c in cluster_labels: # split cluster_label into parts separated by underscore cluster_label = c.split('_') GO_temp = cluster_label[2] # the third element is the GO term tokens = word_tokenize(GO_temp) word_list.extend(tokens) word_list = pd.Series(word_list) word_list.value_counts() filtered_words = [word for word in word_list if word not in stopwords.words('english')] # find common bigrams bigram_measures = nltk.collocations.BigramAssocMeasures() trigram_measures = nltk.collocations.TrigramAssocMeasures() finder = nltk.collocations.BigramCollocationFinder.from_words(filtered_words) top_N = finder.nbest(bigram_measures.raw_freq,num_common_bigrams) # loop over cluster_labels, and replace with common phrase if it occurs cluster_labels = list(nodes_df.index) reduced_labels = [] for c in cluster_labels: # split cluster_label into parts separated by underscore cluster_label = c.split('_') if cluster_label[2]=='': GO_temp = cluster_label[3] # the fourth element is the GO 
term if third is blank else: GO_temp = cluster_label[2] # the third element is the GO term tokens = word_tokenize(GO_temp) is_match = False i = -1 while (not is_match) and (i<len(top_N)-1): i+=1 num_overlap = len(set.intersection(set(top_N[i]),set(tokens))) if num_overlap>=2: # for bigrams only is_match=True reduced_labels.append(top_N[i][0]+' ' + top_N[i][1]) if not is_match: # if there isn't any match, just take the normal label reduced_labels.append(GO_temp) return reduced_labels # parse first label set (called GO terms from now on) reduced_labels = get_reduced_labels(cluster_focal_df,num_common_bigrams=0) reduced_label_VC = pd.Series(reduced_labels).value_counts() n_bigrams = len(np.unique(reduced_labels))-1 # include all labels # make dictionaries going from label to index and back label_to_idx = dict(zip(list(reduced_label_VC.index),range(len(reduced_label_VC)))) idx_to_label = dict(zip(range(len(reduced_label_VC)),list(reduced_label_VC.index))) reduced_idx = [float(label_to_idx[label]) if label_to_idx[label]<n_bigrams else n_bigrams+1. for label in reduced_labels ] labels = idx_to_label.values() keys = idx_to_label.keys() idx_to_label_reduced = dict(zip(keys[0:n_bigrams+1],labels[0:n_bigrams+1])) idx_to_label_reduced[n_bigrams+1]='other' # set all unlabeled points to 'other' label_to_idx_reduced = dict(zip(labels[0:n_bigrams+1],keys[0:n_bigrams+1])) label_to_idx_reduced['other']=n_bigrams+1 # set all unlabeled points to 'other' # parse second label set (called Disease names from now on) # map diseases to colors rcolors,tmp1,tmp2,dname_list = build_row_colors(cluster_focal_df,cmap = matplotlib.cm.nipy_spectral,find_col_colors = True) dname_to_rcolors = dict(zip(dname_list.values,rcolors))
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='dim_reduce'></a> Reduce to two dimensions Methods (scikit-learn implementations used here): - t-SNE: Van der Maaten, Laurens, and Geoffrey Hinton. "Visualizing data using t-SNE." Journal of Machine Learning Research 9.2579-2605 (2008): 85. <img src="screenshots/sklearn_tsne.png" width="600" height="600"> Principal Component Analysis (PCA): M. Tipping and C. Bishop, Probabilistic Principal Component Analysis, Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622 <img src="screenshots/sklearn_pca.png" width="600" height="600"> Isomap: Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric framework for nonlinear dimensionality reduction. Science 290 (5500) <img src="screenshots/sklearn_isomap.png" width="600" height="600">
from sklearn.manifold import TSNE from sklearn.decomposition import PCA from sklearn.decomposition import NMF from sklearn.manifold import Isomap # select which dimensionality reduction technique you want here dim_reduct_method = 'TSNE' tsne = TSNE(n_components=2) pca = PCA(n_components=2) isomap = Isomap(n_neighbors=10, n_components=2, path_method='auto') # drop the total_mean column focal_df = cluster_focal_df.drop('total_mean', axis=1) focal_df = focal_df.replace(to_replace=1.0, value=0.0) # make an array out of the df for input into dim reduction methods cluster_mat = np.array(focal_df) if dim_reduct_method=='TSNE': cluster_transf = tsne.fit_transform(cluster_mat) elif dim_reduct_method=='PCA': cluster_transf = pca.fit_transform(cluster_mat) elif dim_reduct_method=='Isomap': cluster_transf = isomap.fit_transform(cluster_mat)
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='plot_transformed'></a> Plot the data in transformed coordinates Left panel: transformed coordinates color-coded by GO term. Looks like there is some grouping happening, where some points labeled by the same GO term appear to be clustered together. Right panel: transformed coordinates color-coded by disease name. Again there is some clear grouping happening, easily identified by eye. Can we quantify our observations by developing a scoring method to evaluate how localized points are by GO term and by disease name?
plt.figure(figsize=(20,10)) plt.subplot(1,2,1) plt.plot(cluster_transf[:,0],cluster_transf[:,1],'o',color='gray',markersize=4) for i in range(len(idx_to_label_reduced)): reduced_labels = pd.Series(reduced_labels) label_temp = idx_to_label_reduced[i] idx_focal = list(reduced_labels[reduced_labels==label_temp].index) if len(idx_focal)>0: col_temp =matplotlib.cm.Set1(int(round(float(i)/len(idx_to_label_reduced)*255))) plt.plot(cluster_transf[idx_focal,0],cluster_transf[idx_focal,1],'o',color=col_temp,label=idx_to_label_reduced[i], markersize=5) #plt.legend(loc='upper left',fontsize=10,ncol=1) #plt.xlim([-30,30]) plt.title(dim_reduct_method+' transformed data \ncolor-coded by GO term',fontsize=18) plt.subplot(1,2,2) for d in dname_to_rcolors.keys(): idx_focal = list(dname_list[dname_list==d].index) if len(idx_focal)>0: col_temp =dname_to_rcolors[d] plt.plot(cluster_transf[idx_focal,0],cluster_transf[idx_focal,1],'o',color=col_temp,label=d, markersize=5) plt.legend(fontsize=14,loc='lower left') plt.title(dim_reduct_method+' transformed data \ncolor-coded by disease name',fontsize=18) #plt.xlim([-30,30])
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='scoring_method'></a> Scoring method (Specificity) Our scoring method measures a weighted distance ($S$) between all pairs of points in the dataset, where the weights are determined by the labels. If two nearby points have the same label, they will be rewarded; if they have different labels, they will be penalized. $ s_i = \sum_{j=1}^N \frac{1}{N}F(d_{ij}) \delta(c_{ij}) $ Distances ($d_{ij}$) are Euclidean distances measured in 2-d reduced space. $\delta(c_{ij})$ is 0 if points $i$ and $j$ have different labels, and 1 if they have the same labels. The distance transformation function $F(d_{ij})$ is selected by the user based on the desired encoding of distance. This transformation is necessary because we want to reward nearby points in our weighted average. Choices are: 'log_inv': $F(x) = \log(1/x)$ 'inv': $F(x) = 1/x$ 'sub': $F(x) = 1-x/\max(x)$ 'rank': $F(x) = (1-rank(x))/N$ 'rank_inv': $F(x) = 1/rank(x)$
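To make the formula concrete, here is a small illustrative sketch (added for clarity; not part of the original notebook) that computes $s_i$ for three toy points using the 'log_inv' transform and the 0/1 label delta defined above. Note that the full `weighted_score` implementation in the next cell uses the same distance transforms but scores mismatched labels as -1 rather than 0.

```python
import numpy as np
from scipy.spatial.distance import pdist, squareform

# Toy example: three points in the 2-d reduced space, two sharing a label
pts = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
labels = np.array(['GO:A', 'GO:A', 'GO:B'])

d = squareform(pdist(pts))                 # pairwise Euclidean distances
with np.errstate(divide='ignore'):
    F = np.log(1.0 / d)                    # 'log_inv' transform
np.fill_diagonal(F, 0.0)                   # ignore self-distances

delta = (labels[:, None] == labels[None, :]).astype(float)  # 1 if same label, else 0
np.fill_diagonal(delta, 0.0)

s = (F * delta).mean(axis=1)               # s_i = (1/N) * sum_j F(d_ij) * delta(c_ij)
print(s)  # the isolated 'GO:B' point gets a score of 0 under the 0/1 delta
```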
def weighted_score(x,y,labels1,labels2,dtype='log_inv'): ''' This function calculates the weighted scores of points in x,y, defined by labels1 and labels2. - Points are scored more highly if they are close to other points with the same label, and are penalized if they are close to points with different labels. ''' d = squareform(pdist(np.transpose([x,y]))) #d = squareform(pdist(cluster_mat)) if dtype=='log_inv': d_log_inv = np.log(1/d) np.fill_diagonal(d_log_inv,0) d_transf = d_log_inv elif dtype=='inv': d_inv = 1/d np.fill_diagonal(d_inv,0) d_transf = d_inv elif dtype=='sub': d_sub = 1 - d/np.max(d) np.fill_diagonal(d_sub,1) d_transf = d_sub elif dtype=='rank': d_rank = [] for i in range(len(d)): d_rank.append(len(d)-np.argsort(d[i,:])) d_transf = d_rank elif dtype=='rank_inv': d_inv_rank = [] for i in range(len(d)): d_inv_rank.append(1./(np.argsort(d[i,:])+1)) d_transf = d_inv_rank labels1 = pd.Series(labels1) label_delta_mat = np.zeros((len(labels1),len(labels1))) for i in range(len(labels1)): label_temp = labels1==labels1[i] label_plus_minus = [(int(label)-.5)*2 for label in label_temp] label_delta_mat[i,:] = label_plus_minus score1 = np.mean(d_transf*label_delta_mat,axis=0) labels2 = pd.Series(labels2) label_delta_mat = np.zeros((len(labels2),len(labels2))) for i in range(len(labels2)): label_temp = labels2==labels2[i] label_plus_minus = [(int(label)-.5)*2 for label in label_temp] label_delta_mat[i,:] = label_plus_minus score2 = np.mean(d_transf*label_delta_mat,axis=0) return score1,score2 # calculate the score here x = cluster_transf[:,0] y = cluster_transf[:,1] labels1 = [l if l in label_to_idx_reduced.keys() else 'other' for l in reduced_labels] labels2 = dname_list score1,score2 = weighted_score(x,y,labels1,labels2,dtype='log_inv') # make a dataframe to store the score results Score_df = pd.DataFrame({'score1':list(score1),'score2':list(score2), 'GOlabels':list(labels1),'Dnames':list(dname_list)},index=range(len(score1))) # calculate the average score for each GOterm and disease name sGO_GB_mean = [] sD_GB_mean = [] sGO_GB_mean = Score_df.groupby('GOlabels').mean() sD_GB_mean = Score_df.groupby('Dnames').mean() # measure how many disease names are associated with each GOterm GO_GB_D = Score_df['Dnames'].groupby(Score_df['GOlabels']).value_counts() # need to normalize by total number of clusters in each disease clusters_per_disease = Score_df['Dnames'].value_counts() clusters_per_GOterm = Score_df['GOlabels'].value_counts() # plot the reduced data in specificity coordinates here plt.figure(figsize=(14,7)) plt.subplot(1,2,1) plt.scatter(score1,score2,c=[label_to_idx_reduced[l] for l in labels1],cmap='jet') plt.xlabel('GO specificity',fontsize=16) plt.ylabel('Disease specificity',fontsize=16) plt.title('color-coded by GO term',fontsize=16) plt.subplot(1,2,2) plt.scatter(score1,score2,c=[dname_to_rcolors[d] for d in dname_list],cmap='jet') plt.xlabel('GO specificity',fontsize=16) plt.ylabel('Disease specificity',fontsize=16) plt.title('color-coded by disease name',fontsize=16)
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
TOC <a id='plot_specificity'></a> Plot the average specificities per GO term and per disease name Plot points as label names Left panel: GO terms plotted in specificity coordinates. Points are color-coded by the disease which contains the most counts of that term. Points are larger if the GO term has more occurrences in the data. GO terms with high GO specificity and high disease specificity (upper right quadrant) are likely to be found near other points with the same GO label and disease label. GO terms with high GO specificity but low disease specificity are likely to be found near points with the same GO labels, but different disease labels. GO terms with low GO specificity but high disease specificity are likely to be found near points with different GO labels, but the same disease labels. GO terms with low specificity in both GO and disease (lower left quadrant) are not likely to be found near other points with the same labels. Right panel: Disease names plotted in specificity coordinates. Diseases with high specificity in both GO and disease are likely to be found near points with the same GO labels and disease labels. Diseases with high GO specificity but low disease specificity are found near points with the same GO labels, but different disease labels. Diseases with low GO specificity but high disease specificity are found near points with different GO labels, but the same disease labels. Diseases with low specificity in both GO and disease are not likely to be found near other points with the same labels.
fig = plt.figure(figsize=(15,15)) axes = fig.add_subplot(1,1,1) subpos = [0.7,0.7,0.25,0.25] for GOname in list(sGO_GB_mean.index): msize = np.log(clusters_per_GOterm[GOname])*3*15 # set the marker size # get the text color D_freq_norm = GO_GB_D[GOname]# /clusters_per_disease # normalize by number of clusters per disease D_freq_norm.sort(ascending=False) if (D_freq_norm[0]/float(np.sum(D_freq_norm))) > .5: most_frequent_D = D_freq_norm.index[0] # get the most frequent disease for focal GO term color_temp = dname_to_rcolors[most_frequent_D] else: # if focal GOname doesn't really belong to any disease, make it white color_temp='black' axes.plot(sGO_GB_mean['score1'][GOname],sGO_GB_mean['score2'][GOname], '.',marker=r'$'+GOname[0:20]+'$',markersize=msize,color=color_temp) plt.xlabel('GO specificity',fontsize=16) plt.ylabel('Disease specificity',fontsize=16) plt.xlim([2.5,3.5]) plt.ylim([0.5,3.2]) subax1 = add_subplot_axes(axes,subpos) for Dname in list(sD_GB_mean.index): msize = len(Dname)*5 subax1.plot(sD_GB_mean['score1'][Dname],sD_GB_mean['score2'][Dname], '.',marker=r'$'+Dname+'$',markersize=msize,color=dname_to_rcolors[Dname]) plt.xlabel('GO specificity',fontsize=12) plt.ylabel('Disease specificity',fontsize=12) plt.xlim([2.5,3.5]) def add_subplot_axes(ax,rect,axisbg='w'): ''' This function allows for plotting of inset subplots (from http://stackoverflow.com/questions/17458580/embedding-small-plots-inside-subplots-in-matplotlib) ''' fig = plt.gcf() box = ax.get_position() width = box.width height = box.height inax_position = ax.transAxes.transform(rect[0:2]) transFigure = fig.transFigure.inverted() infig_position = transFigure.transform(inax_position) x = infig_position[0] y = infig_position[1] width *= rect[2] height *= rect[3] # <= Typo was here subax = fig.add_axes([x,y,width,height],axisbg=axisbg) x_labelsize = subax.get_xticklabels()[0].get_size() y_labelsize = subax.get_yticklabels()[0].get_size() x_labelsize *= rect[2]**0.5 y_labelsize *= rect[3]**0.5 subax.xaxis.set_tick_params(labelsize=x_labelsize) subax.yaxis.set_tick_params(labelsize=y_labelsize) return subax
notebooks/networkAnalysis/specificity_visualization_high_dimensional_data/Visualizing and scoring labeled high dimensional data.ipynb
ucsd-ccbb/jupyter-genomics
mit
1.8.2. Built-in Collection Data Types 1. lists Lists are heterogeneous, meaning that the data objects need not all be from the same class and the collection can be assigned to a variable as below. | Operation Name | Operator | Explanation | | --- | --- | --- | | indexing | [ ] | Access an element of a sequence | | concatenation | + | Combine sequences together | | repetition | * | Concatenate a repeated number of times | | membership | in | Ask whether an item is in a sequence | | length | len | Ask the number of items in the sequence | | slicing | [ : ] | Extract a part of a sequence |
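As a quick complement to the table above, here is a short illustrative snippet (added; not from the original notebook) showing each sequence operation on a small list:

```python
myList = [1, 2, 3, 4]
print(myList[0])        # indexing -> 1
print(myList + [5, 6])  # concatenation -> [1, 2, 3, 4, 5, 6]
print(myList * 2)       # repetition -> [1, 2, 3, 4, 1, 2, 3, 4]
print(3 in myList)      # membership -> True
print(len(myList))      # length -> 4
print(myList[1:3])      # slicing -> [2, 3]
```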
fakeList = ['str', 12, True, 1.232] # heterogeneous print(fakeList) myList = [1,2,3,4] A = [myList] * 3 print(A) myList[2]=45454545 print(A)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
| Method Name | Use | Explanation | | --- | --- | --- | | append | alist.append(item) | Adds a new item to the end of a list | | insert | alist.insert(i,item) | Inserts an item at the ith position in a list | | pop | alist.pop() | Removes and returns the last item in a list | | pop | alist.pop(i) | Removes and returns the ith item in a list | | sort | alist.sort() | Modifies a list to be sorted | | reverse | alist.reverse() | Modifies a list to be in reverse order | | del | del alist[i] | Deletes the item in the ith position | | index | alist.index(item) | Returns the index of the first occurrence of item | | count | alist.count(item) | Returns the number of occurrences of item | | remove | alist.remove(item) | Removes the first occurrence of item |
myList = [1024, 3, True, 6.5] myList.append(False) print(myList) myList.insert(2,4.5) print(myList) print(myList.pop()) print(myList) print(myList.pop(1)) print(myList) myList.pop(2) print(myList) myList.sort() print(myList) myList.reverse() print(myList) print(myList.count(6.5)) print(myList.index(4.5)) myList.remove(6.5) print(myList) del myList[0] print(myList) print(list(range(10))) print(list(range(5,10))) print(list(range(5,10,2))) print(list(range(10,1,-1)))
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
2. Strings | Method Name | Use | Explanation | | --- | --- | --- | | center | astring.center(w) | Returns a string centered in a field of size w | | count | astring.count(item) | Returns the number of occurrences of item in the string | | ljust | astring.ljust(w) | Returns a string left-justified in a field of size w | | lower | astring.lower() | Returns a string in all lowercase | | rjust | astring.rjust(w) | Returns a string right-justified in a field of size w | | find | astring.find(item) | Returns the index of the first occurrence of item | | split | astring.split(schar) | Splits a string into substrings at schar |
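The cell below exercises most of these methods; for completeness, here is a brief added example (not from the original text) of the two it skips, count and lower:

```python
myName = "David"
print(myName.lower())     # 'david'
print(myName.count('d'))  # 1 -- count is case-sensitive, so only the final 'd' matches
```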
myName= "David" print(myName[3]) print(myName * 2) print(len(myName)) print(myName.upper()) print('.' + myName.center(10) + '.') print('.' + myName.ljust(10) + '.') print('.' + myName.rjust(10) + '.') print(myName.find('v')) print(myName.split('v'))
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
A major difference between lists and strings is that lists can be modified while strings cannot. This is referred to as mutability. Lists are mutable; strings are immutable. For example, you can change an item in a list by using indexing and assignment. With a string that change is not allowed. 3. Tuples Tuples are very similar to lists in that they are heterogeneous sequences of data. The difference is that a tuple is immutable, like a string. A tuple cannot be changed.
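Before the tuple example below, here is a minimal added illustration (not part of the original text) of the mutability difference just described: item assignment works on a list but raises a TypeError on a string.

```python
myList = [1, 2, 3, 4]
myList[2] = 99            # lists are mutable
print(myList)             # [1, 2, 99, 4]

myName = "David"
try:
    myName[0] = "X"       # strings are immutable
except TypeError as err:
    print("Error:", err)  # 'str' object does not support item assignment
```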
myTuple = (2,True,4.96) print(myTuple) print(len(myTuple))
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
However, if you try to change an item in a tuple, you will get an error. Note that the error message provides location and reason for the problem.
myTuple[1]=False
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
4. Set A set is an unordered collection of zero or more immutable Python data objects. Sets do not allow duplicates and are written as comma-delimited values enclosed in curly braces. The empty set is represented by set(). Sets are heterogeneous, and the collection can be assigned to a variable as below.
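One added note (not from the original text): an empty pair of curly braces creates an empty dictionary, not an empty set, which is why set() is used for the empty set.

```python
emptySet = set()
print(emptySet, type(emptySet))  # set() <class 'set'>
print({}, type({}))              # {} <class 'dict'>
```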
print({3,6,"cat",4.5,False}) mySet = {3,6,"cat",4.5,False} print(mySet)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
| Operation Name | Operator | Explanation | | --- | --- | --- | | membership | in | Set membership | | length | len | Returns the cardinality of the set | | &#124; | aset &#124; otherset | Returns a new set with all elements from both sets | | &amp; | aset &amp; otherset | Returns a new set with only those elements common to both sets | | - | aset - otherset | Returns a new set with all items from the first set not in second | | &lt;= | aset &lt;= otherset | Asks whether all elements of the first set are in the second | | Method Name | Use | Explanation | | --- | --- | --- | | union | aset.union(otherset) | Returns a new set with all elements from both sets | | intersection | aset.intersection(otherset) | Returns a new set with only those elements common to both sets | | difference | aset.difference(otherset) | Returns a new set with all items from first set not in second | | issubset | aset.issubset(otherset) | Asks whether all elements of one set are in the other | | add | aset.add(item) | Adds item to the set | | remove | aset.remove(item) | Removes item from the set | | pop | aset.pop() | Removes an arbitrary element from the set | | clear | aset.clear() | Removes all elements from the set |
mySet = {3,6,"cat",4.5,False} print(mySet) yourSet = {99,3,100} print(yourSet) print( mySet.union(yourSet)) print( mySet | yourSet) print( mySet.intersection(yourSet)) print( mySet & yourSet) print( mySet.difference(yourSet)) print( mySet - yourSet) print( {3,100}.issubset(yourSet)) print( {3,100}<=yourSet) mySet.add("house") print( mySet) mySet.remove(4.5) print( mySet) mySet.pop() print( mySet) mySet.clear() print( mySet)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
5. Dictionary Dictionaries are collections of associated pairs of items where each pair consists of a key and a value. This key-value pair is typically written as key:value. Dictionaries are written as comma-delimited key:value pairs enclosed in curly braces. For example,
capitals = {'Iowa':'DesMoines','Wisconsin':'Madison'} print(capitals) print(capitals['Iowa']) capitals['Utah']='SaltLakeCity' print(capitals) capitals['California']='Sacramento' print(len(capitals)) for k in capitals: print(capitals[k]," is the capital of ", k)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
| Operator | Use | Explanation | | --- | --- | --- | | [] | myDict[k] | Returns the value associated with k, otherwise its an error | | in | key in adict | Returns True if key is in the dictionary, False otherwise | | del | del adict[key] | Removes the entry from the dictionary | | Method Name | Use | Explanation | | --- | --- | --- | | keys | adict.keys() | Returns the keys of the dictionary in a dict_keys object | | values | adict.values() | Returns the values of the dictionary in a dict_values object | | items | adict.items() | Returns the key-value pairs in a dict_items object | | get | adict.get(k) | Returns the value associated with k, None otherwise | | get | adict.get(k,alt) | Returns the value associated with k, alt otherwise |
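The next cell demonstrates the dictionary methods; as an added complement (not from the original text), the operators in the first table behave as follows:

```python
capitals = {'Iowa': 'DesMoines', 'Wisconsin': 'Madison'}
print(capitals['Iowa'])     # [] access -> 'DesMoines'
print('Utah' in capitals)   # membership -> False
del capitals['Wisconsin']   # remove an entry
print(capitals)             # {'Iowa': 'DesMoines'}
```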
phoneext={'david':1410,'brad':1137} print(phoneext) print(phoneext.keys()) print(list(phoneext.keys())) print(phoneext.values()) print(list(phoneext.values())) print(phoneext.items()) print(list(phoneext.items())) print(phoneext.get("kent")) print(phoneext.get("kent","NO ENTRY"))
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
1.9. Input and Output
aName = input("Please enter your name ") print("Your name in all capitals is",aName.upper(), "and has length", len(aName)) sradius = input("Please enter the radius of the circle ") radius = float(sradius) diameter = 2 * radius print(diameter)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
1.9.1. String Formatting
print("Hello","World") print("Hello","World", sep="***") print("Hello","World", end="***") aName = "Anas" age = 10 print(aName, "is", age, "years old.") print("%s is %d years old." % (aName, age)) # The % operator is a string operator called the format operator.
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
| Character | Output Format | | --- | --- | | d, i | Integer | | u | Unsigned integer | | f | Floating point as m.ddddd | | e | Floating point as m.ddddde+/-xx | | E | Floating point as m.dddddE+/-xx | | g | Use %e for exponents less than -4 or greater than +5, otherwise use %f | | c | Single character | | s | String, or any Python data object that can be converted to a string by using the str function. | | % | Insert a literal % character | | Modifier | Example | Description | | --- | --- | --- | | number | %20d | Put the value in a field width of 20 | | - | %-20d | Put the value in a field 20 characters wide, left-justified | | + | %+20d | Put the value in a field 20 characters wide, right-justified | | 0 | %020d | Put the value in a field 20 characters wide, fill in with leading zeros. | | . | %20.2f | Put the value in a field 20 characters wide with 2 characters to the right of the decimal point. | | (name) | %(name)d | Get the value from the supplied dictionary using name as the key. |
price = 24 item = "banana" print("The %s costs %d cents" % (item, price)) print("The %+10s costs %5.2f cents" % (item, price)) print("The %+10s costs %10.2f cents" % (item, price)) print("The %+10s costs %010.2f cents" % (item, price)) itemdict = {"item":"banana","cost":24} print("The %(item)s costs %(cost)7.1f cents" % itemdict)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
1.10. Control Structures Algorithms require two important control structures: iteration and selection. - Iteration 1. while
counter = 1 while counter <= 5: print("Hello, world") counter = counter + 1
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
2. for
for item in [1,3,6,2,5]: print(item) for item in range(5): print(item**2) wordlist = ['cat','dog','rabbit'] letterlist = [] for aword in wordlist: for aletter in aword: if aletter not in letterlist: letterlist.append(aletter) print(letterlist)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
list comprehension
sqlist=[] for x in range(1,11): sqlist.append(x*x) print(sqlist) sqlist2=[x*x for x in range(1,11)] # list comprehension print(sqlist2) sqlist=[x*x for x in range(1,11) if x%2 != 0] print(sqlist) [ch.upper() for ch in 'comprehension' if ch not in 'aeiou'] wordlist = ['cat','dog','rabbit'] uniqueLetters = [letter for word in wordlist for letter in word] print(uniqueLetters)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
1.12. Defining Functions Problem: Here's a self check that really covers everything so far. You may have heard of the infinite monkey theorem. The theorem states that a monkey hitting keys at random on a typewriter keyboard for an infinite amount of time will almost surely type a given text, such as the complete works of William Shakespeare. Well, suppose we replace a monkey with a Python function. How long do you think it would take for a Python function to generate just one sentence of Shakespeare? The sentence we'll shoot for is: "methinks it is like a weasel" You're not going to want to run this one in the browser, so fire up your favorite Python IDE. The way we'll simulate this is to write a function that generates a string that is 28 characters long by choosing random letters from the 26 letters in the alphabet plus the space. We'll write another function that will score each generated string by comparing the randomly generated string to the goal. A third function will repeatedly call generate and score; if 100% of the letters are correct we are done. If the letters are not correct then we will generate a whole new string. To make it easier to follow your program's progress, this third function should print out the best string generated so far and its score every 1000 tries.
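The solution cell that follows only checks for an exact match; a scoring-based variant that tracks the best string so far and reports every 1000 tries, as the exercise describes, might look like the added sketch below (illustrative only, and it will effectively never finish for the full 28-character sentence, which is the point of the theorem):

```python
import random
import string

TARGET = "methinks it is like a weasel"

def generate(length=len(TARGET)):
    # random string of lowercase letters and spaces
    return "".join(random.choice(string.ascii_lowercase + " ") for _ in range(length))

def score(guess, target=TARGET):
    # fraction of positions where the guess matches the target
    return sum(g == t for g, t in zip(guess, target)) / len(target)

def monkey():
    best_guess, best_score, tries = "", 0.0, 0
    while best_score < 1.0:
        guess = generate()
        s = score(guess)
        if s > best_score:
            best_guess, best_score = guess, s
        tries += 1
        if tries % 1000 == 0:
            print(tries, round(best_score, 3), best_guess)
    return tries
```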
import string import random import time start_time = time.time() def generate_new_sentence(): sentence = [random.choice(string.ascii_lowercase + " ") for x in range(28)] return "".join(sentence) def compare_sentences(guess): target_sentence = "methinks it is like a weasel" return guess == target_sentence def main(): i = 0 print(i) guess = generate_new_sentence() print(guess) while not compare_sentences(guess): guess = generate_new_sentence() print(guess) i += 1 print(i) # main() print("--- %s seconds ---" % (time.time() - start_time))
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
1.13. Object-Oriented Programming in Python: Defining Classes 1.13.1. A Fraction Class
class Fraction: def __init__(self, top, bottom): self.num = top self.den = bottom def show(self): print(self.num,"/",self.den) # Overriding the default __str__ function def __str__(self): return str(self.num)+"/"+str(self.den) def __add__(self,otherfraction): newnum = self.num * otherfraction.den + self.den * otherfraction.num newden = self.den * otherfraction.den return Fraction(newnum,newden) myfraction = Fraction(3,5) print(myfraction) print(myfraction.show()) f1 = Fraction(1,4) f2 = Fraction(1,2) f3 = f1 + f2 print(f3)
algorithms/python_revision.ipynb
AnasFullStack/Awesome-Full-Stack-Web-Developer
mit
With our materials created, we'll now define key dimensions in our model. These dimensions are taken from the example in section 11.1.3 of the Serpent manual.
# Outer radius of fuel and clad r_fuel = 0.6122 r_clad = 0.6540 # Pressure tube and calendria radii pressure_tube_ir = 5.16890 pressure_tube_or = 5.60320 calendria_ir = 6.44780 calendria_or = 6.58750 # Radius to center of each ring of fuel pins ring_radii = np.array([0.0, 1.4885, 2.8755, 4.3305])
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
To begin creating the bundle, we'll first create annular regions completely filled with heavy water and add in the fuel pins later. The radii that we've specified above correspond to the center of each ring. We actually need to create cylindrical surfaces at radii that are half-way between the centers.
# These are the surfaces that will divide each of the rings radial_surf = [openmc.ZCylinder(R=r) for r in (ring_radii[:-1] + ring_radii[1:])/2] water_cells = [] for i in range(ring_radii.size): # Create annular region if i == 0: water_region = -radial_surf[i] elif i == ring_radii.size - 1: water_region = +radial_surf[i-1] else: water_region = +radial_surf[i-1] & -radial_surf[i] water_cells.append(openmc.Cell(fill=heavy_water, region=water_region))
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
Now we need to create a universe that contains a fuel pin. Note that we don't actually need to put water outside of the cladding in this universe because it will be truncated by a higher universe.
surf_fuel = openmc.ZCylinder(R=r_fuel) fuel_cell = openmc.Cell(fill=fuel, region=-surf_fuel) clad_cell = openmc.Cell(fill=clad, region=+surf_fuel) pin_universe = openmc.Universe(cells=(fuel_cell, clad_cell)) pin_universe.plot(**plot_args)
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
The code below works through each ring to create a cell containing the fuel pin universe. As each fuel pin is created, we modify the region of the water cell to include everything outside the fuel pin.
num_pins = [1, 6, 12, 18] angles = [0, 0, 15, 0] for i, (r, n, a) in enumerate(zip(ring_radii, num_pins, angles)): for j in range(n): # Determine location of center of pin theta = (a + j/n*360.) * pi/180. x = r*cos(theta) y = r*sin(theta) pin_boundary = openmc.ZCylinder(x0=x, y0=y, R=r_clad) water_cells[i].region &= +pin_boundary # Create each fuel pin -- note that we explicitly assign an ID so # that we can identify the pin later when looking at tallies pin = openmc.Cell(fill=pin_universe, region=-pin_boundary) pin.translation = (x, y, 0) pin.id = (i + 1)*100 + j bundle_universe.add_cell(pin) bundle_universe.plot(**plot_args)
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
Looking pretty good! Finally, we create cells for the pressure tube and calendria and then put our bundle in the middle of the pressure tube.
pt_inner = openmc.ZCylinder(R=pressure_tube_ir) pt_outer = openmc.ZCylinder(R=pressure_tube_or) calendria_inner = openmc.ZCylinder(R=calendria_ir) calendria_outer = openmc.ZCylinder(R=calendria_or, boundary_type='vacuum') bundle = openmc.Cell(fill=bundle_universe, region=-pt_inner) pressure_tube = openmc.Cell(fill=clad, region=+pt_inner & -pt_outer) v1 = openmc.Cell(region=+pt_outer & -calendria_inner) calendria = openmc.Cell(fill=clad, region=+calendria_inner & -calendria_outer) root_universe = openmc.Universe(cells=[bundle, pressure_tube, v1, calendria])
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
Let's look at the final product. We'll export our geometry and materials and then use plot_inline() to get a nice-looking plot.
geom = openmc.Geometry(root_universe) geom.export_to_xml() mats = openmc.Materials(geom.get_all_materials().values()) mats.export_to_xml() p = openmc.Plot.from_geometry(geom) p.color_by = 'material' p.colors = { fuel: 'black', clad: 'silver', heavy_water: 'blue' } openmc.plot_inline(p)
examples/jupyter/candu.ipynb
wbinventor/openmc
mit
1 - Generate materialized views Before generating the aline cohort, we require the following materialized views to be already generated: angus - from angus.sql; heightweight - from HeightWeightQuery.sql; aline_vaso_flag - from aline_vaso_flag.sql. You can generate the above by executing the codeblock below. If you haven't changed the directory structure, it should work as-is; otherwise you may need to modify the concepts_path variable above.
# Load in the query from file query='DROP TABLE IF EXISTS DATABASE.angus_sepsis;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(concepts_path,'sepsis/angus-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'angus_sepsis\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') # Load in the query from file query='DROP TABLE IF EXISTS DATABASE.heightweight;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(concepts_path,'demographics/HeightWeightQuery-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'heightweight\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') # Load in the query from file query='DROP TABLE IF EXISTS DATABASE.aline_vaso_flag;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(aline_path,'aline_vaso_flag-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'aline_vaso_flag\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') # Load in the query from file query='DROP TABLE IF EXISTS DATABASE.ventsettings;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(concepts_path,'durations/ventilation-settings-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'vent_settings\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') # Load in the query from file query='DROP TABLE IF EXISTS DATABASE.ventdurations;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(concepts_path,'durations/ventilation-durations-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'vent_durations\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.')
mimic-iii/notebooks/aline-aws/aline-awsathena.ipynb
MIT-LCP/mimic-code
mit
Now we generate the aline_cohort table using the aline_cohort.sql file. Afterwards, we can generate the remaining 6 materialized views in any order, as they all depend only on aline_cohort and raw MIMIC-III data.
# Load in the query from file query='DROP TABLE IF EXISTS DATABASE.aline_cohort_all;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(aline_path,'aline_cohort-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'aline_cohort_all\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') # Load in the query from file query='DROP TABLE IF EXISTS DATABASE.aline_cohort;' cursor.execute(query.replace("DATABASE", gluedatabase)) f = os.path.join(aline_path,'aline_final_cohort-awsathena.sql') with open(f) as fp: query = ''.join(fp.readlines()) # Execute the query print('Generating table \'aline_cohort\' using {} ...'.format(f),end=' ') cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.') query = """ select icustay_id , exclusion_readmission , exclusion_shortstay , exclusion_vasopressors , exclusion_septic , exclusion_aline_before_admission , exclusion_not_ventilated_first24hr , exclusion_service_surgical from DATABASE.aline_cohort_all """ cursor.execute(query.replace("DATABASE", gluedatabase)) # Load the result of the query into a dataframe df = as_pandas(cursor) # print out exclusions idxRem = df['icustay_id'].isnull() for c in df.columns: if 'exclusion_' in c: print('{:5d} - {}'.format(df[c].sum(), c)) idxRem[df[c]==1] = True # final exclusion (excl sepsis/something else) print('Will remove {} of {} patients.'.format(np.sum(idxRem), df.shape[0])) print('') print('') print('Reproducing the flow of the flowchart from Chest paper.') # first stay idxRem = (df['exclusion_readmission']==1) | (df['exclusion_shortstay']==1) print('{:5d} - removing {:5d} ({:2.2f}%) patients - short stay // readmission.'.format( df.shape[0], np.sum(idxRem), 100.0*np.mean(idxRem))) df = df.loc[~idxRem,:] idxRem = df['exclusion_not_ventilated_first24hr']==1 print('{:5d} - removing {:5d} ({:2.2f}%) patients - not ventilated in first 24 hours.'.format( df.shape[0], np.sum(idxRem), 100.0*np.mean(idxRem))) df = df.loc[df['exclusion_not_ventilated_first24hr']==0,:] print('{:5d}'.format(df.shape[0])) idxRem = df['icustay_id'].isnull() for c in ['exclusion_septic', 'exclusion_vasopressors', 'exclusion_aline_before_admission', 'exclusion_service_surgical']: print('{:5s} - removing {:5d} ({:2.2f}%) patients - additional {:5d} {:2.2f}% - {}'.format( '', df[c].sum(), 100.0*df[c].mean(), np.sum((idxRem==0)&(df[c]==1)), 100.0*np.mean((idxRem==0)&(df[c]==1)), c)) idxRem = idxRem | (df[c]==1) df = df.loc[~idxRem,:] print('{} - final cohort.'.format(df.shape[0]))
mimic-iii/notebooks/aline-aws/aline-awsathena.ipynb
MIT-LCP/mimic-code
mit
The following codeblock loads in the SQL from each file in the aline subfolder and executes the query to generate the materialized view. We specifically exclude the aline_cohort.sql file as we have already executed it above. Again, the order of query execution does not matter for these queries. Note also that the filenames are the same as the created materialized view names for convenience.
# get a list of all files in the subfolder aline_queries = [f for f in os.listdir(aline_path) # only keep the filename if it is actually a file (and not a directory) if os.path.isfile(os.path.join(aline_path,f)) # and only keep the filename if it is an SQL file & f.endswith('.sql') # and we do *not* want aline_cohort - it's generated above & (f != 'aline_cohort-awsathena.sql') & (f != 'aline_final_cohort-awsathena.sql') & (f != 'aline_vaso_flag-awsathena.sql')] for f in aline_queries: # Load in the query from file table=f.split('-') query='DROP TABLE IF EXISTS DATABASE.{};'.format(table[0]) cursor.execute(query.replace("DATABASE", gluedatabase)) print('Executing {} ...'.format(f), end=' ') with open(os.path.join(aline_path,f)) as fp: query = ''.join(fp.readlines()) cursor.execute(query.replace("DATABASE", gluedatabase)) print('done.')
mimic-iii/notebooks/aline-aws/aline-awsathena.ipynb
MIT-LCP/mimic-code
mit
Summarize the cohort exclusions before we pull all the data together. 2 - Extract all covariates and outcome measures We now aggregate all the data from the various views into a single dataframe.
# Load in the query from file query = """ --FINAL QUERY select co.subject_id, co.hadm_id, co.icustay_id -- static variables from patient tracking tables , co.age , co.gender -- , co.gender_num -- gender, 0=F, 1=M , co.intime as icustay_intime , co.day_icu_intime -- day of week, text --, co.day_icu_intime_num -- day of week, numeric (0=Sun, 6=Sat) , co.hour_icu_intime -- hour of ICU admission (24 hour clock) , case when co.hour_icu_intime >= 7 and co.hour_icu_intime < 19 then 1 else 0 end as icu_hour_flag , co.outtime as icustay_outtime -- outcome variables , co.icu_los_day , co.hospital_los_day , co.hosp_exp_flag -- 1/0 patient died within current hospital stay , co.icu_exp_flag -- 1/0 patient died within current ICU stay , co.mort_day -- days from ICU admission to mortality, if they died , co.day_28_flag -- 1/0 whether the patient died 28 days after *ICU* admission , co.mort_day_censored -- days until patient died *or* 150 days (150 days is our censor time) , co.censor_flag -- 1/0 did this patient have 150 imputed in mort_day_censored -- aline flags -- , co.initial_aline_flag -- always 0, we remove patients admitted w/ aline , co.aline_flag -- 1/0 did the patient receive an aline , co.aline_time_day -- if the patient received aline, fractional days until aline put in -- demographics extracted using regex + echos , bmi.weight as weight_first , bmi.height as height_first , bmi.bmi -- service patient was admitted to the ICU under , co.service_unit -- severity of illness just before ventilation , so.sofa as sofa_first -- vital sign value just preceeding ventilation , vi.map as map_first , vi.heartrate as hr_first , vi.temperature as temp_first , vi.spo2 as spo2_first -- labs! , labs.bun_first , labs.creatinine_first , labs.chloride_first , labs.hgb_first , labs.platelet_first , labs.potassium_first , labs.sodium_first , labs.tco2_first , labs.wbc_first -- comorbidities extracted using ICD-9 codes , icd.chf as chf_flag , icd.afib as afib_flag , icd.renal as renal_flag , icd.liver as liver_flag , icd.copd as copd_flag , icd.cad as cad_flag , icd.stroke as stroke_flag , icd.malignancy as malignancy_flag , icd.respfail as respfail_flag , icd.endocarditis as endocarditis_flag , icd.ards as ards_flag , icd.pneumonia as pneumonia_flag -- sedative use , sed.sedative_flag , sed.midazolam_flag , sed.fentanyl_flag , sed.propofol_flag from DATABASE.aline_cohort co -- The following tables are generated by code within this repository left join DATABASE.aline_sofa so on co.icustay_id = so.icustay_id left join DATABASE.aline_bmi bmi on co.icustay_id = bmi.icustay_id left join DATABASE.aline_icd icd on co.hadm_id = icd.hadm_id left join DATABASE.aline_vitals vi on co.icustay_id = vi.icustay_id left join DATABASE.aline_labs labs on co.icustay_id = labs.icustay_id left join DATABASE.aline_sedatives sed on co.icustay_id = sed.icustay_id order by co.icustay_id """ cursor.execute(query.replace("DATABASE", gluedatabase)) # Load the result of the query into a dataframe df = as_pandas(cursor) df.describe().T
mimic-iii/notebooks/aline-aws/aline-awsathena.ipynb
MIT-LCP/mimic-code
mit
Networks We give two sets of networks. One of them allows for all parameters. The other is identical except it only uses essential parameters.
network_strings = [ ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : HCM1", "YOX1 : SWI4"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : HCM1", "YOX1 : (SWI4)(HCM1)"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : HCM1", "YOX1 : (SWI4)(~HCM1)"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : HCM1", "YOX1 : (SWI4)(NDD1)"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : HCM1", "YOX1 : (SWI4)(~NDD1)"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : (SWI4)(YOX1)", "NDD1 : HCM1", "YOX1 : SWI4"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : (SWI4)(~YOX1)", "NDD1 : HCM1", "YOX1 : SWI4"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : (HCM1)(YOX1)", "YOX1 : SWI4"], ["SWI4 : (NDD1)(~YOX1)", "HCM1 : SWI4", "NDD1 : (HCM1)(~YOX1)", "YOX1 : SWI4"] ]
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Full Networks
networks = [Network() for i in range(0,9)] for i,network in enumerate(networks): network.assign('\n'.join(network_strings[i]))
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Essential Networks
essential_network_strings = [ [ line + " : E" for line in network_string ] for network_string in network_strings] essential_networks = [Network() for i in range(0,9)] for i,network in enumerate(essential_networks): network.assign('\n'.join(essential_network_strings[i]))
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Path match analysis We give two functions for path match analysis. One looks at the entire domain graph. The other only checks for path matches in stable Morse sets. Analysis on entire domain graph
def Analyze(network, events, event_ordering): poe = PosetOfExtrema(network, events, event_ordering ) pattern_graph = PatternGraph(poe) parameter_graph = ParameterGraph(network) result = [] for parameter_index in range(0, parameter_graph.size()): parameter = parameter_graph.parameter(parameter_index) search_graph = SearchGraph(DomainGraph(parameter)) matching_graph = MatchingGraph(search_graph, pattern_graph); if PathMatch(matching_graph): result.append(parameter_index) return [result, parameter_graph.size()]
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Analysis on stable Morse set only
def AnalyzeOnStable(network, events, event_ordering): poe = PosetOfExtrema(network, events, event_ordering ) pattern_graph = PatternGraph(poe) parameter_graph = ParameterGraph(network) results = [] for parameter_index in range(0, parameter_graph.size()): parameter = parameter_graph.parameter(parameter_index) domain_graph = DomainGraph(parameter) morse_decomposition = MorseDecomposition(domain_graph.digraph()) morse_graph = MorseGraph() morse_graph.assign(domain_graph, morse_decomposition) MorseNodes = range(0, morse_graph.poset().size()) isStable = lambda node : len(morse_graph.poset().children(node)) == 0 isStableFC = lambda node : morse_graph.annotation(node)[0] == 'FC' and isStable(node) hasStableFC = any( isStableFC(node) for node in MorseNodes) StableNodes = [ node for node in MorseNodes if isStable(node) ] subresult = [] for node in StableNodes: search_graph = SearchGraph(domain_graph, node) matching_graph = MatchingGraph(search_graph, pattern_graph) path_match = PathMatch(matching_graph) if path_match: subresult.append([parameter_index, node]) results.append([subresult, 1 if hasStableFC else 0]) return [results, parameter_graph.size()]
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Poset of Extrema We study two posets of extrema. The first poset comes from looking at times [10,60] and assuming the SWI4 minimum happens before the other minima at the beginning and thus can be excluded. The other comes from including all extrema. Original Poset of Extrema
original_events = [("HCM1", "min"), ("NDD1", "min"), ("YOX1", "min"), ("SWI4", "max"), ("HCM1", "max"), ("YOX1", "max"), ("NDD1", "max"), ("SWI4","min")] original_event_ordering = [ (i,j) for i in [0,1,2] for j in [3,4,5] ] + \ [ (i,j) for i in [3,4,5] for j in [6] ] + \ [ (i,j) for i in [6] for j in [7] ] DrawGraph(PosetOfExtrema(networks[0], original_events, original_event_ordering ))
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Alternative Poset of Extrema
all_events = [("SWI4", "min"), ("HCM1", "min"), ("NDD1", "min"), ("YOX1", "min"), ("SWI4", "max"), ("HCM1", "max"), ("YOX1", "max"), ("NDD1", "max"), ("SWI4","min"), ("YOX1", "min"), ("HCM1","min"), ("NDD1", "min"), ("SWI4", "max"), ("HCM1", "max"), ("YOX1", "max"), ("NDD1", "max")] all_event_ordering = [ (i,j) for i in [0,1,2,3] for j in [4,5,6] ] + \ [ (i,j) for i in [4,5,6] for j in [7] ] + \ [ (i,j) for i in [7] for j in [8] ] + \ [ (i,j) for i in [8] for j in [9,10] ] + \ [ (i,j) for i in [9,10] for j in [11,12,13,14] ] + \ [ (11,15) ] DrawGraph(PosetOfExtrema(networks[0], all_events, all_event_ordering ))
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Experiments There are 8 experiments corresponding to 3 binary choices: Full networks vs Essential networks; Path matching in the entire domain graph vs path matching in stable Morse sets; Original poset of extrema vs Alternative poset of extrema
def DisplayExperiment(results, title): markdown_string = "# " + title + "\n\n" markdown_string += "| network | # parameters | # parameters with path match |\n" markdown_string += "| ------- |------------ | ---------------------------- |\n" for i, item in enumerate(results): [parameters_with_path_match, pgsize] = item markdown_string += ("|" + str(i) + "|" + str(pgsize) + "|" + str(len(parameters_with_path_match)) + "|\n") from IPython.display import display, Markdown, Latex display(Markdown(markdown_string)) def DisplayStableExperiment(results, title): markdown_string = "# " + title + "\n\n" markdown_string += "| network | # parameters | # parameters with stable FC | # parameters with path match |\n" markdown_string += "| ------- |------------ | ---------------------------- | ---------------------------- |\n" for i, item in enumerate(results): [results, pgsize] = item parameters_with_path_match = sum([ 1 if pair[0] else 0 for pair in results]) parameters_with_stable_fc = sum([ 1 if pair[1] else 0 for pair in results]) markdown_string += ("|" + str(i) + "|" + str(pgsize) + "|" +str(parameters_with_stable_fc) +"|"+str(parameters_with_path_match) + "|\n") from IPython.display import display, Markdown, Latex display(Markdown(markdown_string)) %%time experiment = lambda network : Analyze(network, original_events, original_event_ordering) experimental_results_1 = [ experiment(network) for network in networks ] DisplayExperiment(experimental_results_1, "Experiment 1: All parameters, original poset of extrema") %%time experiment = lambda network : Analyze(network, original_events, original_event_ordering) experimental_results_2 = [ experiment(network) for network in essential_networks ] DisplayExperiment(experimental_results_2, "Experiment 2: Essential parameters, original poset of extrema") %%time experiment = lambda network : AnalyzeOnStable(network, original_events, original_event_ordering) experimental_results_3 = [ experiment(network) for network in networks ] DisplayStableExperiment(experimental_results_3, "Experiment 3: All parameters, original poset, stable only") %%time experiment = lambda network : AnalyzeOnStable(network, original_events, original_event_ordering) experimental_results_4 = [ experiment(network) for network in essential_networks ] DisplayStableExperiment(experimental_results_4, "Experiment 4: Essential parameters, original poset, stable only") %%time experiment = lambda network : Analyze(network, all_events, all_event_ordering) experimental_results_5 = [ experiment(network) for network in networks ] DisplayExperiment(experimental_results_5, "Experiment 5: All parameters, alternative poset of extrema") %%time experiment = lambda network : Analyze(network, all_events, all_event_ordering) experimental_results_6 = [ experiment(network) for network in essential_networks ] DisplayExperiment(experimental_results_6, "Experiment 6: Essential parameters, alternative poset of extrema") %%time experiment = lambda network : AnalyzeOnStable(network, all_events, all_event_ordering) experimental_results_7 = [ experiment(network) for network in networks ] DisplayStableExperiment(experimental_results_7, "Experiment 7: All parameters, alternative poset of extrema, stable only") %%time experiment = lambda network : AnalyzeOnStable(network, all_events, all_event_ordering) experimental_results_8 = [ experiment(network) for network in essential_networks ] DisplayStableExperiment(experimental_results_8, "Experiment 8: Essential parameters, alternative poset of extrema, stable only")
Tutorials/PatternMatchExperiments.ipynb
shaunharker/DSGRN
mit
Step 2: Determine output variance
CV = 0.05 #Coefficient of variation set to 5% (CV = sigma/mu) var_A = np.power(abs(CV*A_det),2) #Variance of the A-matrix (var =sigma^2) var_B = np.power(abs(CV*B_det),2) #Variance of the B-matrix P = np.concatenate((np.reshape(dgdA, 4), dgdB), axis=1) #P contains partial derivatives of both A and B var_P = np.concatenate((np.reshape(var_A, 4), var_B), axis=1) #var_P contains all variances of each parameter in A and B var_g = sum(np.multiply(np.power(P, 2), var_P)) #Total output variance (first order Taylor) var_g = var_g[0,0] + var_g[0,1] +var_g[0,2] + var_g[0,3] + var_g[0,4] + var_g[0,5] print("The total output variance equals:", var_g)
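As a quick, generic illustration of the first-order Taylor propagation used above, the helper below computes the output variance from arbitrary partial derivatives and parameter variances. The numbers are made up; only the formula var_g = sum_i (dg/dp_i)^2 * var(p_i) is taken from the cell above.

import numpy as np

def first_order_variance(partials, variances):
    # First-order Taylor propagation: var_g = sum_i (dg/dp_i)^2 * var(p_i)
    partials = np.asarray(partials, dtype=float).ravel()
    variances = np.asarray(variances, dtype=float).ravel()
    return float(np.sum(partials ** 2 * variances))

# Hypothetical partial derivatives and a 5% coefficient of variation on each parameter
params = np.array([2.0, -1.5, 0.5])
partials = np.array([1.2, 0.3, -0.7])
var_params = np.power(np.abs(0.05 * params), 2)
print(first_order_variance(partials, var_params))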
Code/.ipynb_checkpoints/AUP_LCA_evelynegroen-checkpoint.ipynb
evelynegroen/evelynegroen.github.io
mit
First: load and "featurize" Featurization refers to the process of converting the conformational snapshots from your MD trajectories into vectors in some space $\mathbb{R}^N$ that can be manipulated and modeled by subsequent analyses. The Gaussian HMM, for instance, uses Gaussian emission distributions, so it models the trajectory as a time-dependent mixture of multivariate Gaussians. In general, the featurization is somewhat of an art. For this example, we're using MSMBuilder's SuperposeFeaturizer, which superposes each snapshot onto a reference frame (trajectories[0][0] in this example), and then measure the distance from each atom to its position in the reference conformation as the 'feature'
print(AlanineDipeptide.description()) dataset = AlanineDipeptide().get() trajectories = dataset.trajectories topology = trajectories[0].topology indices = [atom.index for atom in topology.atoms if atom.element.symbol in ['C', 'O', 'N']] featurizer = SuperposeFeaturizer(indices, trajectories[0][0]) sequences = featurizer.transform(trajectories)
examples/advanced/hmm-and-msm.ipynb
cxhernandez/msmbuilder
lgpl-2.1
Expected output: <table> <tr> <td> **gradients["dWaa"][1][2] ** </td> <td> 10.0 </td> </tr> <tr> <td> **gradients["dWax"][3][1]** </td> <td> -10.0 </td> </td> </tr> <tr> <td> **gradients["dWya"][1][2]** </td> <td> 0.29713815361 </td> </tr> <tr> <td> **gradients["db"][4]** </td> <td> [ 10.] </td> </tr> <tr> <td> **gradients["dby"][1]** </td> <td> [ 8.45833407] </td> </tr> </table> 2.2 - Sampling Now assume that your model is trained. You would like to generate new text (characters). The process of generation is explained in the picture below: <img src="images/dinos3.png" style="width:500;height:300px;"> <caption><center> Figure 3: In this picture, we assume the model is already trained. We pass in $x^{\langle 1\rangle} = \vec{0}$ at the first time step, and have the network then sample one character at a time. </center></caption> Exercise: Implement the sample function below to sample characters. You need to carry out 4 steps: Step 1: Pass the network the first "dummy" input $x^{\langle 1 \rangle} = \vec{0}$ (the vector of zeros). This is the default input before we've generated any characters. We also set $a^{\langle 0 \rangle} = \vec{0}$ Step 2: Run one step of forward propagation to get $a^{\langle 1 \rangle}$ and $\hat{y}^{\langle 1 \rangle}$. Here are the equations: $$ a^{\langle t+1 \rangle} = \tanh(W_{ax} x^{\langle t \rangle } + W_{aa} a^{\langle t \rangle } + b)\tag{1}$$ $$ z^{\langle t + 1 \rangle } = W_{ya} a^{\langle t + 1 \rangle } + b_y \tag{2}$$ $$ \hat{y}^{\langle t+1 \rangle } = softmax(z^{\langle t + 1 \rangle })\tag{3}$$ Note that $\hat{y}^{\langle t+1 \rangle }$ is a (softmax) probability vector (its entries are between 0 and 1 and sum to 1). $\hat{y}^{\langle t+1 \rangle}_i$ represents the probability that the character indexed by "i" is the next character. We have provided a softmax() function that you can use. Step 3: Carry out sampling: Pick the next character's index according to the probability distribution specified by $\hat{y}^{\langle t+1 \rangle }$. This means that if $\hat{y}^{\langle t+1 \rangle }_i = 0.16$, you will pick the index "i" with 16% probability. To implement it, you can use np.random.choice. Here is an example of how to use np.random.choice(): python np.random.seed(0) p = np.array([0.1, 0.0, 0.7, 0.2]) index = np.random.choice([0, 1, 2, 3], p = p.ravel()) This means that you will pick the index according to the distribution: $P(index = 0) = 0.1, P(index = 1) = 0.0, P(index = 2) = 0.7, P(index = 3) = 0.2$. Step 4: The last step to implement in sample() is to overwrite the variable x, which currently stores $x^{\langle t \rangle }$, with the value of $x^{\langle t + 1 \rangle }$. You will represent $x^{\langle t + 1 \rangle }$ by creating a one-hot vector corresponding to the character you've chosen as your prediction. You will then forward propagate $x^{\langle t + 1 \rangle }$ in Step 1 and keep repeating the process until you get a "\n" character, indicating you've reached the end of the dinosaur name.
# GRADED FUNCTION: sample def sample(parameters, char_to_ix, seed): """ Sample a sequence of characters according to a sequence of probability distributions output of the RNN Arguments: parameters -- python dictionary containing the parameters Waa, Wax, Wya, by, and b. char_to_ix -- python dictionary mapping each character to an index. seed -- used for grading purposes. Do not worry about it. Returns: indices -- a list of length n containing the indices of the sampled characters. """ # Retrieve parameters and relevant shapes from "parameters" dictionary Waa, Wax, Wya, by, b = parameters['Waa'], parameters['Wax'], parameters['Wya'], parameters['by'], parameters['b'] vocab_size = by.shape[0] n_a = Waa.shape[1] ### START CODE HERE ### # Step 1: Create the one-hot vector x for the first character (initializing the sequence generation). (≈1 line) x = None # Step 1': Initialize a_prev as zeros (≈1 line) a_prev = None # Create an empty list of indices, this is the list which will contain the list of indices of the characters to generate (≈1 line) indices = [] # Idx is a flag to detect a newline character, we initialize it to -1 idx = -1 # Loop over time-steps t. At each time-step, sample a character from a probability distribution and append # its index to "indices". We'll stop if we reach 50 characters (which should be very unlikely with a well # trained model), which helps debugging and prevents entering an infinite loop. counter = 0 newline_character = char_to_ix['\n'] while (idx != newline_character and counter != 50): # Step 2: Forward propagate x using the equations (1), (2) and (3) a = None z = None y = None # for grading purposes np.random.seed(counter+seed) # Step 3: Sample the index of a character within the vocabulary from the probability distribution y idx = None # Append the index to "indices" None # Step 4: Overwrite the input character as the one corresponding to the sampled index. x = None x[None] = None # Update "a_prev" to be "a" a_prev = None # for grading purposes seed += 1 counter +=1 ### END CODE HERE ### if (counter == 50): indices.append(char_to_ix['\n']) return indices np.random.seed(2) n, n_a = 20, 100 a0 = np.random.randn(n_a, 1) i0 = 1 # first character is ix_to_char[i0] Wax, Waa, Wya = np.random.randn(n_a, vocab_size), np.random.randn(n_a, n_a), np.random.randn(vocab_size, n_a) b, by = np.random.randn(n_a, 1), np.random.randn(vocab_size, 1) parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "b": b, "by": by} indices = sample(parameters, char_to_ix, 0) print("Sampling:") print("list of sampled indices:", indices) print("list of sampled characters:", [ix_to_char[i] for i in indices])
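The cell above is the graded skeleton, so its bodies are intentionally left as None. Purely as an illustration of the four steps described in the text (not the assignment solution), here is a tiny self-contained sampling loop with randomly initialized parameters and a made-up vocabulary size:

import numpy as np

def softmax(z):
    e = np.exp(z - np.max(z))
    return e / e.sum(axis=0)

np.random.seed(0)
vocab_size, n_a = 5, 8                          # hypothetical sizes, for the sketch only
Wax = np.random.randn(n_a, vocab_size) * 0.01
Waa = np.random.randn(n_a, n_a) * 0.01
Wya = np.random.randn(vocab_size, n_a) * 0.01
b, by = np.zeros((n_a, 1)), np.zeros((vocab_size, 1))

x = np.zeros((vocab_size, 1))                   # Step 1: dummy input x<1> = 0
a_prev = np.zeros((n_a, 1))                     #         and a<0> = 0
sampled_indices = []
for _ in range(10):
    a = np.tanh(np.dot(Wax, x) + np.dot(Waa, a_prev) + b)    # Step 2: one forward step
    y = softmax(np.dot(Wya, a) + by)
    idx = np.random.choice(vocab_size, p=y.ravel())           # Step 3: sample an index
    sampled_indices.append(idx)
    x = np.zeros((vocab_size, 1))                              # Step 4: one-hot of the sample
    x[idx] = 1
    a_prev = a
print(sampled_indices)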
deeplearning.ai/C5.SequenceModel/Week1_RNN/assignment/Dinosaur Island -- Character-level language model/Dinosaurus Island -- Character level language model final - v1.ipynb
jinzishuai/learn2deeplearn
gpl-3.0
Class Methods
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname): self.fname = fname self.lname = lname self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount emp1 = Employee('Sri', 'Paladugu') emp2 = Employee('Dhruv', 'Paladugu') Employee.set_raise_amt(1.05) print(Employee.raise_amount) print(emp1.raise_amount) print(emp2.raise_amount)
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Class Methods can be used to create alternate constructors
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount @classmethod def from_string(cls, emp_str): fname, lname, salary = emp_str.split("-") return cls(fname, lname, salary) new_emp = Employee.from_string("Pradeep-Koganti-10000") print(new_emp.email)
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Static Methods Instance methods take self as the first argument; class methods take cls as the first argument. Static methods take neither the instance nor the class as an argument; we just pass the arguments we want to work with. Static methods don't operate on the instance or the class.
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) @classmethod def set_raise_amt(cls, amount): cls.raise_amount = amount @classmethod def from_string(cls, emp_str): fname, lname, salary = emp_str.split("-") return cls(fname, lname, salary) @staticmethod def is_workday(day): if day.weekday() == 5 or day.weekday() == 6: return False else: return True import datetime my_date = datetime.date(2016, 7, 10) print(Employee.is_workday(my_date))
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Inheritance - Creating subclasses
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): pass dev1 = Developer('Sri', 'Paladugu', 1000) print(dev1.get_fullname()) print(help(Developer)) # This command prints the Method resolution order. # Indicating the order in which the interpreter is going to look for methods.
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Now what if you want Developer's raise_amount to be 10%?
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 dev1 = Developer('Sri', 'Paladugu', 1000) dev1.apply_raise() print(dev1.salary)
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Now what if we want the Developer class to have an extra attribute like prog_lang?
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 def __init__(self, fname, lname, salary, prog_lang): super().__init__(fname, lname, salary) # or you can also use the following syntax # Employee.__init__(self, fname, lname, salary) self.prog_lang = prog_lang dev1 = Developer('Sri', 'Paladugu', 1000, 'Python') print(dev1.get_fullname()) print(dev1.prog_lang)
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Gotcha - Mutable default arguments * https://pythonconquerstheuniverse.wordpress.com/2012/02/15/mutable-default-arguments/
class Employee: emp_count = 0 # Class Variable company = 'Google' # Class Variable raise_amount = 1.04 def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' Employee.emp_count += 1 def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def get_company(self): return 'Company Name is: {}'.format(Employee.company) def apply_raise(self): self.salary = self.salary * self.raise_amount class Developer(Employee): raise_amount = 1.10 def __init__(self, fname, lname, salary, prog_lang): super().__init__(fname, lname, salary) # or you can also use the following syntax # Employee.__init__(self, fname, lname, salary) self.prog_lang = prog_lang class Manager(Employee): def __init__(self, fname, lname, salary, employees = None): # Use None as default not empty list [] super().__init__(fname, lname, salary) if employees is None: self.employees = [] else: self.employees = employees def add_employee(self, emp): if emp not in self.employees: self.employees.append(emp) def remove_employee(self, emp): if emp in self.employees: self.employees.remove(emp) def print_emps(self): for emp in self.employees: print('--->', emp.get_fullname()) dev_1 = Developer('Sri', 'Paladugu', 1000, 'Python') dev_2 = Developer('Dhruv', 'Paladugu', 2000, 'Java') mgr_1 = Manager('Sue', 'Smith', 9000, [dev_1]) print(mgr_1.email) print(mgr_1.print_emps()) mgr_1.add_employee(dev_2) print(mgr_1.print_emps()) print('Is dev_1 an instance of Developer: ', isinstance(dev_1, Developer)) print('Is dev_1 an instance of Employee: ', isinstance(dev_1, Employee)) print('Is Developer an Subclass of Developer: ', issubclass(Developer, Developer)) print('Is Developer an Subclass of Employee: ', issubclass(Developer, Employee))
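The Manager class above uses employees=None precisely to dodge the mutable-default-argument gotcha linked in the heading. A minimal demonstration of the pitfall and the fix:

# The default list below is created once, at function definition time,
# so state leaks between calls.
def add_employee_bad(emp, employees=[]):
    employees.append(emp)
    return employees

print(add_employee_bad('Sri'))      # ['Sri']
print(add_employee_bad('Dhruv'))    # ['Sri', 'Dhruv']  <-- surprise!

# Using None as the sentinel gives every call a fresh list.
def add_employee_good(emp, employees=None):
    if employees is None:
        employees = []
    employees.append(emp)
    return employees

print(add_employee_good('Sri'))     # ['Sri']
print(add_employee_good('Dhruv'))   # ['Dhruv']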
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Magic or Dunder Methods https://www.youtube.com/watch?v=3ohzBxoFHAY&index=5&list=PL-osiE80TeTsqhIuOqKhwlXsIBIdSeYtc Dunder methods: 1. __repr__ 2. __str__
class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' def __repr__(self): # For other developers return "Employee('{}','{}','{}')".format(self.fname, self.lname, self.salary) def __str__(self): # For end user return '{} - {}'.format(self.get_fullname(), self.email) def get_fullname(self): return '{} {}'.format(self.fname, self.lname) emp1 = Employee('Sri', 'Paladugu', 5000) print(emp1) print(repr(emp1))
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
__add__ __len__
# if you do: 1 + 2 internally the interpreter calls the dunder method __add__ print(int.__add__(1,2)) # Similarly # if you do: [2,3] + [4,5] internally the interpreter calls the dunder method __add__ print(list.__add__([2,3],[4,5])) print('Paladugu'.__len__()) # This is same as len('Paladugu') class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary self.email = self.fname + '.' + self.lname + '@' + self.company + '.com' def __repr__(self): # For other developers return "Employee('{}','{}','{}')".format(self.fname, self.lname, self.salary) def __str__(self): # For end user return '{} - {}'.format(self.get_fullname(), self.email) def get_fullname(self): return '{} {}'.format(self.fname, self.lname) def __add__(self, other): return self.salary + other.salary def __len__(self): return len(self.get_fullname()) emp1 = Employee('Sri', 'Paladugu', 5000) emp2 = Employee('Dhruv', 'Paladugu', 5000) print(emp1 + emp2) print(len(emp1))
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Property Decorators
class Employee: company = 'Google' def __init__(self, fname, lname, salary): self.fname = fname self.lname = lname self.salary = salary @property def email(self): return '{}.{}@{}.com'.format(self.fname, self.lname, self.company) @property def fullname(self): return '{} {}'.format(self.fname, self.lname) @fullname.setter def fullname(self, name): first, last = name.split(' ') self.fname = first self.lname = last @fullname.deleter def fullname(self): print('Delete Name!') self.fname = None self.lname = None emp1 = Employee('Sri', 'Paladugu', 5000) print(emp1.email) print(emp1.fullname) emp1.fullname = 'Ramki Paladugu' print(emp1.email) del emp1.fullname print(emp1.email)
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Abstract Base Classes in Python What are Abstract Base Classes good for? A while ago I had a discussion about which pattern to use for implementing a maintainable class hierarchy in Python. More specifically, the goal was to define a simple class hierarchy for a service backend in the most programmer-friendly and maintainable way. There was a BaseService that defines a common interface and several concrete implementations that do different things but all provide the same interface (MockService, RealService, and so on). To make this relationship explicit the concrete implementations all subclass BaseService. To be as maintainable and programmer-friendly as possible the idea was to make sure that: instantiating the base class is impossible; and forgetting to implement interface methods in one of the subclasses raises an error as early as possible.
from abc import ABCMeta, abstractmethod class Base(metaclass=ABCMeta): @abstractmethod def foo(self): pass @abstractmethod def bar(self): pass class Concrete(Base): def foo(self): pass # We forget to declare bar() c = Concrete()
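The last line above raises a TypeError because Concrete leaves bar() unimplemented — which is exactly the early failure we want. A small follow-up sketch showing that implementing every abstract method makes instantiation work again, while the abstract base itself still cannot be instantiated:

from abc import ABCMeta, abstractmethod

class Base(metaclass=ABCMeta):
    @abstractmethod
    def foo(self):
        pass
    @abstractmethod
    def bar(self):
        pass

class GoodConcrete(Base):
    def foo(self):          # every abstract method is implemented
        return 'foo'
    def bar(self):
        return 'bar'

c = GoodConcrete()          # instantiation now succeeds
print(c.foo(), c.bar())

try:
    Base()                  # the abstract base class still cannot be instantiated
except TypeError as err:
    print('As expected:', err)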
ipynb/OOP Concepts.ipynb
sripaladugu/sripaladugu.github.io
mit
Network definitions
from AAE import create_encoder, create_decoder, create_aae_trainer
Notebooks/AAE.ipynb
nimagh/CNN_Implementations
gpl-3.0
Training AAE You can either get the fully trained models from Google Drive or train your own models using the AAE.py script. Experiments Create demo networks and restore weights
iter_num = 18018 best_model = work_dir + "Model_Iter_%.3d.ckpt"%iter_num best_img = work_dir + 'Gen_Iter_%d.jpg'%iter_num Image(filename=best_img) latentD = 2 # of the best model trained batch_size = 128 tf.reset_default_graph() demo_sess = tf.InteractiveSession() is_training = tf.placeholder(tf.bool, [], 'is_training') Zph = tf.placeholder(tf.float32, [None, latentD]) Xph = tf.placeholder(tf.float32, [None, 28, 28, 1]) Z_op = create_encoder(Xph, is_training, latentD, reuse=False, networktype=networktype + '_Enc') Xrec_op = create_decoder(Z_op, is_training, latentD, reuse=False, networktype=networktype + '_Dec') Xgen_op = create_decoder(Zph, is_training, latentD, reuse=True, networktype=networktype + '_Dec') tf.global_variables_initializer().run() enc_varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=networktype + '_Enc') dec_varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=networktype + '_Dec') saver = tf.train.Saver(var_list=enc_varlist+dec_varlist) saver.restore(demo_sess, best_model) #Get uniform samples over the labels spl = 800 # sample_per_label data = input_data.read_data_sets(data_dir, one_hot=False, reshape=False) Xdemo, Xdemo_labels = get_demo_data(data, spl) decoded_data = demo_sess.run(Z_op, feed_dict={Xph:Xdemo, is_training:False}) plot_latent_variable(decoded_data, Xdemo_labels)
Notebooks/AAE.ipynb
nimagh/CNN_Implementations
gpl-3.0
Generate new data Approximate samples from the posterior distribution over the latent variables p(z|x)
Zdemo = np.random.normal(size=[128, latentD], loc=0.0, scale=1.).astype(np.float32) gen_sample = demo_sess.run(Xgen_op, feed_dict={Zph: Zdemo , is_training:False}) vis_square(gen_sample[:121], [11, 11], save_path=work_dir + 'sample.jpg') Image(filename=work_dir + 'sample.jpg')
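Since this model was trained with a 2-D latent space, one natural extra check is to decode a regular grid of latent points rather than random draws. This is only a sketch and assumes demo_sess, Zph, Xgen_op, is_training, vis_square and work_dir from the cells above are still in memory.

import numpy as np
from IPython.display import Image

grid = np.array([[x, y] for y in np.linspace(-2, 2, 11) for x in np.linspace(-2, 2, 11)],
                dtype=np.float32)                      # 121 points covering the latent plane
grid_samples = demo_sess.run(Xgen_op, feed_dict={Zph: grid, is_training: False})
vis_square(grid_samples, [11, 11], save_path=work_dir + 'latent_grid.jpg')
Image(filename=work_dir + 'latent_grid.jpg')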
Notebooks/AAE.ipynb
nimagh/CNN_Implementations
gpl-3.0
After doing a pip install, click on Reset Session so that the Python environment picks up the new package
import os PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 MODEL_TYPE = 'tpu' # do not change these os.environ['PROJECT'] = PROJECT os.environ['BUCKET'] = BUCKET os.environ['REGION'] = REGION os.environ['MODEL_TYPE'] = MODEL_TYPE os.environ['TFVERSION'] = '1.8' # Tensorflow version %%bash gcloud config set project $PROJECT gcloud config set compute/region $REGION
courses/machine_learning/deepdive/08_image_keras/flowers_fromscratch_tpu.ipynb
GoogleCloudPlatform/training-data-analyst
apache-2.0
Breaking a parser down (attach) If we examine the source code for the attach pipeline, we can see that it is in fact a two-step pipeline combining the attach classifier wrapper and a decoder. So let's see what happens when we run the attach classifier by itself.
import numpy as np from attelo.learning import (SklearnAttachClassifier) from attelo.parser.attach import (AttachClassifierWrapper) from sklearn.linear_model import (LogisticRegression) def print_results_verbose(dpack): """Print detailed parse results""" for i, (edu1, edu2) in enumerate(dpack.pairings): attach = "{:.2f}".format(dpack.graph.attach[i]) label = np.around(dpack.graph.label[i,:], decimals=2) got = dpack.get_label(dpack.graph.prediction[i]) print(i, edu1.id, edu2.id, '\t|', attach, label, got) learner = SklearnAttachClassifier(LogisticRegression()) parser1a = AttachClassifierWrapper(learner) parser1a.fit(train_dpacks, train_targets) dpack = parser1a.transform(test_dpack) print_results_verbose(dpack)
doc/tut_parser2.ipynb
kowey/attelo
gpl-3.0
Parsers and weighted datapacks In the output above, we have dug a little bit deeper into our datapacks. Recall above that a parser translates datapacks to datapacks. The output of a parser is always a weighted datapack, i.e. a datapack whose 'graph' attribute is set to a record containing attachment weights, label weights, and predictions (like target values). So-called "standalone" parsers will take an unweighted datapack (graph == None) and produce a weighted datapack with predictions set. But some parsers tend to be more useful as part of a pipeline: the attach classifier wrapper fills in the attachment weights; likewise, the label classifier wrapper assigns label weights; and a decoder assigns predictions from weights. We see the first case in the above output. Notice that the attachments have been set to values from a model, but the label weights and predictions are assigned default values. NB: all parsers should do "something sensible" in the face of all inputs. This typically consists of assuming the default weight of 1.0 for unweighted datapacks. Decoders Having now transformed a datapack with the attach classifier wrapper, let's now pass its results to a decoder. In fact, let's try a couple of different decoders and compare the output.
from attelo.decoding.baseline import (LocalBaseline) decoder = LocalBaseline(threshold=0.4) dpack2 = decoder.transform(dpack) print_results_verbose(dpack2)
doc/tut_parser2.ipynb
kowey/attelo
gpl-3.0
The result above is what we get if we run a decoder on the output of the attach classifier wrapper. This is, in fact, the same thing as running the attachment pipeline. We can define a similar pipeline below.
from attelo.parser.pipeline import (Pipeline) # this is basically attelo.parser.attach.AttachPipeline parser1 = Pipeline(steps=[('attach weights', parser1a), ('decoder', decoder)]) parser1.fit(train_dpacks, train_targets) print_results_verbose(parser1.transform(test_dpack))
doc/tut_parser2.ipynb
kowey/attelo
gpl-3.0
Mixing and matching Being able to break parsing down to this level of granularity lets us experiment with parsing techniques by composing different parsing substeps in different ways. For example, below, we write two slightly different pipelines, one which sets labels separately from decoding, and one which combines attach and label scores before handing them off to a decoder.
from attelo.learning.local import (SklearnLabelClassifier) from attelo.parser.label import (LabelClassifierWrapper, SimpleLabeller) from attelo.parser.full import (AttachTimesBestLabel) learner_l = SklearnLabelClassifier(LogisticRegression()) print("Post-labelling") print("--------------") parser = Pipeline(steps=[('attach weights', parser1a), ('decoder', decoder), ('labels', SimpleLabeller(learner_l))]) parser.fit(train_dpacks, train_targets) print_results_verbose(parser.transform(test_dpack)) print() print("Joint") print("-----") parser = Pipeline(steps=[('attach weights', parser1a), ('label weights', LabelClassifierWrapper(learner_l)), ('attach times label', AttachTimesBestLabel()), ('decoder', decoder)]) parser.fit(train_dpacks, train_targets) print_results_verbose(parser.transform(test_dpack))
doc/tut_parser2.ipynb
kowey/attelo
gpl-3.0
3. Enter Storage Bucket Recipe Parameters Specify the name of the bucket and who will have owner permissions. Existing buckets are preserved. Adding a permission to the list will update the permissions but removing them will not. You have to manually remove grants. Modify the values below for your use case; this can be done multiple times. Then click play.
FIELDS = { 'auth_write':'service', # Credentials used for writing data. 'bucket_bucket':'', # Name of Google Cloud Bucket to create. 'bucket_emails':'', # Comma separated emails. 'bucket_groups':'', # Comma separated groups. } print("Parameters Set To: %s" % FIELDS)
colabs/bucket.ipynb
google/starthinker
apache-2.0
4. Execute Storage Bucket This does NOT need to be modified unless you are changing the recipe. Just click play.
from starthinker.util.configuration import execute from starthinker.util.recipe import json_set_fields TASKS = [ { 'bucket':{ 'auth':{'field':{'name':'auth_write','kind':'authentication','order':1,'default':'service','description':'Credentials used for writing data.'}}, 'bucket':{'field':{'name':'bucket_bucket','kind':'string','order':2,'default':'','description':'Name of Google Cloud Bucket to create.'}}, 'emails':{'field':{'name':'bucket_emails','kind':'string_list','order':3,'default':'','description':'Comma separated emails.'}}, 'groups':{'field':{'name':'bucket_groups','kind':'string_list','order':4,'default':'','description':'Comma separated groups.'}} } } ] json_set_fields(TASKS, FIELDS) execute(CONFIG, TASKS, force=True)
colabs/bucket.ipynb
google/starthinker
apache-2.0
Loading data Load the data from disk into memory.
with open('potus_wiki_bios_cleaned.json','r') as f: bios = json.load(f)
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Confirm there are 44 presidents (shaking fist at Grover Cleveland) in the dictionary.
print("There are {0} biographies of presidents.".format(len(bios)))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Get some metadata about the U.S. Presidents.
presidents_df = pd.DataFrame(requests.get('https://raw.githubusercontent.com/hitch17/sample-data/master/presidents.json').json()) presidents_df = presidents_df.set_index('president') presidents_df['wikibio words'] = pd.Series({bio_name:len(bio_text) for bio_name,bio_text in bios.items()}) presidents_df.head()
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
A really basic exploratory scatterplot for the number of words in each President's biography compared to their POTUS index.
presidents_df.plot.scatter(x='number',y='wikibio words')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
TF-IDF We can create a document-term matrix where the rows are our 44 presidential biographies, the columns are the terms (words), and the values in the cells are the word counts: the number of times that document contains that word. This is the "term frequency" (TF) part of TF-IDF. The IDF part of TF-IDF is the "inverse document frequency". The intuition is that words that occur frequently within a single document but are infrequent across the corpus of documents should receive a higher weighting: these words have greater relative meaning. Conversely, words that are frequently used across documents are down-weighted. The image below has documents as columns and terms as rows.
# Import the libraries from scikit-learn from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer count_vect = CountVectorizer() # Compute the word counts -- it expects a big string, so join our cleaned words back together bio_counts = count_vect.fit_transform([' '.join(bio) for bio in bios.values()]) # Compute the TF-IDF for the word counts from each biography bio_tfidf = TfidfTransformer().fit_transform(bio_counts) # Convert from sparse to dense array representation bio_tfidf_dense = bio_tfidf.todense()
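If the TF-IDF weighting still feels abstract, a toy corpus makes the idea concrete. This is just an illustration with made-up sentences; nothing here feeds into the presidential analysis.

from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd

toy_docs = ['the president signed the bill',
            'the president vetoed the bill',
            'the general led the army']
toy_tfidf = TfidfVectorizer()
toy_matrix = toy_tfidf.fit_transform(toy_docs)

# idf is smallest for terms that appear in every document ('the') and largest
# for terms unique to a single document ('army', 'vetoed', 'signed', ...)
print(pd.Series(toy_tfidf.idf_, index=toy_tfidf.get_feature_names()).sort_values())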
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Make a text similarity network Once we have the TFIDF scores for every word in each president's biography, we can make a text similarity network. Multiplying the document-term matrix by its transpose should return the cosine similarities between documents. We can also import cosine_similarity from scikit-learn if you don't believe me (I didn't believe me either). Cosine similarity values closer to 1 indicate these documents' words have more similar TFIDF scores and values closer to 0 indicate these documents' words are more dissimilar. The goal here is to create a network where nodes are presidents and edges are weighted similarity scores. All text documents will have some minimal similarity, so we can threshold the similarity scores to only those similarities in the top 10% for each president.
# Compute cosine similarity pres_pres_df = pd.DataFrame(bio_tfidf_dense*bio_tfidf_dense.T) # If you don't believe me that cosine similiarty is the document-term matrix times its transpose from sklearn.metrics.pairwise import cosine_similarity pres_pres_df = pd.DataFrame(cosine_similarity(bio_tfidf_dense)) # Filter for edges in the 90th percentile or greater pres_pres_filtered_df = pres_pres_df[pres_pres_df >= pres_pres_df.quantile(.9)] # Reshape and filter data edgelist_df = pres_pres_filtered_df.stack().reset_index() edgelist_df = edgelist_df[(edgelist_df[0] != 0) & (edgelist_df['level_0'] != edgelist_df['level_1'])] # Rename and replace data edgelist_df.rename(columns={'level_0':'from','level_1':'to',0:'weight'},inplace=True) edgelist_df.replace(dict(enumerate(bios.keys())),inplace=True) # Inspect edgelist_df.head()
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
We read this pandas edgelist into networkx using from_pandas_edgelist, report out some basic descriptives about the network, and write the graph object to file in case we want to visualize it in a dedicated network visualization package like Gephi.
# Convert from edgelist to a graph object g = nx.from_pandas_edgelist(edgelist_df,source='from',target='to',edge_attr=['weight']) # Report out basic descriptives print("There are {0:,} nodes and {1:,} edges in the network.".format(g.number_of_nodes(),g.number_of_edges())) # Write graph object to disk for visualization nx.write_gexf(g,'bio_similarity.gexf')
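Before moving to the drawing, a quick look at which presidents sit at the center of this similarity network; this just reuses the graph g built in the cell above.

import pandas as pd

degree_centrality = pd.Series(nx.degree_centrality(g))
print(degree_centrality.sort_values(ascending=False).head(10))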
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Since this is a small and sparse network, we can try to use Matplotlib to visualize it instead. I would only use the nx.draw functionality for small networks like this one.
# Plot the nodes as a spring layout #g_pos = nx.layout.fruchterman_reingold_layout(g, k = 5, iterations=10000) g_pos = nx.layout.kamada_kawai_layout(g) # Draw the graph f,ax = plt.subplots(1,1,figsize=(10,10)) nx.draw(G = g, ax = ax, pos = g_pos, with_labels = True, node_size = [dc*(len(g) - 1)*100 for dc in nx.degree_centrality(g).values()], font_size = 10, font_weight = 'bold', width = [d['weight']*10 for i,j,d in g.edges(data=True)], node_color = 'tomato', edge_color = 'grey' )
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Case study: Text similarity network of the S&P 500 companies Step 1: Load and preprocess the content of the articles.
# Load the data with open('sp500_wiki_articles.json','r') as f: sp500_articles = json.load(f) # Bring in the text_preprocessor we wrote from Day 4, Lecture 1 def text_preprocessor(text): """Takes a large string (document) and returns a list of cleaned tokens""" tokens = nltk.wordpunct_tokenize(text) clean_tokens = [] for t in tokens: if t.lower() not in all_stopwords and len(t) > 2: clean_tokens.append(lemmatizer(t.lower())) return clean_tokens # Clean each article cleaned_sp500 = {} for name,text in sp500_articles.items(): cleaned_sp500[name] = text_preprocessor(text) # Save to disk with open('sp500_wiki_articles_cleaned.json','w') as f: json.dump(cleaned_sp500,f)
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Step 2: Compute the TFIDF matrix for the S&P 500 companies.
# Compute the word counts sp500_counts = # Compute the TF-IDF for the word counts from each biography sp500_tfidf = # Convert from sparse to dense array representation sp500_tfidf_dense =
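One way the blanks might be filled in, simply mirroring the presidential-biography TF-IDF cell earlier in the notebook. This is only one possible answer and assumes cleaned_sp500 from Step 1 is still in memory.

from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

sp500_count_vect = CountVectorizer()
# Compute the word counts -- join each company's cleaned tokens back into one string
sp500_counts = sp500_count_vect.fit_transform([' '.join(doc) for doc in cleaned_sp500.values()])
# Compute the TF-IDF for the word counts from each article
sp500_tfidf = TfidfTransformer().fit_transform(sp500_counts)
# Convert from sparse to dense array representation
sp500_tfidf_dense = sp500_tfidf.todense()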
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Step 3: Compute the cosine similarities.
# Compute cosine similarity company_company_df = # Filter for edges in the 90th percentile or greater company_company_filtered_df = # Reshape and filter data sp500_edgelist_df = sp500_edgelist_df = # Rename and replace data sp500_edgelist_df.rename(columns={'level_0':'from','level_1':'to',0:'weight'},inplace=True) sp500_edgelist_df.replace(dict(enumerate(sp500_articles.keys())),inplace=True) # Inspect sp500_edgelist_df.head()
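Again following the presidential example above, the blanks could be completed as below (one possible sketch; it assumes sp500_tfidf_dense from Step 2). The rename/replace lines from the cell above then finish the edgelist.

import pandas as pd

# Cosine similarity as the TF-IDF matrix times its transpose
company_company_df = pd.DataFrame(sp500_tfidf_dense * sp500_tfidf_dense.T)
# Keep only edges in the 90th percentile or greater
company_company_filtered_df = company_company_df[company_company_df >= company_company_df.quantile(.9)]
# Reshape and filter data
sp500_edgelist_df = company_company_filtered_df.stack().reset_index()
sp500_edgelist_df = sp500_edgelist_df[(sp500_edgelist_df[0] != 0) &
                                      (sp500_edgelist_df['level_0'] != sp500_edgelist_df['level_1'])]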
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Step 4: Visualize the resulting network. Word2Vec We used TF-IDF vectors of documents and cosine similarities between these document vectors as a way of representing the similarity in the networks above. However, TF-IDF scores are simply (normalized) word frequencies: they do not capture semantic information. A vector space model like the popular Word2Vec represents each token (word) in a high-dimensional (here we'll use 100 dimensions) space that is trained from some (ideally) large corpus of documents. Ideally, tokens that are used in similar contexts are placed into similar locations in this high-dimensional space. Once we have vectorized words into this space, we're able to efficiently perform a variety of other operations, such as computing similarities between words or applying transformations that find analogies. I lack the expertise and we lack the time to get into the math behind these methods, but here are some helpful tutorials I've found: * Word embeddings: exploration, explanation, and exploitation * Learning Word Embedding * On word embeddings * TensorFlow - Vector Representations of Words We'll use the 44 Presidential biographies as a small and specific corpus. We start by training a bios_model from the list of biographies using hyperparameters for the number of dimensions (size), the number of surrounding words to use as training (window), and the minimum number of times a word has to occur to be included in the model (min_count).
from gensim.models import Word2Vec bios_model = Word2Vec(bios.values(),size=100,window=10,min_count=8)
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Each word in the vocabulary exists as an N-dimensional vector, where N is the "size" hyper-parameter set in the model. The "congress" token is located at this position in the 100-dimensional space we trained in bios_model.
bios_model.wv['congress'] bios_model.wv.most_similar('congress') bios_model.wv.most_similar('court') bios_model.wv.most_similar('war') bios_model.wv.most_similar('election')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
There's a doesnt_match method that predicts which word in a list doesn't match the other word senses in the list. Sometimes the results are predictable/trivial.
bios_model.wv.doesnt_match(['democrat','republican','whig','panama'])
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Other times the results are unexpected/interesting.
bios_model.wv.doesnt_match(['canada','mexico','cuba','japan','france'])
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
One of the most powerful implications of having these vectorized embeddings of word meanings is the ability to do simple arithmetic operations that recover or reveal interesting semantic relationships. The classic example is Man:Woman::King:Queen. What are some examples of these vector analogies from our trained model? For instance, solving republican - slavery = democrat - X for X gives X = democrat - (republican - slavery) = slavery + democrat - republican, which is exactly what the positive/negative arguments in the cell below express.
bios_model.wv.most_similar(positive=['democrat','slavery'],negative=['republican']) bios_model.wv.most_similar(positive=['republican','labor'],negative=['democrat'])
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Finally, you can use the similarity method to return the similarity between two terms. In our trained model, "britain" and "france" are more similar to each other than "mexico" and "canada".
bios_model.wv.similarity('republican','democrat') bios_model.wv.similarity('mexico','canada') bios_model.wv.similarity('britain','france')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Case study: S&P500 company Word2Vec model Step 1: Open the "sp500_wiki_articles_cleaned.json" you previous saved of the cleaned S&P500 company article content or use a text preprocessor on "sp500_wiki_articles.json" to generate a dictionary of cleaned article content. Train a sp500_model using the Word2Vec model on the values of the cleaned company article content. You can use default hyperparameters for size, window, and min_count, or experiment with alternative values. Step 2: Using the most_similar method, explore some similarities this model has learned for salient tokens about companies (e.g., "board", "controversy", "executive", "investigation"). Use the positive and negative options to explore different analogies. Using the doesnt_match method, experiment with word combinations to discover predictable and unexpected exceptions. Using the similarity method, identify interesting similarity scores. Dimensionality reduction Material from this segment is adapted from Jake Vanderplas's "Python Data Science Handbook" notebooks and Kevyn Collins-Thompson's "Applied Machine Learning in Python" module on Coursera. In the TF-IDF, we have over 17,000 dimensions (corresponding to the unique tokens) for each of the 44 presidential biographies. This data is sparse and large, which makes it hard to visualize. Ideally we'd only have two dimensions of data for a task like visualization. Dimensionality reduction encompasses a set of methods like principal component analysis, multidimensional scaling, and more advanced "manifold learning" that reduces high-dimensional data down to fewer dimensions. For the purposes of visualization, we typically want 2 dimensions. These methods use a variety of different assumptions and modeling approaches. If you want to understand the differences between them, you'll likely need to find a graduate-level machine learning course. Let's compare what each of these do on our presidential TF-IDF: the goal here is to understand there are different methods for dimensionality reduction and each generates different new components and/or clusters that you'll need to interpret.
print(bio_tfidf_dense.shape) bio_tfidf_dense
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Principal component analysis (PCA) is probably one of the most widely-used and efficient methods for dimensionality reduction.
# Step 1: Choose a class of models from sklearn.decomposition import PCA # Step 2: Instantiate the model pca = PCA(n_components=2) # Step 3: Arrange the data into features matrices # Already done # Step 4: Fit the model to the data pca.fit(bio_tfidf_dense) # Step 5: Evaluate the model X_pca = pca.transform(bio_tfidf_dense) # Visualize f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_pca[:,0],X_pca[:,1]) ax.set_title('PCA') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_pca[i,0],X_pca[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_pca[i,0],X_pca[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_pca[i,0],X_pca[i,1]))
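One advantage of PCA over the manifold methods below is that it reports how much of the variance the two components actually capture; this reuses the pca object fit in the cell above.

print(pca.explained_variance_ratio_)        # variance explained by each component
print(pca.explained_variance_ratio_.sum())  # total variance captured by the 2-D projection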
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Multi-dimensional scaling is another common technique in the social sciences.
# Step 1: Choose your model class(es) from sklearn.manifold import MDS # Step 2: Instantiate your model class(es) mds = MDS(n_components=2,metric=False,n_jobs=-1) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_mds = mds.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_mds[:,0],X_mds[:,1]) ax.set_title('Multi-Dimensional Scaling') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_mds[i,0],X_mds[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_mds[i,0],X_mds[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_mds[i,0],X_mds[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Isomap is an extension of MDS.
# Step 1: Choose your model class(es) from sklearn.manifold import Isomap # Step 2: Instantiate your model class(es) iso = Isomap(n_neighbors = 5, n_components = 2) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_iso = iso.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_iso[:,0],X_iso[:,1]) ax.set_title('IsoMap') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_iso[i,0],X_iso[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_iso[i,0],X_iso[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_iso[i,0],X_iso[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Spectral embedding does interesting things to the eigenvectors of a similarity matrix.
# Step 1: Choose your model class(es) from sklearn.manifold import SpectralEmbedding # Step 2: Instantiate your model class(es) se = SpectralEmbedding(n_components = 2) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_se = se.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(9,6)) ax.scatter(X_se[:,0],X_se[:,1]) ax.set_title('Spectral Embedding') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_se[i,0],X_se[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_se[i,0],X_se[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_se[i,0],X_se[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Locally Linear Embedding is yet another dimensionality reduction method, but not my favorite to date given performance (meaningful clusters as output) and cost (expensive to compute).
# Step 1: Choose your model class(es) from sklearn.manifold import LocallyLinearEmbedding # Step 2: Instantiate your model class(es) lle = LocallyLinearEmbedding(n_components = 2,n_jobs=-1) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_lle = lle.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(9,6)) ax.scatter(X_lle[:,0],X_lle[:,1]) ax.set_title('Locally Linear Embedding') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_lle[i,0],X_lle[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_lle[i,0],X_lle[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_lle[i,0],X_lle[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
t-Distributed Stochastic Neighbor Embedding (t-SNE) is ubiquitous for visualizing word or document embeddings. It can be expensive to run, but it does a great job recovering clusters. There are some hyper-parameters, particularly "perplexity", that you'll need to tune to get things to look interesting. Wattenberg, Viégas, and Johnson have an outstanding interactive tool for visualizing how t-SNE's different parameters influence the layout, as well as good advice on how to make the best of it.
# Step 1: Choose your model class(es) from sklearn.manifold import TSNE # Step 2: Instantiate your model class(es) tsne = TSNE(n_components = 2, init='pca', random_state=42, perplexity=11) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_tsne = tsne.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_tsne[:,0],X_tsne[:,1]) ax.set_title('t-SNE') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_tsne[i,0],X_tsne[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_tsne[i,0],X_tsne[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_tsne[i,0],X_tsne[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Uniform Manifold Approximation and Projection (UMAP) is a new and particularly fast dimensionality reduction method with some comparatively great documentation. Unfortunately, UMAP is so new that it hasn't been translated into scikit-learn yet, so you'll need to install it separately from the terminal: conda install -c conda-forge umap-learn
# Step 1: Choose your model class(es) from umap import UMAP # Step 2: Instantiate your model class(es) umap_ = UMAP(n_components=2, n_neighbors=10, random_state=42) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_umap = umap_.fit_transform(bio_tfidf_dense) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_umap[:,0],X_umap[:,1]) ax.set_title('UMAP') for i,txt in enumerate(bios.keys()): if txt == 'Barack Obama': ax.annotate(txt,(X_umap[i,0],X_umap[i,1]),color='blue',fontweight='bold') elif txt == 'Donald Trump': ax.annotate(txt,(X_umap[i,0],X_umap[i,1]),color='red',fontweight='bold') else: ax.annotate(txt,(X_umap[i,0],X_umap[i,1]))
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Case study: S&P500 company clusters Step 1: Using the sp500_tfidf_dense array/DataFrame, experiment with different dimensionality reduction tools we covered above. Visualize and inspect the distribution of S&P500 companies for interesting dimensions (do X and Y dimensions in this reduced data capture anything meaningful?) or clusters (do companies clusters together as we'd expect?). Visualizing word embeddings Using the bio_counts, we can find the top-N most frequent words and save them as top_words.
top_words = pd.DataFrame(bio_counts.todense().sum(0).T, index=count_vect.get_feature_names())[0] top_words = top_words.sort_values(0,ascending=False).head(1000).index.tolist()
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
For each word in top_words, we get its vector from bios_model, add it to the top_word_vectors list, and then cast the list back to a numpy array.
top_word_vectors = [] for word in top_words: try: vector = bios_model.wv[word] top_word_vectors.append(vector) except KeyError: pass top_word_vectors = np.array(top_word_vectors)
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
We can then use the dimensionality tools we just covered in the previous section to visualize the word similarities. PCA is fast but rarely does a great job with this extremely high-dimensional and sparse data: it's a cloud of points with no discernable structure.
# Step 1: Choose your model class(es) # from sklearn.decomposition import PCA # Step 2: Instantiate the model pca = PCA(n_components=2) # Step 3: Arrange data into features matrices X_w2v = top_word_vectors # Step 4: Fit the data and transform X_w2v_pca = pca.fit_transform(X_w2v) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_w2v_pca[:,0],X_w2v_pca[:,1],s=3) ax.set_title('PCA') for i,txt in enumerate(top_words): if i%10 == 0: ax.annotate(txt,(X_w2v_pca[i,0],X_w2v_pca[i,1])) f.savefig('term_pca.pdf')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
t-SNE was more-or-less engineered for precisely the task of visualizing word embeddings. It likely takes on the order of a minute or more for t-SNE to reduce the top_words embeddings to only two dimensions. Assuming our perplexity and other t-SNE hyperparameters are well-behaved, there should be relatively easy-to-discern clusters of words with similar meanings. You can also open the "term_tsne.pdf" file and zoom in to inspect.
# Step 1: Choose your model class(es) from sklearn.manifold import TSNE # Step 2: Instantiate your model class(es) tsne = TSNE(n_components = 2, init='pca', random_state=42, perplexity=25) # Step 3: Arrange data into features matrices X_w2v = top_word_vectors # Step 4: Fit the data and transform X_w2v_tsne = tsne.fit_transform(X_w2v) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_w2v_tsne[:,0],X_w2v_tsne[:,1],s=3) ax.set_title('t-SNE') for i,txt in enumerate(top_words): if i%10 == 0: ax.annotate(txt,(X_w2v_tsne[i,0],X_w2v_tsne[i,1])) f.savefig('term_tsne.pdf')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
UMAP is faster and I think better, but you'll need to make sure this is installed on your system since it doesn't come with scikit-learn or Anaconda by default. Words like "nominee" and "campaign" or the names of the months cluster clearly together apart from the rest.
# Step 1: Choose your model class(es) from umap import UMAP # Step 2: Instantiate your model class(es) umap_ = UMAP(n_components=2, n_neighbors=5, random_state=42) # Step 3: Arrange data into features matrices # Done! # Step 4: Fit the data and transform X_w2v_umap = umap_.fit_transform(X_w2v) # Plot the data f,ax = plt.subplots(1,1,figsize=(10,10)) ax.scatter(X_w2v_umap[:,0],X_w2v_umap[:,1],s=3) ax.set_title('UMAP') for i,txt in enumerate(top_words): if i%10 == 0: ax.annotate(txt,(X_w2v_umap[i,0],X_w2v_umap[i,1])) f.savefig('term_umap.pdf')
2018/materials/boulder/day4-text-analysis/Day 4, Lecture 3 - Text networks and word embeddings.ipynb
compsocialscience/summer-institute
mit
Select the notebook runtime environment devices / settings Set the device to cpu / gpu for the test environment. If you have both CPU and GPU on your machine, you can optionally switch the devices. By default, we choose the best available device.
# Select the right target device when this notebook is being tested: if 'TEST_DEVICE' in os.environ: import cntk if os.environ['TEST_DEVICE'] == 'cpu': C.device.set_default_device(C.device.cpu()) else: C.device.set_default_device(C.device.gpu(0)) C.device.set_default_device(C.device.gpu(0))
simpleGan/CNTK_206B_DCGAN.ipynb
olgaliak/cntk-cyclegan
mit
Model Creation First we provide a brief recap of the basics of GAN. You may skip this block if you are familiar with CNTK 206A. A GAN network is composed of two sub-networks, one called the Generator ($G$) and the other the Discriminator ($D$). - The Generator takes a random noise vector ($z$) as input and strives to output a synthetic (fake) image ($x^*$) that is indistinguishable from a real image ($x$) from the MNIST dataset. - The Discriminator strives to differentiate between the real image ($x$) and the fake ($x^*$) image. In each training iteration, the Generator produces more realistic fake images (in other words, it minimizes the difference between the real and generated counterparts) and the Discriminator maximizes the probability of assigning the correct label (real vs. fake) to both real examples (from the training set) and the generated fake ones. The two conflicting objectives of the sub-networks ($G$ and $D$) lead the GAN network (when trained) to converge to an equilibrium, where the Generator produces realistic-looking fake MNIST images and the Discriminator can at best randomly guess whether images are real or fake. The resulting Generator model, once trained, produces realistic MNIST images from random vector inputs. Model config First, we establish some of the architectural and training hyper-parameters for our model. The generator network is a fractionally strided convolutional network. The input is a 100-dimensional random vector and the output of the generator is a flattened version of a 28 x 28 fake image. The discriminator is a strided-convolution network. It takes as input the 784-dimensional output of the generator or a real MNIST image, reshapes it into a 28 x 28 image format, and outputs a single scalar - the estimated probability that the input image is a real MNIST image. Model components We build a computational graph for our model, one each for the generator and the discriminator. First, we establish some of the architectural parameters of our model.
# architectural parameters img_h, img_w = 28, 28 kernel_h, kernel_w = 5, 5 stride_h, stride_w = 2, 2 # Input / Output parameter of Generator and Discriminator g_input_dim = 100 g_output_dim = d_input_dim = img_h * img_w # We expect the kernel shapes to be square in this tutorial and # the strides to be of the same length along each data dimension if kernel_h == kernel_w: gkernel = dkernel = kernel_h else: raise ValueError('This tutorial needs square shaped kernel') if stride_h == stride_w: gstride = dstride = stride_h else: raise ValueError('This tutorial needs same stride in all dims') # Helper functions def bn_with_relu(x, activation=C.relu): h = BatchNormalization(map_rank=1)(x) return C.relu(h) # We use param-relu function to use a leak=0.2 since CNTK implementation # of Leaky ReLU is fixed to 0.01 def bn_with_leaky_relu(x, leak=0.2): h = BatchNormalization(map_rank=1)(x) r = C.param_relu(C.constant((np.ones(h.shape)*leak).astype(np.float32)), h) return r
simpleGan/CNTK_206B_DCGAN.ipynb
olgaliak/cntk-cyclegan
mit
Generator The generator takes a 100-dimensional random vector (for starters) as input ($z$) and outputs a 784-dimensional vector, corresponding to a flattened version of a 28 x 28 fake (synthetic) image ($x^*$). In this tutorial, we use fractionally strided convolutions (a.k.a. ConvolutionTranspose) with ReLU activations except for the last layer, which uses a squashing activation so that the generator's output is confined to a fixed interval; the DCGAN paper recommends tanh (range [-1, 1]), while the code below uses a sigmoid (range [0, 1]). The use of ReLU activations together with the fractionally strided convolutions is key.
def convolutional_generator(z): with default_options(init=C.normal(scale=0.02)): print('Generator input shape: ', z.shape) s_h2, s_w2 = img_h//2, img_w//2 #Input shape (14,14) s_h4, s_w4 = img_h//4, img_w//4 # Input shape (7,7) gfc_dim = 1024 gf_dim = 64 h0 = Dense(gfc_dim, activation=None)(z) h0 = bn_with_relu(h0) print('h0 shape', h0.shape) h1 = Dense([gf_dim * 2, s_h4, s_w4], activation=None)(h0) h1 = bn_with_relu(h1) print('h1 shape', h1.shape) h2 = ConvolutionTranspose2D(gkernel, num_filters=gf_dim*2, strides=gstride, pad=True, output_shape=(s_h2, s_w2), activation=None)(h1) h2 = bn_with_relu(h2) print('h2 shape', h2.shape) h3 = ConvolutionTranspose2D(gkernel, num_filters=1, strides=gstride, pad=True, output_shape=(img_h, img_w), activation=C.sigmoid)(h2) print('h3 shape :', h3.shape) return C.reshape(h3, img_h * img_w)
simpleGan/CNTK_206B_DCGAN.ipynb
olgaliak/cntk-cyclegan
mit