Bonus 2: Make sure the function works well with iterators such as files, generators, etc.
numbers = [1, 3, 8, 5, 4, 10, 6]
odds = (n for n in numbers if n % 2 == 1)
multimax(odds)
Python/Python Morsels/multimax/my_try/multimax.ipynb
nitin-cherian/LifeLongLearning
mit
Bonus 3: The multimax function should accept a keyword argument called "key" that is a function which will be used to determine the key by which to compare values as maximums. For example, the key function could be used to find the longest words in a list of words.
words = ["cheese", "shop", "ministry", "of", "silly", "walks", "argument", "clinic"]
multimax(words, key=len)

words = ["cheese", "shop", "ministry", "of", "silly", "walks", "argument", "clinic"]
max(words, key=len)

words = ["cheese", "shop", "argument", "of", "silly", "walks", "ministry", "clinic"]
max(words, key=len)
Python/Python Morsels/multimax/my_try/multimax.ipynb
nitin-cherian/LifeLongLearning
mit
Unit tests
import unittest


class MultiMaxTests(unittest.TestCase):

    """Tests for multimax."""

    def test_single_max(self):
        self.assertEqual(multimax([1, 2, 4, 3]), [4])

    def test_two_max(self):
        self.assertEqual(multimax([1, 4, 2, 4, 3]), [4, 4])

    def test_all_max(self):
        self.assertEqual(multimax([1, 1, 1, 1, 1]), [1, 1, 1, 1, 1])

    def test_lists(self):
        inputs = [[0], [1], [], [0, 1], [1]]
        expected = [[1], [1]]
        self.assertEqual(multimax(inputs), expected)

    def test_order_maintained(self):
        inputs = [
            (3, 2),
            (2, 1),
            (3, 2),
            (2, 0),
            (3, 2),
        ]
        expected = [
            inputs[0],
            inputs[2],
            inputs[4],
        ]
        outputs = multimax(inputs)
        self.assertEqual(outputs, expected)
        self.assertIs(outputs[0], expected[0])
        self.assertIs(outputs[1], expected[1])
        self.assertIs(outputs[2], expected[2])

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_empty(self):
        self.assertEqual(multimax([]), [])

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_iterator(self):
        numbers = [1, 4, 2, 4, 3]
        squares = (n**2 for n in numbers)
        self.assertEqual(multimax(squares), [16, 16])

    # To test the Bonus part of this exercise, comment out the following line
    # @unittest.expectedFailure
    def test_key_function(self):
        words = ["alligator", "animal", "apple", "artichoke", "avalanche"]
        outputs = ["alligator", "artichoke", "avalanche"]
        self.assertEqual(multimax(words, key=len), outputs)


if __name__ == "__main__":
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
Python/Python Morsels/multimax/my_try/multimax.ipynb
nitin-cherian/LifeLongLearning
mit
2. sentiment scoring
motley = pd.read_csv('mfool.csv')
negative = pd.read_csv('negative-words.txt', sep=' ', header=None)
positive = pd.read_csv('positive-words.txt', sep=' ', header=None)

def score_word(word):
    """
    returns -1 if negative meaning, +1 if positive meaning, else 0
    input: a word
    output: -1, 0, or +1
    """
    if word.lower() in negative.values:
        return -1
    elif word.lower() in positive.values:
        return +1
    return 0

def get_scores(article):
    """
    returns the sentiment score for a given article
    input: an article
    output: sentiment score
    """
    wordsArticle = article.split(' ')
    scores = [score_word(word) for word in wordsArticle]
    return sum(scores)

motley['sentiment'] = motley['article'].apply(get_scores)

plt.hist(motley.sentiment, bins=50)
plt.xlabel('sentiment scores')
plt.ylabel('frequency')
plt.title('Distribution of sentiment scores of articles');

# motley.to_csv('motley_with_s_scores.csv', encoding='utf-8')

most_positive_article = motley['article'][motley['sentiment'] == np.max(motley['sentiment'])].values[0]
wc = WordCloud().generate(most_positive_article)
plt.imshow(wc)
plt.axis('off');

most_negative_article = motley['article'][motley['sentiment'] == np.min(motley['sentiment'])].values[0]
wc = WordCloud().generate(most_negative_article)
plt.imshow(wc)
plt.axis('off');
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
3. Merging data sets

Apple (AAPL) stock data was obtained using the Quandl API at "https://www.quandl.com/api/v3/datasets/WIKI/AAPL.csv"
path = "../datasets/"
aapl = pd.read_csv(path + 'WIKI_PRICES_AAPL.csv')
fool = pd.read_csv(path + 'motley_with_s_scores.csv')

def format_df(stock_df, news_df, word):
    """
    merges stock_df and news_df on "date" column
    input: stock df, news df, word
    output: merged df
    """
    stock_df['diff'] = stock_df['close'] - stock_df['open']
    news_df['Count'] = news_df['article'].apply(lambda x: x.count(word))
    news_df.loc[news_df['Count'] <= 5, 'sentiment'] = 0
    news_df['date'] = pd.to_datetime(news_df['date'])
    news_df['date'] = news_df['date'].dt.strftime('%Y-%m-%d')
    news_df = news_df.groupby(['date'], as_index=False).sum()
    news_df = news_df[['date', 'sentiment', 'Count']]
    merged_df = pd.merge(news_df, stock_df)
    merged_df['bin_sentiment'] = pd.cut(merged_df['sentiment'], [-np.inf, -0.001, 0.001, np.inf], labels=[-1, 0, 1])
    merged_df['bin_diff'] = pd.cut(merged_df['diff'], [-np.inf, -0.001, 0.001, np.inf], labels=[-1, 0, 1])
    return merged_df

merged_df = format_df(aapl, fool, 'Apple')
merged_df.head()

# merged_df.to_csv('merged_df.csv', encoding='utf-8')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
3. Methods selection, evaluation
def plot_ROC(y_test, scores, label, color):
    """
    plots ROC curve
    input: y_test, scores, and title
    output: ROC curve
    """
    false_pr, true_pr, _ = roc_curve(y_test, scores[:, 1])
    roc_auc = auc(false_pr, true_pr)
    plt.plot(false_pr, true_pr, lw=3,
             label='{}: area={:10.4f})'.format(label, roc_auc), color=color)
    plt.plot([0, 1], [0, 1], color='black', lw=1, linestyle='--')
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend(loc="best")
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('ROC')

def plot_PR(y_test, scores, label, color):
    """
    plots PR curve
    input: y_test, scores, title
    output: Precision-Recall curve
    """
    precision, recall, _ = precision_recall_curve(y_test, scores[:, 1])
    plt.plot(recall, precision, lw=2, label='{}'.format(label), color=color)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.legend(loc="best")
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('PR')

def plot_confusionmatrix(ytrue, ypred):
    """
    plots confusion matrix heatmap and prints out classification report
    input: ytrue (actual value), ypred (predicted value)
    output: confusion matrix heatmap and classification report
    """
    print(classification_report(ytrue, ypred))
    print('##################################################################')
    cnf_matrix = confusion_matrix(ytrue, ypred)
    sns.heatmap(cnf_matrix, cmap='coolwarm_r', annot=True, linewidths=.5, fmt='.4g')
    plt.title('Confusion matrix')
    plt.xlabel('Prediction')
    plt.ylabel('Actual');

apple = pd.read_csv(path + 'merged_df.csv')
apple.head()

print(apple.shape)
apple.plot('date', 'diff');
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
There is extreme fluctuation between the opening and closing prices of Apple, Inc. (as expected). Let's choose the features and the label (bin_diff) and make the dataframe ready for machine learning and deep learning.
aapl = apple.copy()[['date', 'sentiment', 'bin_diff']]
aapl.head()

plt.hist(aapl['bin_diff']);
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Let's drop the observations with "0" and turn this into a binary classification problem.
aapl = aapl[aapl['bin_diff'] != 0]
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Also, to make the models work properly, from now on we re-code the loss category from -1 to 0.
label = aapl['bin_diff'] == 1 label = label.astype(int)
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Let's look at the features and standardize them.
InputDF = aapl.copy().drop('bin_diff', axis=1)
InputDF = InputDF.set_index('date')
InputDF.head()

InputDF = InputDF.apply(lambda x: (x - x.mean()) / x.std())
InputDF.head()

test_size = 600
xtrain, xtest = InputDF.iloc[:test_size, :], InputDF.iloc[test_size:, :]
ytrain, ytest = label[:test_size], label[test_size:]
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Logistic regression
logreg = LogisticRegression()
logreg_model = logreg.fit(xtrain, ytrain)
logpred = logreg_model.predict(xtest)
logscores = logreg_model.predict_proba(xtest)

plot_confusionmatrix(ytest, logpred)
plot_ROC(ytest, logscores, 'Logistic regression', 'r')
plot_PR(ytest, logscores, 'Logistic regression', 'b')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Support Vector Machines
svm = SVC(probability=True)
svm_model = svm.fit(xtrain, ytrain)
svmpred = svm_model.predict(xtest)
svmscores = svm_model.predict_proba(xtest)

plot_confusionmatrix(ytest, svmpred)
plot_ROC(ytest, svmscores, 'SVM', 'r')
plot_PR(ytest, svmscores, 'SVM', 'b')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Random Forest Tree Classifiers
rf = RandomForestClassifier()
rf_model = rf.fit(xtrain, ytrain)
rfpred = rf.predict(xtest)
rfscores = rf.predict_proba(xtest)

plot_confusionmatrix(ytest, rfpred)
plot_ROC(ytest, rfscores, 'Random Forest', 'r')
plot_PR(ytest, rfscores, 'Random Forest', 'b')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Feed Forward Neural Network
num_features = len(InputDF.columns)
dropout = 0.2
hidden_1_size = 25
hidden_2_size = 5
num_classes = label.nunique()
NUM_EPOCHS = 20
BATCH_SIZE = 1
lr = 0.0001
np.random.RandomState(52);

val = (InputDF[:-test_size].values, label[:-test_size].values)
train = (InputDF[-test_size:].values, label[-test_size:].values)
NUM_TRAIN_BATCHES = int(len(train[0]) / BATCH_SIZE)
NUM_VAL_BATCHES = int(len(val[1]) / BATCH_SIZE)

class Model():
    def __init__(self):
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.input_data = tf.placeholder(dtype=tf.float32, shape=[None, num_features])
        self.target_data = tf.placeholder(dtype=tf.int32, shape=[None])
        self.dropout_prob = tf.placeholder(dtype=tf.float32, shape=[])

        with tf.variable_scope("ff"):
            droped_input = tf.nn.dropout(self.input_data, keep_prob=self.dropout_prob)
            layer_1 = tf.contrib.layers.fully_connected(
                num_outputs=hidden_1_size,
                inputs=droped_input,
            )
            layer_2 = tf.contrib.layers.fully_connected(
                num_outputs=hidden_2_size,
                inputs=layer_1,
            )
            self.logits = tf.contrib.layers.fully_connected(
                num_outputs=num_classes,
                activation_fn=None,
                inputs=layer_2,
            )

        with tf.variable_scope("loss"):
            self.losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.target_data)
            mask = (1 - tf.sign(1 - self.target_data))  # Don't give credit for flat days
            mask = tf.cast(mask, tf.float32)
            self.loss = tf.reduce_sum(self.losses)

        with tf.name_scope("train"):
            opt = tf.train.AdamOptimizer(lr)
            gvs = opt.compute_gradients(self.loss)
            self.train_op = opt.apply_gradients(gvs, global_step=global_step)

        with tf.name_scope("predictions"):
            self.probs = tf.nn.softmax(self.logits)
            self.predictions = tf.argmax(self.probs, 1)
            correct_pred = tf.cast(tf.equal(self.predictions, tf.cast(self.target_data, tf.int64)), tf.float64)
            self.accuracy = tf.reduce_mean(correct_pred)

with tf.Graph().as_default():
    model = Model()
    input_ = train[0]
    target = train[1]
    losses = []

    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run([init])
        epoch_loss = 0
        for e in range(NUM_EPOCHS):
            if epoch_loss > 0 and epoch_loss < 1:
                break
            epoch_loss = 0
            for batch in range(0, NUM_TRAIN_BATCHES):
                start = batch * BATCH_SIZE
                end = start + BATCH_SIZE
                feed = {
                    model.input_data: input_[start:end],
                    model.target_data: target[start:end],
                    model.dropout_prob: 0.9
                }
                _, loss, acc = sess.run(
                    [
                        model.train_op,
                        model.loss,
                        model.accuracy,
                    ], feed_dict=feed
                )
                epoch_loss += loss
            losses.append(epoch_loss)
            # print('step - {0} loss - {1} acc - {2}'.format((1 + batch + NUM_TRAIN_BATCHES * e), epoch_loss, acc))
        print('################ done training ################')

        final_preds = np.array([])
        final_scores = None
        for batch in range(0, NUM_VAL_BATCHES):
            start = batch * BATCH_SIZE
            end = start + BATCH_SIZE
            feed = {
                model.input_data: val[0][start:end],
                model.target_data: val[1][start:end],
                model.dropout_prob: 1
            }
            acc, preds, probs = sess.run(
                [
                    model.accuracy,
                    model.predictions,
                    model.probs
                ], feed_dict=feed
            )
            # print(acc)
            final_preds = np.concatenate((final_preds, preds), axis=0)
            if final_scores is None:
                final_scores = probs
            else:
                final_scores = np.concatenate((final_scores, probs), axis=0)
        print('################ done testing ################')

prediction_conf = final_scores[np.argmax(final_scores, 1)]

plt.scatter(np.linspace(0, 1, len(losses)), losses);
plt.title('Validation loss with epoch')
plt.ylabel('Validation Loss')
plt.xlabel('epoch progression');

plot_confusionmatrix(ytest, final_preds)
plot_ROC(ytest, final_scores, 'Feed forward neural net', 'r')
plot_PR(ytest, final_scores, 'Feed forward neural net', 'b')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Recurrent Neural Nets
RNN_HIDDEN_SIZE = 4
FIRST_LAYER_SIZE = 50
SECOND_LAYER_SIZE = 10
NUM_LAYERS = 2
BATCH_SIZE = 1
NUM_EPOCHS = 25
lr = 0.0003
NUM_TRAIN_BATCHES = int(len(train[0]) / BATCH_SIZE)
NUM_VAL_BATCHES = int(len(val[1]) / BATCH_SIZE)
ATTN_LENGTH = 30
beta = 0
np.random.RandomState(52);

class RNNModel():
    def __init__(self):
        global_step = tf.contrib.framework.get_or_create_global_step()
        self.input_data = tf.placeholder(dtype=tf.float32, shape=[BATCH_SIZE, num_features])
        self.target_data = tf.placeholder(dtype=tf.int32, shape=[BATCH_SIZE])
        self.dropout_prob = tf.placeholder(dtype=tf.float32, shape=[])

        def makeGRUCells():
            base_cell = rnn.GRUCell(num_units=RNN_HIDDEN_SIZE)
            layered_cell = rnn.MultiRNNCell([base_cell] * NUM_LAYERS, state_is_tuple=False)
            attn_cell = tf.contrib.rnn.AttentionCellWrapper(cell=layered_cell, attn_length=ATTN_LENGTH, state_is_tuple=False)
            return attn_cell

        self.gru_cell = makeGRUCells()
        self.zero_state = self.gru_cell.zero_state(1, tf.float32)
        self.start_state = tf.placeholder(dtype=tf.float32, shape=[1, self.gru_cell.state_size])

        with tf.variable_scope("ff", initializer=xavier_initializer(uniform=False)):
            droped_input = tf.nn.dropout(self.input_data, keep_prob=self.dropout_prob)
            layer_1 = tf.contrib.layers.fully_connected(
                num_outputs=FIRST_LAYER_SIZE,
                inputs=droped_input,
            )
            layer_2 = tf.contrib.layers.fully_connected(
                num_outputs=RNN_HIDDEN_SIZE,
                inputs=layer_1,
            )

        # Each item in the batch is a time step, iterate through them
        split_inputs = tf.reshape(droped_input, shape=[1, BATCH_SIZE, num_features], name="reshape_l1")
        split_inputs = tf.unstack(split_inputs, axis=1, name="unpack_l1")
        states = []
        outputs = []
        with tf.variable_scope("rnn", initializer=xavier_initializer(uniform=False)) as scope:
            state = self.start_state
            for i, inp in enumerate(split_inputs):
                if i > 0:
                    scope.reuse_variables()
                output, state = self.gru_cell(inp, state)
                states.append(state)
                outputs.append(output)
            self.end_state = states[-1]

        outputs = tf.stack(outputs, axis=1)  # Pack them back into a single tensor
        outputs = tf.reshape(outputs, shape=[BATCH_SIZE, RNN_HIDDEN_SIZE])
        self.logits = tf.contrib.layers.fully_connected(
            num_outputs=num_classes,
            inputs=outputs,
            activation_fn=None
        )

        with tf.variable_scope("loss"):
            self.penalties = tf.reduce_sum([beta * tf.nn.l2_loss(var) for var in tf.trainable_variables()])
            self.losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.target_data)
            self.loss = tf.reduce_sum(self.losses + beta * self.penalties)

        with tf.name_scope("train_step"):
            opt = tf.train.AdamOptimizer(lr)
            gvs = opt.compute_gradients(self.loss)
            self.train_op = opt.apply_gradients(gvs, global_step=global_step)

        with tf.name_scope("predictions"):
            self.probs = tf.nn.softmax(self.logits)
            self.predictions = tf.argmax(self.probs, 1)
            correct_pred = tf.cast(tf.equal(self.predictions, tf.cast(self.target_data, tf.int64)), tf.float64)
            self.accuracy = tf.reduce_mean(correct_pred)
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
Training the RNN
with tf.Graph().as_default():
    model = RNNModel()
    input_ = train[0]
    target = train[1]
    losses = []

    with tf.Session() as sess:
        init = tf.global_variables_initializer()
        sess.run([init])
        loss = 2000
        for e in range(NUM_EPOCHS):
            state = sess.run(model.zero_state)
            epoch_loss = 0
            for batch in range(0, NUM_TRAIN_BATCHES):
                start = batch * BATCH_SIZE
                end = start + BATCH_SIZE
                feed = {
                    model.input_data: input_[start:end],
                    model.target_data: target[start:end],
                    model.dropout_prob: 0.5,
                    model.start_state: state
                }
                _, loss, acc, state = sess.run(
                    [
                        model.train_op,
                        model.loss,
                        model.accuracy,
                        model.end_state
                    ], feed_dict=feed
                )
                epoch_loss += loss
            losses.append(epoch_loss)
            # print('step - {0} loss - {1} acc - {2}'.format((e), epoch_loss, acc))
        print('################ done training ################')

        final_preds = np.array([])
        final_scores = None
        for batch in range(0, NUM_VAL_BATCHES):
            start = batch * BATCH_SIZE
            end = start + BATCH_SIZE
            feed = {
                model.input_data: val[0][start:end],
                model.target_data: val[1][start:end],
                model.dropout_prob: 1,
                model.start_state: state
            }
            acc, preds, state, probs = sess.run(
                [
                    model.accuracy,
                    model.predictions,
                    model.end_state,
                    model.probs
                ], feed_dict=feed
            )
            # print(acc)
            assert len(preds) == BATCH_SIZE
            final_preds = np.concatenate((final_preds, preds), axis=0)
            if final_scores is None:
                final_scores = probs
            else:
                final_scores = np.concatenate((final_scores, probs), axis=0)
        print('################ done testing ################')

plt.scatter(np.linspace(0, 1, len(losses)), losses);
plt.title('Validation loss with epoch')
plt.ylabel('Validation Loss')
plt.xlabel('epoch progression');

plot_confusionmatrix(ytest, final_preds)
plot_ROC(ytest, final_scores, 'Recurrent neural net', 'r')
plot_PR(ytest, final_scores, 'Recurrent neural net', 'b')
.ipynb_checkpoints/Project-checkpoint.ipynb
akimbekov/Stock_prediction_using_ML_and_Deep_learning
mit
1. Introduction

The key idea of spectral clustering algorithms is to search for groups of connected data. That is, rather than pursuing compact clusters, spectral clustering allows for clusters of arbitrary shape. This can be illustrated with two artificial datasets that we will use along this notebook.

1.1. Gaussian clusters

The first one consists of 4 compact clusters generated from a Gaussian distribution. This is the kind of dataset that is best suited to centroid-based clustering algorithms like $K$-means. If the goal of the clustering algorithm is to minimize the intra-cluster distances and find a representative prototype or centroid for each cluster, $K$-means may be a good option.
N = 300
nc = 4
Xs, ys = make_blobs(n_samples=N, centers=nc, random_state=6, cluster_std=0.60, shuffle=False)
X, y = shuffle(Xs, ys, random_state=0)

plt.scatter(X[:, 0], X[:, 1], s=30);
plt.axis('equal')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note that we have computed two data matrices: ${\bf X}$, which contains the data points in an arbitrary ordering, and ${\bf X}_s$, where samples are ordered by clusters, according to the cluster id array ${\bf y}$. Note that both matrices contain the same data (rows) but in different order. The sorted matrix will be useful later for illustration purposes, but keep in mind that, in a real clustering application, vector ${\bf y}$ is unknown (learning is not supervised), and only a data matrix with an arbitrary ordering (like ${\bf X}$) will be available.

1.2. Concentric rings

The second dataset contains two concentric rings. One could expect a clustering algorithm to identify two different clusters, one per ring of points. If this is the case, $K$-means or any other algorithm focused on minimizing distances to some cluster centroids is not a good choice.
X2s, y2s = make_circles(n_samples=N, factor=.5, noise=.05, shuffle=False)
X2, y2 = shuffle(X2s, y2s, random_state=0)

plt.scatter(X2[:, 0], X2[:, 1], s=30)
plt.axis('equal')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note, again, that we have computed both the sorted (${\bf X}_{2s}$) and the shuffled (${\bf X}_2$) versions of the dataset in the code above.

Exercise 1: Using the code of the previous notebook, run the $K$-means algorithm with 4 centroids for the two datasets. In light of your results, why do you think $K$-means does not work well for the second dataset?
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Spectral clustering algorithms are focused on connectivity: clusters are determined by maximizing some measure of intra-cluster connectivity and minimizing some measure of inter-cluster connectivity.

2. The affinity matrix

2.1. Similarity function

To implement a spectral clustering algorithm we must specify a similarity measure between data points. In this session, we will use the rbf kernel, which computes the similarity between ${\bf x}$ and ${\bf y}$ as:
$$\kappa({\bf x},{\bf y}) = \exp(-\gamma \|{\bf x}-{\bf y}\|^2)$$
Other similarity functions can be used, like the kernel functions implemented in Scikit-learn (see the <a href=http://scikit-learn.org/stable/modules/metrics.html> metrics </a> module).

2.2. Affinity matrix

For a dataset ${\cal S} = \{{\bf x}^{(0)},\ldots,{\bf x}^{(N-1)}\}$, the $N\times N$ affinity matrix ${\bf K}$ contains the similarity measure between each pair of samples. Thus, its components are
$$K_{ij} = \kappa\left({\bf x}^{(i)}, {\bf x}^{(j)}\right)$$
The following fragment of code computes the similarity between every pair of points in the dataset.
gamma = 0.5
K = rbf_kernel(X, X, gamma=gamma)
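As a quick sanity check of the formula above, the same affinity matrix can be recomputed directly from the pairwise squared distances (a small sketch, assuming numpy is imported as np as elsewhere in this notebook):
sq_dists = ((X[:, None, :] - X[None, :, :])**2).sum(axis=-1)  # ||x_i - x_j||^2 for all pairs
K_manual = np.exp(-gamma * sq_dists)
print(np.allclose(K, K_manual))   # should print True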
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
2.3. Visualization

We can visualize the affinity matrix as an image, by translating component values into pixel colors or intensities.
plt.imshow(K, cmap='hot')
plt.colorbar()
plt.title('RBF Affinity Matrix for gamma = ' + str(gamma))
plt.grid('off')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Despite the apparent randomness of the affinity matrix, it contains some hidden structure that we can uncover by visualizing the affinity matrix computed with the sorted data matrix, ${\bf X}_s$.
Ks = rbf_kernel(Xs, Xs, gamma=gamma)

plt.imshow(Ks, cmap='hot')
plt.colorbar()
plt.title('RBF Affinity Matrix for gamma = ' + str(gamma))
plt.grid('off')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note that, despite their completely different appearance, both affinity matrices contain the same values, but with a different order of rows and columns. For this dataset, the sorted affinity matrix is almost block diagonal. Note, also, that the block-wise form of this matrix depends on parameter $\gamma$.

Exercise 2: Modify the selection of $\gamma$, and check its effect on the appearance of the sorted similarity matrix. Write down the values for which you consider that the structure of the matrix better resembles the number of clusters in the datasets.

Outside the diagonal blocks, similarities are close to zero. We can enforce a block diagonal structure by setting the small similarity values to zero. For instance, by thresholding ${\bf K}_s$ with threshold $t$, we get the truncated (and sorted) affinity matrix
$$\overline{K}_{s,ij} = K_{s,ij} \cdot \text{u}(K_{s,ij} - t)$$
(where $\text{u}()$ is the step function) which is block diagonal.

Exercise 3: Compute the truncated and sorted affinity matrix with $t=0.001$
t = 0.001
# Kt = <FILL IN>    # Truncated affinity matrix
# Kst = <FILL IN>   # Truncated and sorted affinity matrix
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
3. Affinity matrix and data graph

Any similarity matrix defines a weighted graph in such a way that the weight of the edge linking ${\bf x}^{(i)}$ and ${\bf x}^{(j)}$ is $K_{ij}$.

If $K$ is a full matrix, the graph is fully connected (there is an edge connecting every pair of nodes). But we can get a more interesting sparse graph by setting to zero the edges with small weights.

For instance, let us visualize the graph for the truncated affinity matrix $\overline{\bf K}$ with threshold $t$. You can also check the effect of increasing or decreasing $t$.
G = nx.from_numpy_matrix(Kt)
graphplot = nx.draw(G, X, node_size=40, width=0.5)
plt.axis('equal')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note that, for this dataset, the graph connects edges from the same cluster only. Therefore, the number of diagonal blocks in $\overline{\bf K}_s$ is equal to the number of connected components in the graph.

Note, also, that the graph does not depend on the sample ordering in the data matrix: the graphs for any matrix ${\bf K}$ and its sorted version ${\bf K}_s$ are the same.

4. The Laplacian matrix

The <a href = https://en.wikipedia.org/wiki/Laplacian_matrix>Laplacian matrix</a> of a given affinity matrix ${\bf K}$ is given by
$${\bf L} = {\bf D} - {\bf K}$$
where ${\bf D}$ is the diagonal degree matrix given by
$$D_{ii}=\sum_{j} K_{ij}$$

4.1. Properties of the Laplacian matrix

The Laplacian matrix of any symmetric matrix ${\bf K}$ has several interesting properties:

P1. ${\bf L}$ is symmetric and positive semidefinite. Therefore, all its eigenvalues $\lambda_0,\ldots, \lambda_{N-1}$ are non-negative. Remember that each eigenvector ${\bf v}$ with eigenvalue $\lambda$ satisfies
$${\bf L} \cdot {\bf v} = \lambda {\bf v}$$

P2. ${\bf L}$ has at least one eigenvector with zero eigenvalue: indeed, for ${\bf v} = {\bf 1}_N = (1, 1, \ldots, 1)^\intercal$ we get
$${\bf L} \cdot {\bf 1}_N = {\bf 0}_N$$
where ${\bf 0}_N$ is the $N$-dimensional all-zero vector.

P3. If ${\bf K}$ is block diagonal, its Laplacian is block diagonal.

P4. If ${\bf L}$ is block diagonal with blocks ${\bf L}_0, {\bf L}_1, \ldots, {\bf L}_{c-1}$, then it has at least $c$ orthogonal eigenvectors with zero eigenvalue: indeed, each block ${\bf L}_i$ is the Laplacian matrix of the graph containing the samples in the $i$-th connected component, therefore, according to property P2,
$${\bf L}_i \cdot {\bf 1}_{N_i} = {\bf 0}_{N_i}$$
where $N_i$ is the number of samples in the $i$-th connected component.

Therefore, if
$${\bf v}_i = \left(\begin{array}{l} {\bf 0}_{N_0} \\ \vdots \\ {\bf 0}_{N_{i-1}} \\ {\bf 1}_{N_i} \\ {\bf 0}_{N_{i+1}} \\ \vdots \\ {\bf 0}_{N_{c-1}} \end{array} \right)$$
then
$${\bf L} \cdot {\bf v}_{i} = {\bf 0}_{N}$$

We can compute the Laplacian matrix for the given dataset and visualize the eigenvalues:
Dst = np.diag(np.sum(Kst, axis=1))
Lst = Dst - Kst

# Next, we compute the eigenvalues of the matrix
w = np.linalg.eigvalsh(Lst)

plt.figure()
plt.plot(w, marker='.');
plt.title('Eigenvalues of the matrix')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Exercise 4: Verify that ${\bf 1}_N$ is an eigenvector with zero eigenvalue. To do so, compute ${\bf L}_{st} \cdot {\bf 1}_N$ and verify that its <a href= https://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html>euclidean norm</a> is close to zero (it may not be exactly zero due to finite precision errors). Verify that the vectors ${\bf v}_i$ defined above (which you can compute using vi = (ys==i)) also have zero eigenvalue.
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Exercise 5: Verify that the spectral properties of the Laplacian matrix computed from ${\bf K}_{st}$ still apply using the unsorted matrix, ${\bf K}_t$: compute ${\bf L}_{t} \cdot {\bf v}'_{i}$, where ${\bf v}'_i$ is a binary vector with components equal to 1 at the positions corresponding to samples in cluster $i$ (which you can compute using vi = (y==i)), and verify that its euclidean norm is close to zero.
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note that the position of the 1's in the eigenvectors ${\bf v}_i$ points out the samples in the $i$-th connected component. This suggests the following tentative clustering algorithm:

1. Compute the affinity matrix.
2. Compute the Laplacian matrix.
3. Compute $c$ orthogonal eigenvectors with zero eigenvalue.
4. If $v_{in}=1$, assign ${\bf x}^{(n)}$ to cluster $i$.

This is the grounding idea of some spectral clustering algorithms. In this precise form, the algorithm does not usually work, for several reasons that we will discuss next, but with some modifications it becomes a powerful method.

4.2. Computing eigenvectors of the Laplacian matrix

One of the reasons why the algorithm above may not work is that the vectors ${\bf v}'_0, \ldots,{\bf v}'_{c-1}$ are not the only zero eigenvectors of ${\bf L}_t$: any linear combination of them is also a zero eigenvector. Eigenvector computation algorithms may return a different set of orthogonal eigenvectors. However, one can expect the eigenvectors to have similar components in the positions corresponding to samples in the same connected component.
wst, vst = np.linalg.eigh(Lst)

for n in range(nc):
    plt.plot(vst[:, n], '.-')
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
4.3. Non block diagonal matrices

Another reason to modify our tentative algorithm is that, in more realistic cases, the affinity matrix may have an imperfect block diagonal structure. In such cases, the smallest eigenvalues may be nonzero and the eigenvectors may not be exactly piecewise constant.

Exercise 6: Plot the eigenvector profile for the shuffled and non-thresholded affinity matrix, ${\bf K}$.
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Note that, even though the eigenvector components cannot be used as a straightforward cluster indicator, they are strongly informative of the clustering structure:

- All points in the same cluster have similar values of the corresponding eigenvector components $(v_{n0}, \ldots, v_{n,c-1})$.
- Points from different clusters have different values of the corresponding eigenvector components $(v_{n0}, \ldots, v_{n,c-1})$.

Therefore, we can define vectors ${\bf z}^{(n)} = (v_{n0}, \ldots, v_{n,c-1})$ and apply a centroid-based algorithm (like $K$-means) to identify all points with similar eigenvector components. The corresponding samples in ${\bf X}$ become the final clusters of the spectral clustering algorithm.

One possible way to identify the cluster structure is to apply a $K$-means algorithm over the eigenvector coordinates. The steps of the spectral clustering algorithm become the following.

5. A spectral clustering (graph cutting) algorithm

5.1. The steps of the spectral clustering algorithm

Summarizing, the steps of the spectral clustering algorithm for a data matrix ${\bf X}$ are the following:

1. Compute the affinity matrix, ${\bf K}$. Optionally, truncate the smallest components to zero.
2. Compute the Laplacian matrix, ${\bf L}$.
3. Compute the $c$ orthogonal eigenvectors with smallest eigenvalues, ${\bf v}_0,\ldots,{\bf v}_{c-1}$.
4. Construct the sample set ${\bf Z}$ with rows ${\bf z}^{(n)} = (v_{0n}, \ldots, v_{c-1,n})$.
5. Apply the $K$-means algorithm over ${\bf Z}$ with $K=c$ centroids.
6. Assign samples in ${\bf X}$ to clusters: if ${\bf z}^{(n)}$ is assigned by $K$-means to cluster $i$, assign sample ${\bf x}^{(n)}$ in ${\bf X}$ to cluster $i$.

Exercise 7: In this exercise we will apply the spectral clustering algorithm to the two-rings dataset ${\bf X}_2$, using $\gamma = 20$, $t=0.1$ and $c = 2$ clusters.

Complete step 1, and plot the graph induced by ${\bf K}$
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Complete steps 2, 3 and 4, and draw a scatter plot of the samples in ${\bf Z}$
# <SOL>
# </SOL>
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Complete step 5
est = KMeans(n_clusters=2)
clusters = est.fit_predict(Z2t)
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
Finally, complete step 6 and show, in a scatter plot, the result of the clustering algorithm
plt.scatter(X2[:, 0], X2[:, 1], c=clusters, s=50, cmap='rainbow')
plt.axis('equal')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
5.2. Scikit-learn implementation

The <a href=http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html> spectral clustering algorithm </a> in Scikit-learn requires the number of clusters to be specified. It works well for a small number of clusters, but is not advised when using many clusters and/or large datasets.

Finally, we are going to run spectral clustering on both datasets. Spend a few minutes figuring out the meaning of the parameters of the Spectral Clustering implementation of Scikit-learn: http://scikit-learn.org/stable/modules/generated/sklearn.cluster.SpectralClustering.html

Note that there is no parameter equivalent to our threshold $t$, which has been useful for the graph representations. However, playing with $\gamma$ should be enough to get a good clustering.

The following piece of code executes the algorithm with an 'rbf' kernel. You can manually adjust the number of clusters and the parameter of the kernel to study the behavior of the algorithm. When you are done, you can also:

- Modify the code to allow for kernels other than the 'rbf'
- Repeat the analysis for the second dataset (two_rings)
n_clusters = 4
gamma = .1  # Warning: do not exceed gamma=100
SpClus = SpectralClustering(n_clusters=n_clusters, affinity='rbf', gamma=gamma)
SpClus.fit(X)

plt.scatter(X[:, 0], X[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow')
plt.axis('equal')
plt.show()

nc = 2
gamma = 50  # Warning: do not exceed gamma=300
SpClus = SpectralClustering(n_clusters=nc, affinity='rbf', gamma=gamma)
SpClus.fit(X2)

plt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow')
plt.axis('equal')
plt.show()

nc = 5
SpClus = SpectralClustering(n_clusters=nc, affinity='nearest_neighbors')
SpClus.fit(X2)

plt.scatter(X2[:, 0], X2[:, 1], c=SpClus.labels_.astype(np.int), s=50, cmap='rainbow')
plt.axis('equal')
plt.show()
U2.SpectralClustering/.ipynb_checkpoints/SpecClustering_student-checkpoint.ipynb
ML4DS/ML4all
mit
compute metrics
%time d_pagerank = G.pagerank()
%time u_pagerank = G.as_undirected().pagerank()

%time d_betweenness = G.betweenness(directed=True)
%time u_betweenness = G.as_undirected().betweenness(directed=False)

%time d_closeness = G.closeness(mode="IN", normalized=True)
%time u_closeness = G.as_undirected().closeness(normalized=True)

%time d_eigen = G.eigenvector_centrality()
%time u_eigen = G.as_undirected().eigenvector_centrality()

%time hubs = G.hub_score()
%time authorities = G.authority_score()

indegree = G.indegree()
outdegree = G.outdegree()
degree = G.degree()

df = pd.DataFrame(index=G.vs['name'])
df['year'] = G.vs['year']

df['indegree'] = indegree
df['outdegree'] = outdegree
df['degree'] = degree

df['d_pagerank'] = d_pagerank
df['u_pagerank'] = u_pagerank

df['d_betweenness'] = d_betweenness
df['u_betweenness'] = u_betweenness

df['d_closeness'] = d_closeness
df['u_closeness'] = u_closeness

df['d_eigen'] = d_eigen
df['u_eigen'] = u_eigen

df['hubs'] = hubs
df['authorities'] = authorities

all_metrics = ['indegree', 'outdegree', 'degree',
               'd_pagerank', 'u_pagerank',
               'd_betweenness', 'u_betweenness',
               'd_closeness', 'u_closeness',
               'd_eigen', 'u_eigen',
               'hubs', 'authorities']
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
idc9/law-net
mit
Issue area

Procedural
- 1 Criminal Procedure
- 4 Due Process
- 6 Attorneys
- 9 Judicial Power

Substantive
- 2 Civil Rights
- 3 First Amendment
- 5 Privacy
- 7 Unions
- 8 Economic Activity
- 12 Federal Taxation
- 14 Private Action

Other
- 0 Missing
- 10 Federalism
- 11 Interstate Relations
- 13 Miscellaneous

Hypothesis
- betweenness/closeness favor procedural cases
- eigenvector metrics (eigenvector centrality, hubs, authorities) favor substantive cases
# map types to issues
type_to_issue = {'procedural': [1, 4, 6, 9],
                 'substantive': [2, 3, 5, 7, 8, 12, 14],
                 'other': [10, 11, 13, 0]}

# map issues to type
issue_to_type = {i: '' for i in range(13 + 1)}
for t in type_to_issue.keys():
    for i in type_to_issue[t]:
        issue_to_type[i] = t

# create type
G.vs['issueArea'] = [int(i) for i in G.vs['issueArea']]
G.vs['type'] = [issue_to_type[i] for i in G.vs['issueArea']]

# add to data frame
df['issueArea'] = G.vs['issueArea']
df['type'] = G.vs['type']

# get type subsets
df_sub = df[df['type'] == 'substantive']
df_pro = df[df['type'] == 'procedural']
df_oth = df[df['type'] == 'other']

print 'num substantive: %d' % df_sub.shape[0]
print 'num procedural: %d' % df_pro.shape[0]
print 'num other: %d' % df_oth.shape[0]

df.to_csv(subnet_dir + 'issue_area/metrics.csv', index=True)
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
idc9/law-net
mit
compare metric vs. issue type
df.columns

metric = 'authorities'
bins = np.linspace(min(df[metric]), max(df[metric]), 100)

# substantive
plt.hist(df_sub[metric], bins=bins, color='red',
         label='substantive (mean: %1.5f)' % np.mean(df_sub[metric]))

# procedural
plt.hist(df_pro[metric], bins=bins, color='blue',
         label='procedural (mean: %1.5f)' % np.mean(df_pro[metric]))

# other
plt.hist(df_oth[metric], bins=bins, color='green',
         label='other (mean: %1.5f)' % np.mean(df_oth[metric]))

plt.xlim([0, .2])
plt.ylim([0, 2000])
plt.xlabel(metric)
plt.legend(loc='upper right')

# look at the proportion of top cases of each type
T = 100
top_cases = df.sort_values(by=metric, ascending=False).iloc[0:T]['type']
top_breakdown = top_cases.value_counts(normalize=True)

# compare to the proportion of all cases
all_breakdown = df['type'].value_counts(normalize=True)

diff = top_breakdown - all_breakdown
diff
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
idc9/law-net
mit
Permutation test

Rank cases by metric, then look at the proportion of the top T (=100) cases that are substantive.
metric = 'indegree'

df_pro_sub = df[df['type'] != 'other']
T = 100

# observed proportion of top cases that are substantive
obs_top_breakdown = df_pro_sub.\
                    sort_values(by=metric, ascending=False).\
                    iloc[0:T]['type'].\
                    value_counts(normalize=True)

obs_prop_sub = obs_top_breakdown['substantive']

R = 1000
perm_prop_sub = [0] * R
for r in range(R):
    # randomly select T cases
    perm_indices = np.random.choice(range(df_pro_sub.shape[0]), replace=False, size=T)

    # compute the type breakdown of the T cases
    perm_breakdown = df_pro_sub.\
                     iloc[perm_indices]['type'].\
                     value_counts(normalize=True)

    # proportion of T cases that are substantive
    perm_prop_sub[r] = perm_breakdown['substantive']

perm_prop_sub = np.array(perm_prop_sub)

pval = 1 - np.mean(perm_prop_sub < obs_prop_sub)

plt.title('permutation test substantive vs. procedural (pval: %1.3f)' % pval)
plt.hist(perm_prop_sub, color='blue', label='permutation')
plt.axvline(obs_prop_sub, color='red', label='obs')
plt.xlabel(metric)
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
idc9/law-net
mit
Results

hubs, authorities, u_eigen, d_eigen, d_betweenness, and u_betweenness are significant (confirming the hypothesis).

TODO: recompute u_closeness

PC plot
df_pro_sub = df[df['type'] != 'other']

U, D, V = get_PCA(df_pro_sub[all_metrics], scale=True)

plot_2class_scores(U,
                   classes=df_pro_sub['type'],
                   start=6,
                   n_comp=5)
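get_PCA and plot_2class_scores are project helpers that are not defined in this notebook; purely as an illustration of what the PCA step might involve (the actual helpers may differ), a minimal numpy sketch could look like:
def get_PCA_sketch(df_metrics, scale=True):
    # Hypothetical stand-in for the project's get_PCA helper: center (and
    # optionally scale) the metrics, then take the SVD of the data matrix.
    Xm = df_metrics.values.astype(float)
    Xm = Xm - Xm.mean(axis=0)
    if scale:
        Xm = Xm / Xm.std(axis=0)
    U, D, V = np.linalg.svd(Xm, full_matrices=False)
    return U, D, V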
vertex_metrics_experiment/procedural_v_substantive_scotus.ipynb
idc9/law-net
mit
The function dfa2string converts the given deterministic <span style="font-variant:small-caps;">Fsm</span> into a string.
def dfa2string(Fsm):
    states, sigma, delta, q0, final = Fsm
    result = ''
    n = 0
    statesToNames = {}
    for q in states:
        statesToNames[q] = f'S{n}'
        n += 1
    result += 'states: {S0, ..., ' + f'S{n-1}' + '}\n\n'
    result += f'start state: {statesToNames[q0]}' + '\n\n'
    result += 'state encoding:\n'
    for q in states:
        result += f'{statesToNames[q]} = {q}' + '\n'
    result += '\ntransitions:\n'
    for q in states:
        for c in sigma:
            print(q, c, delta.get((q, c)))
            if delta.get((q, c)) != None:
                result += f'delta({statesToNames[q]}, {c}) = {statesToNames[delta[(q, c)]]}' + '\n'
    result += '\nset of accepting states: {'
    result += ', '.join({ statesToNames[q] for q in final })
    result += '}\n'
    return result

import graphviz as gv
Python/FSM-2-Dot.ipynb
karlstroetmann/Formal-Languages
gpl-2.0
The function dfa2dot converts the given deterministic <span style="font-variant:small-caps;">Fsm</span> into a graph in dot-format.
def dfa2dot(dfa):
    states, sigma, delta, q0, final = dfa
    dot = gv.Digraph('Deterministic FSM')
    dot.graph_attr['rankdir'] = 'LR'
    n = 0                # used to assign names to states
    statesToNames = {}   # assigns a name to every state
    for q in states:
        statesToNames[q] = f'S{n}'
        n += 1
    startName = statesToNames[q0]
    dot.node('1', label='', width='0.1', height='0.1', style='filled', color='blue')
    dot.edge('1', startName)
    for q in states:
        if q in final:
            dot.node(statesToNames[q], peripheries='2')
        else:
            dot.node(statesToNames[q])
    for q in states:
        for c in sigma:
            p = delta.get((q, c))
            if p != None:
                dot.edge(statesToNames[q], statesToNames[p], label=c)
    return dot, statesToNames
Python/FSM-2-Dot.ipynb
karlstroetmann/Formal-Languages
gpl-2.0
The function nfa2string converts a non-deterministic finite state machine nfa into a string.
def nfa2string(nfa):
    states, sigma, delta, q0, final = nfa
    n = 0
    result = ''
    result += f'states: {states}' + '\n\n'
    result += f'start state: {q0}' + '\n\n'
    result += 'transitions:\n'
    for q in states:
        for c in sigma:
            S = delta.get((q, c))
            if S != None:
                for p in S:
                    result += f'[{q}, {c}] |-> {p}' + '\n'
        S = delta.get((q, ''))
        if S != None:
            for p in S:
                result += f'[{q}, ""] |-> {p}' + '\n'
    result += '\n' + f'set of accepting states: {final}' + '\n'
    return result
Python/FSM-2-Dot.ipynb
karlstroetmann/Formal-Languages
gpl-2.0
The function nfa2dot takes a non-deterministic finite state machine and converts it into a dot graph.
def nfa2dot(nfa):
    states, sigma, delta, q0, final = nfa
    result = ''
    n = 0
    startName = str(q0)
    dot = gv.Digraph('Non-Deterministic FSM')
    dot.graph_attr['rankdir'] = 'LR'
    dot.node('0', label='', width='0.1', height='0.1', style='filled', color='blue')
    dot.edge('0', startName)
    for q in states:
        if q in final:
            dot.node(str(q), peripheries='2')
        else:
            dot.node(str(q))
    for q in states:
        S = delta.get((q, ''))
        if S != None:
            for p in S:
                dot.edge(str(q), str(p), label='𝜀', weight='0.1')
    for q in states:
        for c in sigma:
            S = delta.get((q, c))
            if S != None:
                for p in S:
                    dot.edge(str(q), str(p), label=c, weight='10')
    return dot
Python/FSM-2-Dot.ipynb
karlstroetmann/Formal-Languages
gpl-2.0
Define the x and y_ placeholders and reshape x to 28x28x1.
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])

x_image = tf.reshape(x, [-1, 28, 28, 1])
print("x_image=", x_image)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Define a function that initializes the weights with random numbers having a standard deviation of 0.1, and a function that initializes the biases to 0.1.
def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Define a function that creates a convolution layer with stride 1 and zero padding, and a function for a 2x2 max pooling layer.
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
To build the first convolution layer, we create the weight and bias tensors and use the ReLU activation function. A max pooling layer is then added after the convolution layer.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])

h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Because of SAME padding, the convolution does not change the dimensions; in the pooling step the dimensions are halved according to the stride.
print(x_image.get_shape())
print(h_conv1.get_shape())
h_pool1.get_shape()
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Create the second convolution and pooling layers. Since the first convolution has 32 filters, the effect is as if the second convolution had 32 color channels.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Because of SAME padding, the convolution does not change the dimensions; in the pooling step the dimensions are halved according to the stride.
print(h_conv2.get_shape())
h_pool2.get_shape()
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
To connect to the final softmax layer, we add a fully connected layer. The output tensor of the previous convolution layer is flattened back into a 1D tensor and passed to the ReLU activation function.
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Create a placeholder for the keep probability (the probability that a unit is not dropped out) and add a dropout layer.
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Finally, add the softmax layer.
W_fc2 = weight_variable([1024, 10])
b_fc2 = bias_variable([10])

y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Define the cross-entropy, the optimization algorithm, and the operations for evaluation.
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
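Computing the cross-entropy directly from the softmax output can produce NaNs when y_conv contains zeros; a more numerically stable variant keeps the pre-softmax logits and uses tf.nn.softmax_cross_entropy_with_logits. A minimal sketch, assuming the same tensors defined above (not the book's original code):
logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2   # pre-softmax scores
cross_entropy_stable = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step_stable = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy_stable)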
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Start a session and initialize the variables.
sess = tf.Session()
sess.run(tf.initialize_all_variables())
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Run 20,000 training iterations.
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i % 1000 == 0:
        train_accuracy = sess.run(accuracy, feed_dict={
            x: batch[0], y_: batch[1], keep_prob: 1.0})
        print("step %d, training accuracy %g" % (i, train_accuracy))
    sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Print the final test accuracy.
print("test accuracy %g"% sess.run( accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
first-contact-with-tensorflow/chapter5_convolution_neural_network.ipynb
rickiepark/tfk-notebooks
mit
Get and process data
url = 'http://files.figshare.com/2182601/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5'
download_file(url, save_dir='./data')

full_fname = "./data/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
d = loader.photon_hdf5(full_fname)
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=1000, tail_min_us=(800, 4000, 1500, 1000, 3000))
d.burst_search(L=10, m=10, F=6)
ds = d.select_bursts(select_bursts.size, add_naa=True, th1=30)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
ALEX joint plot

The alex_jointplot function allows plotting an ALEX histogram with marginals. This is how it looks by default:
alex_jointplot(ds)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
The inner plot is a hexbin plot, basically a 2D histogram with hexagonal bins. This kind of histogram resembles a scatter plot when the sample size is small, and is immune to the grid artifacts typical of rectangular grids. For more info on hexbin see this document.

The marginal plots are histograms with an overlaid KDE plot. The same FRETBursts function that plots standalone E and S histograms is used here to plot the marginals in the joint plot.

Below I show how to customize the appearance and type of this plot.

Changing colors

By default the colormap range is computed on the range S=[0.2, 0.8], so that the FRET populations (S ~ 0.5) have more contrast. To normalize the colormap to the whole data use the vmax argument:
alex_jointplot(ds, vmax_fret=False)

alex_jointplot(ds, vmax_fret=False, marginal_color=8)

alex_jointplot(ds, vmax_fret=False, marginal_color=7)

alex_jointplot(ds, kind='kde')
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Or you can manually choose the max value mapped by the colormap (vmax):
alex_jointplot(ds, vmax=40)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Changing the colormap will affect both inner and marginal plots:
alex_jointplot(ds, cmap='plasma')
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
To pick a different color from the colormap for the marginal histograms use marginal_color:
alex_jointplot(ds, cmap='plasma', marginal_color=83)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Kinds of joint-plots

The inner plot can be changed to a scatter plot or a KDE plot:
alex_jointplot(ds, kind='scatter')

alex_jointplot(ds, kind='kde')

dsf = ds.select_bursts(select_bursts.naa, th1=40)
alex_jointplot(dsf, kind='kde',
               joint_kws={'shade': False, 'n_levels': 12, 'bw': 0.04})
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
No marginals

Finally, we can plot only the hexbin 2D histogram without marginals:
plt.figure(figsize=(5, 5))
hexbin_alex(ds)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Figure layout

You can get a handle to the different axes in the figure for layout customization:
g = alex_jointplot(ds)
g.ax_marg_x.grid(False)
g.ax_marg_y.grid(False)
g.ax_joint.set_xlim(-0.1, 1.1)
g.ax_joint.set_ylim(-0.1, 1.1)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
alex_jointplot returns g, which contains the axis handles (g.ax_joint, g.ax_marg_x, g.ax_marg_y). The object g is a seaborn.JointGrid.
g = alex_jointplot(ds)
g.ax_marg_x.grid(False)
g.ax_marg_y.grid(False)
g.ax_joint.set_xlim(-0.19, 1.19)
g.ax_joint.set_ylim(-0.19, 1.19)
plt.subplots_adjust(wspace=0, hspace=0)
g.ax_marg_y.spines['bottom'].set_visible(True)
g.ax_marg_x.spines['left'].set_visible(True)
g.ax_marg_y.tick_params(reset=True, bottom=True, top=False, right=False, labelleft=False)
g.ax_marg_x.tick_params(reset=True, left=True, top=False, right=False, labelbottom=False)

g = alex_jointplot(ds)
g.ax_marg_x.grid(False)
g.ax_marg_y.grid(False)
g.ax_joint.set_xlim(-0.19, 1.19)
g.ax_joint.set_ylim(-0.19, 1.19)
plt.subplots_adjust(wspace=0, hspace=0)
g.ax_marg_y.tick_params(reset=True, bottom=True, top=False, right=False, labelleft=False)
g.ax_marg_x.tick_params(reset=True, left=True, top=False, right=False, labelbottom=False)

g = alex_jointplot(ds)
g.ax_marg_x.grid(False, axis='x')
g.ax_marg_y.grid(False, axis='y')
g.ax_joint.set_xlim(-0.19, 1.19)
g.ax_joint.set_ylim(-0.19, 1.19)
plt.subplots_adjust(wspace=0, hspace=0)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Arguments of inner plots

Additional arguments can be passed to the inner or marginal plots by passing a dictionary to joint_kws and marginal_kws respectively.

The marginal plots are created by hist_burst_data, which is the same function used to plot standalone E and S histograms in FRETBursts.

For example, we can remove the KDE overlay like this:
alex_jointplot(ds, marginal_kws={'show_kde': False})
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Interactive plot
from ipywidgets import widgets, interact, interactive, fixed
from IPython.display import display, display_png, display_svg, clear_output
from IPython.core.pylabtools import print_figure

cmaps = ['viridis', 'plasma', 'inferno', 'magma', 'afmhot', 'Blues', 'BuGn', 'BuPu', 'GnBu', 'YlGnBu',
         'coolwarm', 'RdYlBu', 'RdYlGn', 'Spectral',]  # 'icefire'] uncomment if using seaborn 0.8

@interact(overlay=widgets.RadioButtons(options=['fit model', 'KDE'], value='KDE'),
          binwidth=widgets.FloatText(value=0.03, min=0.01, max=1),
          bandwidth=widgets.FloatText(value=0.03, min=0.01, max=1),
          gridsize=(10, 100),
          min_size=(10, 500, 5),
          cmap=widgets.Dropdown(value='Spectral', options=cmaps),
          reverse_cmap=True,
          vmax_fret=True,
          )
def plot_(min_size=50, overlay='KDE', binwidth=0.03, bandwidth=0.03,
          gridsize=50, cmap='Spectral', reverse_cmap=False, vmax_fret=True):
    dx = d.select_bursts(select_bursts.size, add_naa=True, th1=min_size)
    bext.bursts_fitter(dx, 'E', binwidth=binwidth, bandwidth=bandwidth, model=mfit.factory_three_gaussians())
    bext.bursts_fitter(dx, 'S', binwidth=binwidth, bandwidth=bandwidth, model=mfit.factory_two_gaussians())
    if reverse_cmap:
        cmap += '_r'
    if binwidth < 0.01:
        binwidth = 0.01
    if bandwidth < 0.01:
        bandwidth = 0.01
    if overlay == 'fit model':
        marginal_kws = dict(binwidth=binwidth, show_model=True, pdf=True, show_kde=False)
    else:
        marginal_kws = dict(binwidth=binwidth, show_kde=True, bandwidth=bandwidth)
    alex_jointplot(dx, cmap=cmap, gridsize=gridsize, vmax_fret=vmax_fret,
                   marginal_kws=marginal_kws,)
    fig = gcf()
    plt.close()
    display(fig)
notebooks/Example - Customize the us-ALEX histogram.ipynb
tritemio/FRETBursts
gpl-2.0
Below is a spectrum which follows an almost bell-curve type distribution (though the specific type of distribution is not important here).
spectrum = [[1, 2, 3, 4, 5, 6], [2000, 4040, 6500, 6000, 4020, 2070]]

energies = np.array(spectrum[0])
fluxes = np.array(spectrum[1])

spectrum
Simulator/Concepts/Inverse Transform Sampling.ipynb
StingraySoftware/notebooks
mit
Below, first we compute probabilities of flux. Afterwards, we compute the cumulative probability.
prob = fluxes / float(sum(fluxes))
cum_prob = np.cumsum(prob)
cum_prob
Simulator/Concepts/Inverse Transform Sampling.ipynb
StingraySoftware/notebooks
mit
We draw ten thousand numbers from uniform random distribution.
N = 10000
R = ra.uniform(0, 1, N)
R[1:10]
Simulator/Concepts/Inverse Transform Sampling.ipynb
StingraySoftware/notebooks
mit
We assign energies to events corresponding to the random numbers drawn. Note: the command below finds the bin interval using a single expression. I am not sure, though, that it's very readable. Would we want to split that into multiple lines and maybe use explicit loops to make it more readable? Or is it fine as it is? Comments?
gen_energies = [int(energies[np.argwhere(cum_prob == min(cum_prob[(cum_prob - r) > 0]))]) for r in R]
gen_energies[1:10]
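One arguably more readable (and vectorized) alternative to the np.argwhere one-liner is np.searchsorted, which looks up the bin index of every random number at once; a sketch, assuming the same energies, cum_prob and R as above:
idx = np.searchsorted(cum_prob, R, side='right')  # first bin whose cumulative probability exceeds r
gen_energies_alt = energies[idx]
gen_energies_alt[1:10]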
Simulator/Concepts/Inverse Transform Sampling.ipynb
StingraySoftware/notebooks
mit
Histogram energies to get shape approximation.
gen_energies = ((np.array(gen_energies) - 1) / 1).astype(int)

times = np.arange(1, 6, 1)
lc = np.bincount(gen_energies, minlength=len(times))
lc

plot1, = plt.plot(lc/float(sum(lc)), 'r--', label='Assigned energies')
plot2, = plt.plot(prob, 'g', label='Original Spectrum')
plt.xlabel('Energies')
plt.ylabel('Probability')
plt.legend(handles=[plot1, plot2])
plt.show()
Simulator/Concepts/Inverse Transform Sampling.ipynb
StingraySoftware/notebooks
mit
Problem 2: Round 5.23222 to two decimal places
print round(5.2322, 2)
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Advanced Strings

Problem 3: Check if every letter in the string s is lower case
s = 'hello how are you Mary, are you feeling okay?'
print 'Yup' if s.islower() else 'Nope'
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Problem 4: How many times does the letter 'w' show up in the string below?
s = 'twywywtwywbwhsjhwuwshshwuwwwjdjdid'
print s.count('w')
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Advanced Sets

Problem 5: Find the elements in set1 that are not in set2:
set1 = {2,3,1,5,6,8}
set2 = {3,1,7,5,6,8}

print set1.difference(set2)  # in set 1 but not set 2
print set2.difference(set1)  # in set 2 but not set 1
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Problem 6: Find all elements that are in either set:
print set1.union(set2)         # all unique elements in either set
print set1.intersection(set2)  # all elements in both sets
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Advanced Dictionaries

Problem 7: Create this dictionary: {0: 0, 1: 1, 2: 8, 3: 27, 4: 64} using dictionary comprehension.
{x:x**3 for x in range(5)}
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Advanced Lists Problem 8: Reverse the list below:
l = [1,2,3,4] l.reverse() # reverses in place, call the list again to check l
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Problem 9: Sort the list below
l = [3,4,2,5,1] l.sort() l
Advanced Python Objects - Test.ipynb
spacedrabbit/PythonBootcamp
mit
Q1. Let's practice the seq2seq framework with a simple example. In this example, we will take the last state of the encoder as the initial state of the decoder. Complete the code.
# Inputs and outputs: ten digits x = tf.placeholder(tf.int32, shape=(32, 10)) y = tf.placeholder(tf.int32, shape=(32, 10)) # One-hot encoding enc_inputs = tf.one_hot(x, 10) dec_inputs = tf.concat((tf.zeros_like(y[:, :1]), y[:, :-1]), -1) dec_inputs = tf.one_hot(dec_inputs, 10) # encoder encoder_cell = tf.contrib.rnn.GRUCell(128) memory, last_state = tf.nn.dynamic_rnn(encoder_cell, enc_inputs, dtype=tf.float32, scope="encoder") # decoder decoder_cell = tf.contrib.rnn.GRUCell(128) outputs, _ = tf.nn.dynamic_rnn(decoder_cell, dec_inputs, initial_state=last_state, scope="decoder") # Readout logits = tf.layers.dense(outputs, 10) preds = tf.argmax(logits, -1, output_type=tf.int32) # Evaluation hits = tf.reduce_sum(tf.to_float(tf.equal(preds, y))) acc = hits / tf.to_float(tf.size(x)) # Loss and train loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y) mean_loss = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001) train_op = opt.minimize(mean_loss) # Session with tf.Session() as sess: sess.run(tf.global_variables_initializer()) losses, accs = [], [] for step in range(2000): # Data design # We feed sequences of random digits in the `x`, # and take its reverse as the target. _x = np.random.randint(0, 10, size=(32, 10), dtype=np.int32) _y = _x[:, ::-1] # Reverse _, _loss, _acc = sess.run([train_op, mean_loss, acc], {x:_x, y:_y}) losses.append(_loss) accs.append(_acc) # Plot plt.plot(losses, label="loss") plt.plot(accs, label="accuracy") plt.legend() plt.grid() plt.show()
programming/Python/tensorflow/exercises/Seq2Seq_solutions.ipynb
diegocavalca/Studies
cc0-1.0
Q2. This time, we will use the Bahdanau attention mechanism. Complete the code.
tf.reset_default_graph() # Inputs and outputs: ten digits x = tf.placeholder(tf.int32, shape=(32, 10)) y = tf.placeholder(tf.int32, shape=(32, 10)) # One-hot encoding enc_inputs = tf.one_hot(x, 10) dec_inputs = tf.concat((tf.zeros_like(y[:, :1]), y[:, :-1]), -1) dec_inputs = tf.one_hot(dec_inputs, 10) # encoder encoder_cell = tf.contrib.rnn.GRUCell(128) memory, last_state = tf.nn.dynamic_rnn(encoder_cell, enc_inputs, dtype=tf.float32, scope="encoder") # decoder attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(128, memory) decoder_cell = tf.contrib.rnn.GRUCell(128) cell_with_attention = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism, attention_layer_size=256, alignment_history=True, output_attention=False) outputs, state = tf.nn.dynamic_rnn(cell_with_attention, dec_inputs, dtype=tf.float32) alignments = tf.transpose(state.alignment_history.stack(),[1,2,0]) # Readout logits = tf.layers.dense(outputs, 10) preds = tf.argmax(logits, -1, output_type=tf.int32) # Evaluation hits = tf.reduce_sum(tf.to_float(tf.equal(preds, y))) acc = hits / tf.to_float(tf.size(x)) # Loss and train loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y) mean_loss = tf.reduce_mean(loss) opt = tf.train.AdamOptimizer(0.001) train_op = opt.minimize(mean_loss) # Session def plot_alignment(alignment): fig, ax = plt.subplots() im=ax.imshow(alignment, cmap='Greys', interpolation='none') fig.colorbar(im, ax=ax) plt.xlabel('Decoder timestep') plt.ylabel('Encoder timestep') plt.show() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) losses, accs = [], [] for step in range(2000): # Data design # We feed sequences of random digits in the `x`, # and take its reverse as the target. _x = np.random.randint(0, 10, size=(32, 10), dtype=np.int32) _y = _x[:, ::-1] # Reverse _, _loss, _acc = sess.run([train_op, mean_loss, acc], {x:_x, y:_y}) losses.append(_loss) accs.append(_acc) if step % 100 == 0: print("step=", step) _alignments = sess.run(alignments, {x: _x, y: _y}) plot_alignment(_alignments[0]) # Plot plt.plot(losses, label="loss") plt.plot(accs, label="accuracy") plt.legend() plt.grid() plt.show()
programming/Python/tensorflow/exercises/Seq2Seq_solutions.ipynb
diegocavalca/Studies
cc0-1.0
What is a SparkSession? It is the driver process that controls a Spark application. A SparkSession instance is responsible for executing the driver program’s commands (code) across executors (in a cluster) to complete a given task. You can have as many SparkSessions as you want in a single Spark application (a small sketch follows this cell). How do I create a SparkSession? You can use the SparkSession class attribute called builder. The builder attribute allows you to run some of the following functions: appName: Sets a name for the application master: URL for the Spark master (local or a Spark standalone cluster) enableHiveSupport: Enables Hive support, including connectivity to a persistent Hive metastore, support for Hive serdes, and Hive user-defined functions. getOrCreate: Gets an existing SparkSession or, if there is no existing one, creates a new one based on the options set in this builder. Create a SparkSession instance Define a spark variable Pass values to the appName and master functions For the master function, we are going to use the HELK Spark master container (helk-spark-master)
spark = SparkSession.builder \ .appName("Python Spark SQL basic example") \ .master("spark://helk-spark-master:7077") \ .enableHiveSupport() \ .getOrCreate()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
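The paragraph above notes that a single Spark application can hold more than one SparkSession. As a minimal sketch, assuming the spark variable created above, newSession() returns a second session that shares the same SparkContext but keeps its own SQL configuration and temporary views.
# A second session: same underlying SparkContext, isolated SQL config and temp views
spark2 = spark.newSession()
print(spark.sparkContext is spark2.sparkContext)  # True: both sessions share one context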
Check the SparkSession variable
spark
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
What is a Dataframe? In Spark, a dataframe is the most common Structured API, and it is used to represent data in a table format with rows and columns. Think of a dataframe as a spreadsheet with headers. The difference is that one Spark Dataframe can be distributed across several computers due to its large size or high computation requirements for faster analysis. The list of column names from a dataframe with their respective data types is called the schema. Is a Spark Dataframe the same as a Python Pandas Dataframe? A Python dataframe sits on one computer whereas a Spark Dataframe, once again, can be distributed across several computers. PySpark allows the conversion from Python Pandas dataframes to Spark dataframes. Create your first Dataframe Let's create our first dataframe by using the range and toDF functions. * One column named numbers * 10 rows containing numbers from 0-9 range(start, end=None, step=1, numPartitions=None) * Creates a DataFrame with a single pyspark.sql.types.LongType column named id, containing elements in a range from start to end (exclusive) with step value step. toDF(*cols) * Returns a new DataFrame with the specified column names
first_df = spark.range(10).toDF("numbers") first_df.show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Create another Dataframe createDataFrame(data, schema=None, samplingRatio=None, verifySchema=True) Creates a DataFrame from an RDD, a list or a pandas.DataFrame. When schema is a list of column names, the type of each column will be inferred from data. When schema is None, it will try to infer the schema (column names and types) from data, which should be an RDD of Row, or namedtuple, or dict.
dog_data=[['Pedro','Doberman',3],['Clementine','Golden Retriever',8],['Norah','Great Dane',6],['Mabel','Australian Shepherd',1],['Bear','Maltese',4],['Bill','Great Dane',10]] dog_df=spark.createDataFrame(dog_data, ['name','breed','age']) dog_df.show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Check the Dataframe schema We are going to apply a concept called schema inference, which lets Spark take its best guess at figuring out the schema. Spark reads part of the dataframe and then tries to parse the types of data in each row. You can also define a strict schema when you read in data, which does not let Spark guess; this is recommended for production use cases (a minimal sketch follows this cell). schema * Returns the schema of this DataFrame as a pyspark.sql.types.StructType.
dog_df.schema
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
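The note above recommends a strict schema for production use but does not show one. Here is a minimal sketch, assuming the dog_data list and the spark session from the earlier cells, using pyspark.sql.types to declare the columns explicitly so Spark does not have to guess.
from pyspark.sql.types import StructType, StructField, StringType, LongType

# Declare the columns explicitly instead of letting Spark infer them
dog_schema = StructType([
    StructField("name", StringType(), False),
    StructField("breed", StringType(), False),
    StructField("age", LongType(), True),
])

dog_df_strict = spark.createDataFrame(dog_data, schema=dog_schema)
dog_df_strict.printSchema()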
printSchema() * Prints out the schema in the tree format
dog_df.printSchema()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Access Dataframe Columns select(*cols) * Projects a set of expressions and returns a new DataFrame. Access the Dataframe's columns by attribute (df.name):
dog_df.select("name").show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Access the Dataframe's columns by indexing (df['name']). * According to Spark's documentation, the indexing form is the recommended one because it is future-proof and won’t break with column names that are also attributes on the DataFrame class.
dog_df.select(dog_df["name"]).show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Filter Dataframe filter(condition) * Filters rows using the given condition. Select dogs that are older than 4 years
dog_df.filter(dog_df["age"] > 4).show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Group Dataframe groupBy(*cols) * Groups the DataFrame using the specified columns, so we can run aggregation on them. See GroupedData for all the available aggregate functions (a small aggregation sketch follows this cell). Group dogs and count them by their age
dog_df.groupBy(dog_df["age"]).count().show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
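count is only one of the aggregations available on GroupedData. As a small sketch, assuming dog_df from the cells above, the agg function combined with pyspark.sql.functions gives access to the rest, for example the average age per breed.
from pyspark.sql import functions as F

# Average age per breed instead of a simple count
dog_df.groupBy("breed").agg(F.avg("age").alias("avg_age")).show()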
Run SQL queries on your Dataframe createOrReplaceTempView(name) * Creates or replaces a local temporary view with this DataFrame. * The lifetime of this temporary table is tied to the SparkSession that was used to create this DataFrame. Register the current Dataframe as a SQL temporary view
dog_df.createOrReplaceTempView("dogs") sql_dog_df = spark.sql("SELECT * FROM dogs") sql_dog_df.show() sql_dog_df = spark.sql("SELECT * FROM dogs WHERE name='Pedro'") sql_dog_df.show()
docker/helk-jupyter/notebooks/tutorials/04-Intro_pyspark_sparkSQL.ipynb
Cyb3rWard0g/HELK
gpl-3.0
Functions in vtktools
help(pygslib.vtktools)
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Load a cube defined in an STL file and plot it STL is a popular mesh format supported by many non-commercial and commercial software packages, for example Paraview, Datamine Studio, etc.
#load the cube mycube=pygslib.vtktools.loadSTL('../datasets/stl/cube.stl') # see the information about this data... Note that it is a vtkPolyData print mycube # Create a VTK render containing a surface (mycube) renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.50, background=(1,1,1)) # Now we plot the render pygslib.vtktools.vtk_show(renderer, camera_position=(-20,20,20), camera_focalpoint=(0,0,0))
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Ray casting to find intersections of a line with the cube This is basically how we plan to find points inside a solid and to define blocks inside a solid
# we have a line, for example a block model row # defined by two points or an infinite line passing through a drillhole sample pSource = [-50.0, 0.0, 0.0] pTarget = [50.0, 0.0, 0.0] # now we want to see what this looks like pygslib.vtktools.addLine(renderer,pSource, pTarget, color=(0, 1, 0)) pygslib.vtktools.vtk_show(renderer) # the camera position was already defined # now we find the point coordinates of the intersections intersect, points, pointsVTK= pygslib.vtktools.vtk_raycasting(mycube, pSource, pTarget) print "the line intersects? ", intersect==1 print "the line is over the surface?", intersect==-1 # list of coordinates of the points intersecting print points # Now we plot the intersecting points # To do this we add the points to the renderer for p in points: pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0)) pygslib.vtktools.vtk_show(renderer)
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Test line on surface
# we have a line, for example a block model row # defined by two points or an infinite line passing through a drillhole sample pSource = [-50.0, 5.01, 0] pTarget = [50.0, 5.01, 0] # now we find the point coordinates of the intersections intersect, points, pointsVTK= pygslib.vtktools.vtk_raycasting(mycube, pSource, pTarget) print "the line intersects? ", intersect==1 print "the line is over the surface?", intersect==-1 # list of coordinates of the points intersecting print points # now we want to see what this looks like pygslib.vtktools.addLine(renderer,pSource, pTarget, color=(0, 1, 0)) for p in points: pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0)) pygslib.vtktools.vtk_show(renderer) # the camera position was already defined # note that there is a tolerance of about 0.01
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
Finding points We generate arbitrary random points around the cube; a sketch of testing which of them fall inside the solid follows the cell below.
# using the same cube but generating arbitrary random points x = np.random.uniform(-10,10,150) y = np.random.uniform(-10,10,150) z = np.random.uniform(-10,10,150)
pygslib/Ipython_templates/broken/vtk_tools.ipynb
opengeostat/pygslib
mit
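One way to classify the random points generated above is a ray-casting parity test built on the vtk_raycasting function already demonstrated: cast a ray from each point to a point far outside the surface and count the crossings; an odd count means the point lies inside. The sketch below assumes mycube and the x, y, z arrays from the previous cells and follows the notebook's Python 2 print style; pygslib may also provide dedicated helpers for this task.
# Parity test: an odd number of surface crossings means the point is inside the cube
inside = []
for px, py, pz in zip(x, y, z):
    pSource = [px, py, pz]
    pTarget = [px + 1000.0, py, pz]  # a point far outside the surface
    intersect, points, pointsVTK = pygslib.vtktools.vtk_raycasting(mycube, pSource, pTarget)
    inside.append(len(points) % 2 == 1)
inside = np.array(inside)
print "points inside the cube: ", inside.sum()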