Columns: markdown, code, path, repo_name, license
We don't need to start slicing at 0.
print (data[5:10,7:15])
01-analysing-data.ipynb
drwalshaw/sc-python
mit
We don't even need to include the upper and lower limits.
smallchunk = data[:3, 36:]
print(smallchunk)
01-analysing-data.ipynb
drwalshaw/sc-python
mit
arithmetic on arrays
doublesmallchunk = smallchunk * 2.0
print(doublesmallchunk)
triplesmallchunk = smallchunk + doublesmallchunk
print(triplesmallchunk)
print(numpy.mean(data))
print(numpy.max(data))
print(numpy.min(data))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Get the data for the first station; the bare `:` is shorthand for "all the columns".
station_0 = data[0, :]
print(numpy.max(station_0))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
We don't need to create temporary array slices; we can refer to what we call array axes.
print(numpy.mean(data, axis=0))
print(numpy.mean(data, axis=1))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
axis=0 gives the mean down each column; axis=1 gives the mean across each row, i.e. the mean temperature for each station over all periods (see above). Now let's do some simple visualisations.
import matplotlib.pyplot
%matplotlib inline

image = matplotlib.pyplot.imshow(data)
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Let's look at the average temperature over time.
avg_temperature = numpy.mean(data, axis=0)
avg_plot = matplotlib.pyplot.plot(avg_temperature)

import numpy
import matplotlib.pyplot
%matplotlib inline

data = numpy.loadtxt(fname='data/weather-01.csv', delimiter=',')
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Create a wide figure to hold subplots.
fig=matplotlib.pyplot.figure (figsize=(10.0,3.0))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
create placeholders for plots
fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))

subplot1 = fig.add_subplot(1, 3, 1)
subplot2 = fig.add_subplot(1, 3, 2)
subplot3 = fig.add_subplot(1, 3, 3)

subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis=0))

subplot2.set_ylabel('minimum')
subplot2.plot(numpy.min(data, axis=0))

subplot3.set_ylabel('maximum')
subplot3.plot(numpy.max(data, axis=0))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
This is fine for small numbers of datasets, but what if we have hundreds or thousands? We need more automation: loops.
word = 'notebook'
print(word[4])
01-analysing-data.ipynb
drwalshaw/sc-python
mit
See above; note the difference between square and normal brackets.
# the colon and the indentation are very important
# the indent is 4 spaces
for char in word:
    print(char)
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Reading filenames: get a list of all the filenames from disk.
import glob
01-analysing-data.ipynb
drwalshaw/sc-python
mit
glob is short for "global"; it expands a wildcard pattern into a list of matching filenames.
print(glob.glob('data/weather*.csv'))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
putting it all together
filenames = sorted(glob.glob('data/weather*.csv'))
filenames = filenames[0:3]
for f in filenames:
    print(f)
    data = numpy.loadtxt(fname=f, delimiter=',')

    # next bits need indenting
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))

    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)

    subplot1.set_ylabel('average')
    subplot1.plot(numpy.mean(data, axis=0))

    subplot2.set_ylabel('minimum')
    subplot2.plot(numpy.min(data, axis=0))

    subplot3.set_ylabel('maximum')
    subplot3.plot(numpy.max(data, axis=0))

    fig.tight_layout()
    matplotlib.pyplot.show()

num = 37
if num > 100:
    print('greater')
else:
    print('not greater')
print('done')

num = 107
if num > 100:
    print('greater')
else:
    print('not greater')
print('done')
01-analysing-data.ipynb
drwalshaw/sc-python
mit
It didn't print "done" because of a break in the indentation sequence.
num = -3
if num > 0:
    print(num, "is positive")
elif num == 0:
    print(num, "is zero")
else:
    print(num, "is negative")
01-analysing-data.ipynb
drwalshaw/sc-python
mit
elif means "else if"; it is always good to finish a chain with an else.
filenames = sorted(glob.glob('data/weather*.csv'))
filenames = filenames[0:3]
for f in filenames:
    print(f)
    data = numpy.loadtxt(fname=f, delimiter=',') == 0
    if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
        print('suspicious looking maxima')
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
        print('minimum adds to zero')
    else:
        print('data looks ok')

    # next bits need indenting
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))

    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)

    subplot1.set_ylabel('average')
    subplot1.plot(numpy.mean(data, axis=0))

    subplot2.set_ylabel('minimum')
    subplot2.plot(numpy.min(data, axis=0))

    subplot3.set_ylabel('maximum')
    subplot3.plot(numpy.max(data, axis=0))

    fig.tight_layout()
    matplotlib.pyplot.show()
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Something went wrong with the above: the stray == 0 on the numpy.loadtxt line turned the data into a boolean array, so the checks and plots did not behave as expected.
def fahr_to_kelvin(temp):
    return (temp - 32) * (5 / 9) + 273.15

print('freezing point of water:', fahr_to_kelvin(32))
print('boiling point of water:', fahr_to_kelvin(212))
01-analysing-data.ipynb
drwalshaw/sc-python
mit
using functions
def analyse(filename):
    data = numpy.loadtxt(fname=filename,)......
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Unfinished.
def detect_problems(filename):
    data = numpy.loadtxt(fname=filename, delimiter=',')
    if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
        print('suspicious looking maxima')
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
        print('minimum adds to zero')
    else:
        print('data looks ok')

def analyse(filename):
    data = numpy.loadtxt(fname=filename, delimiter=',')
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)
    subplot1.set_ylabel('average')
    subplot1.plot(numpy.mean(data, axis=0))
    subplot2.set_ylabel('minimum')
    subplot2.plot(numpy.min(data, axis=0))
    subplot3.set_ylabel('maximum')
    subplot3.plot(numpy.max(data, axis=0))
    fig.tight_layout()
    matplotlib.pyplot.show()

for f in filenames[0:5]:
    print(f)
    analyse(f)
    detect_problems(f)

help(numpy.loadtxt)
help(detect_problems)

# redefine the functions with docstrings so that help() reports something useful
def detect_problems(filename):
    """Some of our temperature files have problems; check for these.
    This function reads a file and reports odd looking maxima and minima that add to zero.
    The function does not return any data.
    """
    data = numpy.loadtxt(fname=filename, delimiter=',')
    if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
        print('suspicious looking maxima')
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
        print('minimum adds to zero')
    else:
        print('data looks ok')

def analyse(filename):
    """This function analyses a dataset and outputs plots for the max, min and average."""
    data = numpy.loadtxt(fname=filename, delimiter=',')
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)
    subplot1.set_ylabel('average')
    subplot1.plot(numpy.mean(data, axis=0))
    subplot2.set_ylabel('minimum')
    subplot2.plot(numpy.min(data, axis=0))
    subplot3.set_ylabel('maximum')
    subplot3.plot(numpy.max(data, axis=0))
    fig.tight_layout()
    matplotlib.pyplot.show()
01-analysing-data.ipynb
drwalshaw/sc-python
mit
Tensorflow
TensorFlow provides multiple APIs. The lowest-level API, TensorFlow Core, provides you with complete programming control. We recommend TensorFlow Core for machine learning researchers and others who require fine levels of control over their models.
Hello World
We can think of TensorFlow Core programs as consisting of two discrete sections:
- Building the computational graph.
- Running the computational graph.
import tensorflow as tf

# note that this is simply telling tensorflow to
# create a constant operation, nothing gets
# executed until we start a session and run it
hello = tf.constant('Hello, TensorFlow!')
hello

# start the session and run the graph
with tf.Session() as sess:
    print(sess.run(hello))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
We can think of tensorflow as a system to define our computation; using the operations that we've defined, it will construct a computation graph (where each operation becomes a node in the graph). The computation graph that we've defined will not be run unless we give it some context and explicitly tell it to do so. In this case, we create the Session that encapsulates the environment in which the objects are evaluated (it executes the operations that are defined in the graph). Consider another example that simply adds and multiplies two constant numbers.
a = tf.constant(2.0, tf.float32)
b = tf.constant(3.0)  # also tf.float32 implicitly
c = a + b

with tf.Session() as sess:
    print('multiply: ', sess.run(a * b))
    print('add: ', sess.run(c))      # note that we can define the add operation outside
    print('add: ', sess.run(a + b))  # or inside the .run()
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
The example above is not especially interesting because it always produces a constant result. A graph can be parameterized to accept external inputs, known as placeholders. Think of them as the input data we would give to a machine learning algorithm at some point. We can do the same operation as above by first defining a placeholder (note that we must specify the data type) and then feeding in values using feed_dict when we run it.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)

# define some operations
add = a + b
mul = a * b

with tf.Session() as sess:
    print('multiply: ', sess.run(mul, feed_dict = {a: 2, b: 3}))
    print('add: ', sess.run(add, feed_dict = {a: 2, b: 3}))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
Some matrix operations work the same way as in numpy, e.g.:
import numpy as np

c = np.array([[3., 4], [5., 6], [6., 7]])
print(c)
print(np.mean(c, axis = 1))
print(np.argmax(c, axis = 1))

with tf.Session() as sess:
    result = sess.run(tf.reduce_mean(c, axis = 1))
    print(result)
    print(sess.run(tf.argmax(c, axis = 1)))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
The functionality of numpy.mean and tensorflow.reduce_mean is the same. When the axis argument is 1, the mean is computed across (3,4), (5,6) and (6,7), so the argument defines across which axis the mean is computed (axis = 1 means the operation is along the columns, so it computes the mean for each row). When it is 0, the mean is computed across (3,5,6) and (4,6,7), and so on. The same applies to argmax, which returns the index that contains the maximum value along an axis.
Linear Regression
We'll start off by writing a simple linear regression model. To do so, we first need to understand the difference between tf.Variable and tf.placeholder (see Stackoverflow). The difference is that with tf.Variable you have to provide an initial value when you declare it, while with tf.placeholder you don't have to provide an initial value and you can specify it at run time with the feed_dict argument inside Session.run. In short, we will use tf.Variable for trainable variables such as weights (W) and biases (b) for our model. On the other hand, tf.placeholder is used to feed actual training examples. Also note that constants are automatically initialized when we call tf.constant, and their value can never change. By contrast, variables are not initialized when we call tf.Variable. To initialize all the variables in a TensorFlow program, we must explicitly call a special operation called tf.global_variables_initializer(). Things will become clearer with the example below.
# Parameters
learning_rate = 0.01  # learning rate for the optimizer (gradient descent)
n_epochs = 1000       # number of iterations to train the model
display_epoch = 100   # display the cost for every display_epoch iteration

# make up some training data
X_train = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1], dtype = np.float32)
y_train = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3], dtype = np.float32)

# placeholder for the input data
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# give the model's parameters a randomized initial value
W = tf.Variable(np.random.randn(), tf.float32, name = 'weight')
b = tf.Variable(np.random.randn(), tf.float32, name = 'bias')

# Construct the formula for the linear model
# we can also do
# pred = tf.add(tf.multiply(X, W), b)
pred = W * X + b

# we then define the loss function that the model is going to optimize on;
# here we use the standard mean squared error, which sums the squares of the
# difference between the prediction and the true y, divided by the number of
# observations; note that we're computing the difference between the prediction
# and the y label from the placeholder
cost = tf.reduce_mean(tf.pow(pred - Y, 2))

# after defining the model structure and the function to optimize on,
# tensorflow provides several optimizers that can do the optimization task
# for us, the simplest one being gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(cost)

# initializing the variables
init = tf.global_variables_initializer()

# change default figure and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12

# Launch the graph
with tf.Session() as sess:
    sess.run(init)

    # Fit on all the training data
    feed_dict = {X: X_train, Y: y_train}
    for epoch in range(n_epochs):
        sess.run(train, feed_dict = feed_dict)

        # Display logs per epoch step
        if (epoch + 1) % display_epoch == 0:
            # run the cost to obtain the value for the cost function at each step
            c = sess.run(cost, feed_dict = feed_dict)
            print("Epoch: {}, cost: {}".format(epoch + 1, c))

    print("Optimization Finished!")
    c = sess.run(cost, feed_dict = feed_dict)
    weight = sess.run(W)
    bias = sess.run(b)
    print("Training cost: {}, W: {}, b: {}".format(c, weight, bias))

    # graphic display
    plt.plot(X_train, y_train, 'ro', label = 'Original data')
    plt.plot(X_train, weight * X_train + bias, label = 'Fitted line')
    plt.legend()
    plt.show()
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
MNIST Using Softmax
MNIST is a simple computer vision dataset. It consists of images of handwritten digits like these: <img src='images/mnist.png'>
Each image is 28 pixels by 28 pixels, which is essentially a $28 \times 28$ array of numbers. To use it in the context of a machine learning problem, we can flatten this array into a vector of $28 \times 28 = 784$ entries; this will be the number of features for each image. It doesn't matter how we flatten the array, as long as we're consistent between images. Note that flattening the data throws away information about the 2D structure of the image. Isn't that bad? Well, the best computer vision methods do exploit this structure, but the simple method we will be using here, a softmax regression (defined below), won't.
The dataset also includes labels for each image, telling us each image's label. For example, the labels for the above images are 5, 0, 4, and 1.
Here we're going to train a softmax model to look at images and predict what digits they are. The possible label values in the MNIST dataset are numbers between 0 and 9, hence this will be a 10-class classification problem.
n_class = 10
n_features = 784  # mnist is a 28 * 28 image

# load the dataset and some preprocessing step that can be skipped
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, n_features)
X_test = X_test.reshape(10000, n_features)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')

# images take values between 0 - 255, we can normalize them
# by dividing every number by 255
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices (one-hot encoding)
# note: you HAVE to do this step
Y_train = np_utils.to_categorical(y_train, n_class)
Y_test = np_utils.to_categorical(y_test, n_class)
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
In the following code chunk, we define the overall computational graph/structure for the softmax classifier using the cross entropy cost function as the objective. Recall that the formula for this function can be denoted as: $$L = -\sum_i y'_i \log(y_i)$$ where $y$ is our predicted probability distribution and $y'$ is the true distribution.
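To make the formula concrete, here is a small numpy check (a sketch with made-up numbers, not part of the original notebook):

```python
import numpy as np

# hypothetical true one-hot label and predicted probabilities for one image
y_true = np.array([0., 0., 1.])     # the true class is index 2
y_pred = np.array([0.1, 0.2, 0.7])  # model's predicted distribution

# cross entropy L = -sum_i y'_i * log(y_i); only the true-class term survives
loss = -np.sum(y_true * np.log(y_pred))
print(loss)  # -log(0.7), roughly 0.357
```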
# define some global variables
learning_rate = 0.1
n_iterations = 400

# define the input and output
# here None means that a dimension can be of any length,
# which is what we want, since the number of observations
# we have can vary;
# note that the shape argument to placeholder is optional,
# but it allows TensorFlow to automatically catch bugs stemming
# from inconsistent tensor shapes
X = tf.placeholder(tf.float32, [None, n_features])
y = tf.placeholder(tf.float32, [None, n_class])

# initialize both W and b as tensors full of zeros.
# these are parameters that the model is later going to learn;
# notice that W has a shape of [784, 10] because we want to multiply
# the 784-dimensional image vectors by it to produce 10-dimensional
# vectors of evidence for the different classes. b has a shape of [10]
# so we can add it to the output.
W = tf.Variable(tf.zeros([n_features, n_class]))
b = tf.Variable(tf.zeros([n_class]))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
To define the softmax classifier and cross entropy cost we can do the following:

```python
# matrix multiplication using the .matmul command and add the softmax output
output = tf.nn.softmax(tf.matmul(X, W) + b)

# cost function: cross entropy; the reduce mean is simply the average
# of the cost function across all observations
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(output), axis = 1))
```
# but for numerical stability reasons, the tensorflow documentation
# suggests using the following function
output = tf.matmul(X, W) + b
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = output))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
Now that we've defined the structure of our model, we'll:
- Define an optimization algorithm to train it. In this case, we ask TensorFlow to minimize our defined cross_entropy cost using the gradient descent algorithm with the learning rate defined above (0.1). There are also other off-the-shelf optimizers that we can use that are faster for more complex models.
- Add an operation to initialize the variables we created.
- Define a helper "function" to evaluate the prediction accuracy.
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
init = tf.global_variables_initializer()

# here we return the predicted class of each observation using argmax
# and see if the output (prediction) is equal to the target variable (y);
# since equal is a boolean type tensor, we cast it to a float type to compute
# the actual accuracy
correct_prediction = tf.equal(tf.argmax(y, axis = 1), tf.argmax(output, axis = 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
Now it's time to run it. During each step of the loop, we get a "batch" of one hundred random data points (defined by batch_size) from our training set. We run train_step feeding in the batches data to replace the placeholders. Using small batches of random data is called stochastic training -- in this case, stochastic gradient descent. Ideally, we'd like to use all our data for every step of training because that would give us a better sense of what we should be doing, but that's expensive. So, instead, we use a different subset every time. Doing this is cheap and has much of the same benefit.
with tf.Session() as sess:
    # initialize the variables, train the "batch" gradient descent
    # for a specified number of iterations and evaluate the accuracy score;
    # remember the key to the feed_dict dictionary must match the variable we use
    # as the placeholder for the data in the beginning
    sess.run(init)
    for i in range(n_iterations):
        # X_batch, y_batch = mnist.train.next_batch(batch_size)
        _, acc = sess.run([train_step, accuracy], feed_dict = {X: X_train, y: Y_train})

        # simply prints the training data's accuracy for every n iterations
        if i % 50 == 0:
            print(acc)

    # after training, evaluate the accuracy on the testing data
    acc = sess.run(accuracy, feed_dict = {X: X_test, y: Y_test})
    print('test:', acc)
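Note that the cell above actually feeds the full training set at every step (the next_batch call is commented out). A minimal mini-batch variant, assuming X_train and Y_train are the arrays prepared earlier and batch_size is a value we choose ourselves, could look like this sketch:

```python
batch_size = 100  # assumed value, not defined in the original notebook

with tf.Session() as sess:
    sess.run(init)
    for i in range(n_iterations):
        # sample a random mini-batch of rows from the training data
        idx = np.random.choice(X_train.shape[0], batch_size, replace=False)
        X_batch, y_batch = X_train[idx], Y_train[idx]
        _, acc = sess.run([train_step, accuracy],
                          feed_dict={X: X_batch, y: y_batch})
        if i % 50 == 0:
            print(acc)

    # evaluate on the held-out test set
    print('test:', sess.run(accuracy, feed_dict={X: X_test, y: Y_test}))
```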
deep_learning/softmax_tensorflow.ipynb
ethen8181/machine-learning
mit
Access the Database with the sqlite3 Package We can use the sqlite3 package from the Python standard library to connect to the sqlite database:
import sqlite3

conn = sqlite3.connect('data/iris/database.sqlite')
cursor = conn.cursor()
type(cursor)
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
A sqlite3.Cursor object is our interface to the database, mostly through the execute method, which allows us to run any SQL query on our database. First of all, we can get a list of all the tables saved in the database; this is done by reading the name column from the sqlite_master metadata table with: SELECT name FROM sqlite_master. The output of the execute method is an iterator that can be used in a for loop to print the value of each row.
for row in cursor.execute("SELECT name FROM sqlite_master"):
    print(row)
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
A shortcut to directly execute the query and gather the results is the fetchall method:
cursor.execute("SELECT name FROM sqlite_master").fetchall()
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
Notice: this way of finding the available tables in a database is specific to sqlite; other databases like MySQL or PostgreSQL have different syntax. We can then execute standard SQL queries on the database. SQL is a language designed to interact with data stored in a relational database; it has a standard specification, therefore the commands below work on any database.
If you need to connect to another database, you would use another package instead of sqlite3, for example:
- MySQL Connector for MySQL
- Psycopg for PostgreSQL
- pymssql for Microsoft MS SQL
You would then connect to the database using a specific host, port and authentication credentials, but you could execute the same exact SQL statements (a short connection sketch for PostgreSQL is shown a little further below).
Let's take a look, for example, at the first few rows in the Iris table:
sample_data = cursor.execute("SELECT * FROM Iris LIMIT 20").fetchall()
print(type(sample_data))
sample_data

[row[0] for row in cursor.description]
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
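As a side note to the point above about other database drivers: connecting to, say, PostgreSQL follows the same pattern, only the connection call changes. A hedged sketch (the host, credentials and table name are placeholders, and psycopg2 must be installed separately):

```python
import psycopg2  # third-party driver, not part of the standard library

# placeholder connection details -- replace with your own server's values
conn = psycopg2.connect(host='localhost', port=5432,
                        dbname='mydb', user='myuser', password='secret')
cursor = conn.cursor()

# the SQL itself is unchanged
cursor.execute("SELECT * FROM Iris LIMIT 3")
print(cursor.fetchall())
conn.close()
```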
It is evident that the interface provided by sqlite3 is low-level; for data exploration purposes we would like to import the data directly into a more user-friendly library like pandas.
Import data from a database to pandas
import pandas as pd

iris_data = pd.read_sql_query("SELECT * FROM Iris", conn)
iris_data.head()
iris_data.dtypes
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
pandas.read_sql_query takes a SQL query and a connection object and imports the data into a DataFrame, also keeping the same data types as the database columns. pandas provides a lot of the same functionality as SQL with a more user-friendly interface.
However, sqlite3 is extremely useful for downselecting data before importing them in pandas. For example, you might have 1 TB of data in a table stored in a database on a server machine. If you are interested in working on a subset of the data based on some criterion, it would unfortunately be impossible to first load the data into pandas and then filter it; therefore we should tell the database to perform the filtering and load only the downsized dataset into pandas.
iris_setosa_data = pd.read_sql_query("SELECT * FROM Iris WHERE Species == 'Iris-setosa'", conn)
iris_setosa_data

print(iris_setosa_data.shape)
print(iris_data.shape)
Week-8-NLP-Databases/Working with Databases.ipynb
kkhenriquez/python-for-data-science
mit
GPFlow first approximation
## Import modules
import numpy as np
import scipy.spatial.distance as sp
from matplotlib import pyplot as plt
plt.style.use('ggplot')
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Simulating Data
Simulate random uniform 4-d vectors; generate N of them.
## Parameter definitions
N = 1000
phi = 0.05
sigma2 = 1.0
beta_0 = 10.0
beta_1 = 1.5
beta_2 = -1.0
# the nugget (noise variance)
nugget = 0.03

X = np.random.rand(N, 4)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Calculate distances. X can be interpreted as a covariate matrix in which the first two columns are the longitude and latitude. GPFlow requires that all the covariates (including spatio-temporal coordinates) are in X.
points = X[:, 0:2]
dist_points = sp.pdist(points)
## Reshape the vector to a square matrix
distance_matrix = sp.squareform(dist_points)
correlation_matrix = np.exp(-distance_matrix / phi)
covariance_matrix = correlation_matrix * sigma2
plt.imshow(covariance_matrix)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Simulate the Gaussian Process $S$. Remember that for a stationary Gaussian Process, the value of $S$ is independent of the betas (covariate weights). Mean: zeros; $\Sigma$: correlation matrix. $$S = MVN(0,\Sigma) + \epsilon, \quad \epsilon \sim N(0,\sigma^{2})$$ $S$ is a realization of a spatial process.
S = np.random.multivariate_normal(np.zeros(N), correlation_matrix) + \
    np.random.normal(size = N) * nugget
S.shape

# We convert to a matrix [1 column]
S = S.reshape(N, 1)

## Plot x, y using the Gaussian process as colour
plt.scatter(X[:, 0], X[:, 1], c = S)
plt.colorbar()
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Simulate the Response Variable $y$ $$y_1(x_1,x_2) = S(x_1,x_2) $$ $$y_2(x_1,x_2) = \beta_0 + x_3\beta_1 + x_4\beta_2 + S(x_1,x_2)$$
# remember: indexing starts at 0, so columns 2 and 3 hold x_3 and x_4
mu = beta_0 + beta_1 * X[:, 2] + beta_2 * X[:, 3]
mu = mu.reshape(N, 1)

Y1 = S
Y2 = mu + S

plt.scatter(X[:, 0], X[:, 1], c = Y2)
plt.colorbar()
S.shape
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
GP Model. Note that this model is without covariates.
# Import GPFlow
import GPflow as gf

# Defining the model: Matern function with \kappa = 0.5
k = gf.kernels.Matern12(2, lengthscales=1, active_dims=[0, 1])
type(k)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Model for $y_1$
m = gf.gpr.GPR(points, Y1, k)

## First guess for the nugget
init_nugget = 0.001
m.likelihood.variance = init_nugget
print(m)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
As in tensorflow, m is a graph and has at least three nodes: the lengthscale, the kernel variance and the likelihood variance.
# Estimation using symbolic gradient descent
m.optimize()
print(m)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Compare with the original parameters (used in the simulation).
print(phi, sigma2, nugget)
print(points.shape)
print(Y1.shape)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
It was close enough.
GAUSSIAN PROCESS WITH LINEAR TREND
Defining the model.
k = gf.kernels.Matern12(2, lengthscales=1, active_dims=[0, 1])
gf.mean_functions.Linear()
meanf = gf.mean_functions.Linear(np.ones((4, 1)), np.ones(1))
m = gf.gpr.GPR(X, Y2, k, meanf)
m.likelihood.variance = init_nugget
print(m)

# Estimation
m.optimize()
print(m)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Original parameters:
phi = 0.05 ---> lengthscale
sigma2 = 1.0 ---> variance transform
nugget = 0.03 ---> likelihood variance
beta_0 = 10.0 ---> mean_function b
beta_1 = 1.5 ---> mean_function A[2]
beta_2 = -1.0 ---> mean_function A[3]
mean_function A[0] and A[1] are the betas for x and y (the coordinates, respectively).
Without spatial coordinates as covariates.
# Defining the model
k = gf.kernels.Matern12(2, lengthscales=1, active_dims=[0, 1])
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Custom-made mean function (by Erick Chacón).
from GPflow.mean_functions import MeanFunction, Param
import tensorflow as tf

class LinearG(MeanFunction):
    """
    y_i = A x_i + b
    """
    def __init__(self, A=None, b=None):
        """
        A is a matrix which maps each element of X to Y, b is an additive constant.
        If X has N rows and D columns, and Y is intended to have Q columns,
        then A must be D x Q, b must be a vector of length Q.
        """
        A = np.ones((1, 1)) if A is None else A
        b = np.zeros(1) if b is None else b
        MeanFunction.__init__(self)
        self.A = Param(np.atleast_2d(A))
        self.b = Param(b)

    def __call__(self, X):
        # prepend zero weights for the two coordinate columns
        Anew = tf.concat([np.zeros((2, 1)), self.A], 0)
        return tf.matmul(X, Anew) + self.b
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Now we can use the special mean function without the coordinates (covariates).
meanf = LinearG(np.ones((2, 1)), np.ones(1))
X.shape
Y2.shape

m = gf.gpr.GPR(X, Y2, k, meanf)
m.likelihood.variance = 0.1
print(m)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Only 2 parameters now!
# Estimation
m.optimize()
print(m)
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Original parameters:
phi = 0.05 ---> lengthscale
sigma2 = 1.0 ---> variance transform
nugget = 0.03 ---> likelihood variance
beta_0 = 10.0 ---> mean_function b
beta_1 = 1.5 ---> mean_function A[2]
beta_2 = -1.0 ---> mean_function A[3]
predicted_x = np.linspace(0.0, 1.0, 100)

from external_plugins.spystats.models import makeDuples
predsX = makeDuples(predicted_x)
pX = np.array(predsX)
tt = np.ones((10000, 2)) * 0.5

## Concatenate with horizontal stack
SuperX = np.hstack((pX, tt))
SuperX.shape

mean, variance = m.predict_y(SuperX)
minmean = min(mean)
maxmean = max(mean)

#plt.figure(figsize=(12, 6))
plt.scatter(pX[:, 0], pX[:, 1])
Xx, Yy = np.meshgrid(predicted_x, predicted_x)
plt.pcolor(Xx, Yy, mean.reshape(100, 100), cmap=plt.cm.Accent)

Nn = 300
predicted_x = np.linspace(0.0, 1.0, Nn)
Xx, Yy = np.meshgrid(predicted_x, predicted_x)

## Predict
from external_plugins.spystats.models import makeDuples
predsX = makeDuples(predicted_x)
pX = np.array(predsX)
tt = np.ones((Nn**2, 2)) * 0.5
SuperX = np.hstack((pX, tt))
mean, variance = m.predict_y(SuperX)
minmean = min(mean)
maxmean = max(mean)

width = 12
height = 8
minz = minmean
maxz = maxmean

plt.figure(figsize=(width, height))
plt.subplot(1, 2, 1)
scat = plt.scatter(X[:, 0], X[:, 1], c = Y2)
#plt.axis('equal')
plt.xlim((0, 1))
plt.ylim((0, 1))
plt.clim(minz, maxz)
#plt.colorbar()

plt.subplot(1, 2, 2)
#field = plt.imshow(mean.reshape(100,100).transpose().transpose(), interpolation=None)
plt.pcolor(Xx, Yy, mean.reshape(Nn, Nn).transpose())
plt.colorbar()
plt.clim(minz, maxz)

fig, axes = plt.subplots(nrows=1, ncols=2)
scat = plt.scatter(X[:, 0], X[:, 1], c = Y2)
field = plt.imshow(mean.reshape(100, 100), interpolation=None)
#fig.subplots_adjust(right=0.8)
#cbar_ax = fig.add_axes([0.85, 0.05])
fig.colorbar(field, ax=axes.ravel().tolist())

plt.imshow(mean.reshape(100, 100), interpolation=None)
plt.colorbar()
notebooks/Sandboxes/TensorFlow/GPFlow_examples.ipynb
molgor/spystats
bsd-2-clause
Create topographic ERF maps in delayed SSP mode This script shows how to apply SSP projectors delayed, that is, at the evoked stage. This is particularly useful to support decisions related to the trade-off between denoising and preserving signal. In this example we demonstrate how to use topographic maps for delayed SSP application.
# Authors: Denis Engemann <[email protected]>
#          Christian Brodbeck <[email protected]>
#          Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)

import numpy as np
import mne
from mne import io
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()
0.14/_downloads/plot_evoked_topomap_delayed_ssp.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
ecg_fname = data_path + '/MEG/sample/sample_audvis_ecg_proj.fif'
event_id, tmin, tmax = 1, -0.2, 0.5

# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)

# delete EEG projections (we know it's the last one)
raw.del_proj(-1)
# add ECG projs for magnetometers
[raw.add_proj(p) for p in mne.read_proj(ecg_fname) if 'axial' in p['desc']]

# pick magnetometer channels
picks = mne.pick_types(raw.info, meg='mag', stim=False, eog=True,
                       include=[], exclude='bads')

# We will make use of the proj `delayed` option to
# interactively select projections at the evoked stage.
# more information can be found in the example/plot_evoked_delayed_ssp.py
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), reject=dict(mag=4e-12),
                    proj='delayed')

evoked = epochs.average()  # average epochs and get an Evoked dataset.
0.14/_downloads/plot_evoked_topomap_delayed_ssp.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Interactively select / deselect the SSP projection vectors
# set time instants in seconds (from 50 to 150ms in a step of 10ms)
times = np.arange(0.05, 0.15, 0.01)

evoked.plot_topomap(times, proj='interactive')
# Hint: the same works for evoked.plot and evoked.plot_topo
0.14/_downloads/plot_evoked_topomap_delayed_ssp.ipynb
mne-tools/mne-tools.github.io
bsd-3-clause
Using the optimal control module to find the pulse
This feature is integrated into the sub-class OptPulseProcessor, which uses methods in the optimal control module to find the optimal pulse sequence for the desired gates. It can find the optimal pulse either for the whole unitary evolution or for each gate; here we choose the second option.
setting_args = {"SNOT": {"num_tslots": 5, "evo_time": 1}, "CNOT": {"num_tslots": 12, "evo_time": 5}} processor = OptPulseProcessor(N=3) processor.add_control(sigmaz(), cyclic_permutation=True) processor.add_control(sigmax(), cyclic_permutation=True) processor.add_control(tensor([sigmax(), sigmax(), identity(2)])) processor.add_control(tensor([identity(2), sigmax(), sigmax()])) processor.load_circuit(qc, setting_args=setting_args, merge_gates=False, verbose=True, amp_ubound=5, amp_lbound=0);
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
To quickly visualize the pulse, Processor has a method called plot_pulses. In the figure below, each colour represents the pulse sequence of one control Hamiltonian in the system as a function of time. In each time interval, the pulse remains constant.
processor.plot_pulses(title="Control pulse of OptPulseProcessor", figsize=(8, 4), dpi=100);
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
To simulate the evolution, we only need to call the method run_state, which calls one of the open system solvers in QuTiP and calculates the time evolution. Without decoherence:
psi0 = tensor([basis(2, 0), basis(2, 0), basis(2, 1)])
result = processor.run_state(init_state=psi0)
print("Probability of measuring state 00:")
print(np.real((basis00.dag() * ptrace(result.states[-1], [0, 1]) * basis00)[0, 0]))
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
With decoherence
processor.t1 = 100
processor.t2 = 30
psi0 = tensor([basis(2, 0), basis(2, 0), basis(2, 1)])
result = processor.run_state(init_state=psi0)
print("Probability of measuring state 00:")
print(np.real((basis00.dag() * ptrace(result.states[-1], [0, 1]) * basis00)[0, 0]))
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
We can see that under noisy evolution there is a non-zero probability of measuring state 00.
Generating pulses based on a quantum computing model
Below, we simulate the same quantum circuit using the sub-class LinearSpinChain. It will find the pulse based on the Hamiltonian available on a quantum computer of the linear spin chain system. Please refer to the notebook of the spin chain model for more details.
processor2 = LinearSpinChain(3)
processor2.load_circuit(qc);
processor2.plot_pulses(title="Control pulse of Spin chain");
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
The first three pulse periods (from $t=0$ to $t\approx5$) are for the three Hadamard gates; they are followed by two long periods for the CNOT gates and then again two Hadamard gates. Different colours represent different kinds of interaction, as shown in the legend. Without decoherence:
psi0 = tensor([basis(2, 0), basis(2, 0), basis(2, 1)])
result = processor2.run_state(init_state=psi0)
print("Probability of measuring state 00:")
print(np.real((basis00.dag() * ptrace(result.states[-1], [0, 1]) * basis00)[0, 0]))
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
With decoherence
processor2.t1 = 100
processor2.t2 = 30
psi0 = tensor([basis(2, 0), basis(2, 0), basis(2, 1)])
result = processor2.run_state(init_state=psi0)
print("Probability of measuring state 00:")
print(np.real((basis00.dag() * ptrace(result.states[-1], [0, 1]) * basis00)[0, 0]))

from qutip.ipynbtools import version_table
version_table()
examples/qip-processor-DJ-algorithm.ipynb
ajgpitch/qutip-notebooks
lgpl-3.0
Motion model
Let's create 100 robots. Every robot is given the command to move forward by 1, but you can see that the results are scattered.
actual_landmarks = Landmarks([[1.0, 0.0]])

robots = []
for i in range(100):
    robots.append(Robot(0, 0, 0))

for robot in robots:
    robot.move(1.0, 0)

draw(0)
monte_calro_localization/notebook_demo.ipynb
Kuwamai/probrobo_note
mit
Observation model
In the same way, we create 100 robots and have them observe a star that is 1 unit ahead. The colourful dots are the positions of the star as seen from each robot.
actual_landmarks = Landmarks([[1.0, 0.0]])

robots = []
for i in range(100):
    robots.append(Robot(0, 0, 0))

for robot in robots:
    robot.observation(actual_landmarks)

draw(1)
monte_calro_localization/notebook_demo.ipynb
Kuwamai/probrobo_note
mit
Likelihood calculation
Here we use a normal distribution for the likelihood calculation. The larger the difference between the robot's observation $d, \varphi$ and a particle's observation $d', \varphi'$, the smaller the value becomes.
$$ p(z|x_t) \propto \frac {\exp \left(- \frac {(d-d')^2}{2\sigma_d^2}\right)}{\sigma_d \sqrt{2 \pi}} \, \frac {\exp \left(- \frac {(\varphi-\varphi')^2}{2\sigma_\varphi^2}\right)}{\sigma_\varphi \sqrt{2 \pi}} $$
The graphs below show the normal distributions plotted separately for $d$ and $\varphi$: $d$ is the orange curve and $\varphi$ is the blue one. As in the observation model above, the landmark is 1 unit away from the robot in the 0° direction, so the $d$ (distance) curve takes its largest values near 1, and the $\varphi$ (direction) curve takes its largest values near 0.
rd = 1
rf = 0
sigma_rd = 1.0 * 0.2
sigma_rf = math.pi * 3 / 180

pd = np.arange(-3, 3, 0.01)
pf = np.arange(-3, 3, 0.01)
d = np.exp(-(rd - pd) ** 2 / (2 * (sigma_rd ** 2))) / (sigma_rd * np.sqrt(2 * np.pi))
f = np.exp(-(rf - pf) ** 2 / (2 * (sigma_rf ** 2))) / (sigma_rf * np.sqrt(2 * np.pi))

fig = plt.figure(figsize=(10, 4))
sp = fig.add_subplot(111)
sp.set_xlim(-0.5, 2.0)
sp.set_ylim(-0.5, 8)

plt.plot(pd, d, color="orange", label="distance")
plt.plot(pf, f, color="blue", label="direction")
plt.legend()
monte_calro_localization/notebook_demo.ipynb
Kuwamai/probrobo_note
mit
The features?
- TV: advertising dollars spent on TV (for a single product, in a given market)
- Radio: advertising dollars spent on Radio (for a single product, in a given market)
- Newspaper: advertising dollars spent on Newspaper (for a single product, in a given market)
What is the response?
- Sales: sales of a single product in a given market (in thousands of widgets)
# print the size of the DataFrame object, i.e., the size of the dataset
data.shape
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
There are 200 observations, corresponding to 200 markets. We can try to discover if there is any relationship between the money spent on a specific type of ad, in a given market, and the sales in that market by plotting the sales figures against each category of advertising expenditure.
fig, axs = plt.subplots(1, 3, sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(16, 8))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Questions
How can the company selling the product decide on how to spend its advertising money in the future? We first need to answer the following question: "Based on this data, does there appear to be a relationship between ads and sales?"
If yes,
1. Which ad types contribute to sales?
2. How strong is the relationship between each ad type and sales?
3. What is the effect of each ad type on sales?
4. Given ad spending in a particular market, can sales be predicted?
We will use Linear Regression to try and answer these questions.
Simple Linear Regression
Simple linear regression is an approach for modeling the relationship between a dependent variable (a "response") and an explanatory variable, also known as a "predictor" or "feature". The relationship is modeled as a linear function $y = \beta_0 + \beta_1x$ whose parameters are estimated from the available data.
In the equation above:
- $y$ is called the response, regressand, endogenous variable, dependent variable, etc.
- $x$ is the feature, regressor, exogenous variable, explanatory variable, predictor, etc.
- $\beta_0$ is known as the intercept
- $\beta_1$ is the regression coefficient, effect, etc.
Together, $\beta_0$ and $\beta_1$ are called parameters, model/regression coefficients, or effects. To create a model, we must discover/learn/estimate the values of these coefficients.
Estimating/Learning Model/Regression Coefficients
Regression coefficients are estimated using a variety of methods. The least squares method, which finds the line that minimizes the sum of squared residuals (or "sum of squared errors"), is among the most often used.
In the pictures below:
- The blue dots are the observed values of x and y.
- The red line is the least squares line.
- The residuals are the distances between the observed values and the least squares line.
$\beta_0$ is the intercept of the least squares line (the value of $y$ when $x$=0); $\beta_1$ is the slope of the least squares line, i.e. the ratio of the vertical change (in $y$) to the horizontal change (in $x$).
We can use the statsmodels package to estimate the model coefficients for the advertising data:
import statsmodels.formula.api as sf

# create a model with Sales as dependent variable and TV as explanatory variable
model = sf.ols('Sales ~ TV', data)

# fit the model to the data
fitted_model = model.fit()

# print the coefficients
print(fitted_model.params)
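As a cross-check on the statsmodels fit (a sketch, assuming data is the advertising DataFrame already loaded), the simple-regression coefficients can also be computed in closed form with numpy:

```python
import numpy as np

x = data['TV'].values
y = data['Sales'].values

# least squares: beta1 = cov(x, y) / var(x), beta0 = mean(y) - beta1 * mean(x)
beta1 = np.cov(x, y, ddof=1)[0, 1] / np.var(x, ddof=1)
beta0 = y.mean() - beta1 * x.mean()
print(beta0, beta1)  # should match fitted_model.params
```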
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Interpreting Model Coefficients Q: How do we interpret the coefficient ($\beta_1$) of the explanatory variable "TV"? A: A unit (a thousand dollars) increase in TV ad spending is associated with a 0.047537 unit (a thousand widgets) increase in Sales, i.e., an additional $1000 spent on TV ads is associated with an increase in sales of ~47.5 widgets. Note that it is, in general, possible to have a negative effect, e.g., an increase in TV ad spending to be associated with a decrease in sales. $\beta_1$ would be negative in this case. Using the Model for Prediction Can we use the model we develop to guide advertising spending decisions? For example, if the company spends $50,000 on TV advertising in a new market, what would the model predict for the sales in that market? $$y = \beta_0 + \beta_1x$$ $$y = 7.032594 + 0.047537 \times 50$$
7.032594 + 0.047537*50
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
The predicted Sales in that market are 9.409444 * 1000 ≈ 9409 widgets. Using Statsmodels:
# create a DataFrame to use with the Statsmodels formula interface
New_TV_spending = pd.DataFrame({'TV': [50]})

# check the newly created DataFrame
New_TV_spending.head()

# use the model created above to predict the sales to be generated by the new TV ad money
sales = fitted_model.predict(New_TV_spending)
print(sales)
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Plotting the Least Squares Line Let's make predictions for the smallest and largest observed values of money spent on TV ads, and then use the predicted values to plot the least squares line:
# create a DataFrame with the minimum and maximum values of TV ad money
New_TV_money = pd.DataFrame({'TV': [data.TV.min(), data.TV.max()]})
print(New_TV_money.head())

# make predictions for those x values and store them
sales_predictions = fitted_model.predict(New_TV_money)
print(sales_predictions)

# plot the observed data
data.plot(kind='scatter', x='TV', y='Sales')

# plot the least squares line
plt.plot(New_TV_money, sales_predictions, c='red', linewidth=2)
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Confidence in Linear Regression Models
Q: Is linear regression a high bias/low variance model, or a low bias/high variance model?
A: High bias/low variance. Under repeated sampling, the line will stay roughly in the same place (low variance), but the average of those models won't do a great job capturing the true relationship (high bias). (Low variance is a useful characteristic when limited training data is available.)
We can use Statsmodels to calculate 95% confidence intervals for the model coefficients, which are interpreted as follows: if the population from which this sample was drawn was sampled 100 times, approximately 95 of those confidence intervals would contain the "true" coefficient.
# print the confidence intervals for the model coefficients
print(fitted_model.conf_int())
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Since we only have a single sample of data, and not the entire population, the "true" value of the regression coefficient is either within this interval or it isn't, but there is no way to actually know. We estimate the regression coefficient using the data we have, and then we characterize the uncertainty about that estimate by giving a confidence interval, an interval that will "probably" contain the true value of the coefficient. Note that there is no probability associated with the true value of the regression coefficient being in the given confidence interval! Also note that using 95% confidence intervals is simply a convention. One can create 90% confidence intervals (narrower intervals), 99% confidence intervals (wider intervals), etc.
Hypothesis Testing and p-values
Closely related to confidence intervals is hypothesis testing. Generally speaking, you start with a null hypothesis and an alternative hypothesis (that is opposite the null). Then, you check whether the data supports rejecting the null hypothesis or failing to reject the null hypothesis. (Note that "failing to reject" the null is not the same as "accepting" the null hypothesis. The alternative hypothesis may indeed be true, except that you just don't have enough data to show that.)
As it relates to model coefficients, here is the conventional hypothesis test:
- null hypothesis: There is no relationship between TV ads and Sales (and thus $\beta_1$ equals zero)
- alternative hypothesis: There is a relationship between TV ads and Sales (and thus $\beta_1$ is not equal to zero)
How do we test this hypothesis? Intuitively, we reject the null (and thus believe the alternative) if the 95% confidence interval does not include zero. Conversely, the p-value represents the probability that the coefficient is actually zero:
# print the p-values for the model coefficients
fitted_model.pvalues
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
If the 95% confidence interval includes zero, the p-value for that coefficient will be greater than 0.05. If the 95% confidence interval does not include zero, the p-value will be less than 0.05. Thus, a p-value less than 0.05 is one way to decide whether there is likely a relationship between the feature and the response. (Again, using 0.05 as the cutoff is just a convention.) In this case, the p-value for TV is far less than 0.05, and so we believe that there is a relationship between TV ads and Sales. Note that we generally ignore the p-value for the intercept. How Well Does the Model Fit the data? The most common way to evaluate the overall fit of a linear model to the available data is by calculating the R-squared (a.k.a, "coefficient of determination") value. R-squared has several interpretations: (1) R-squared ×100 percent of the variation in the dependent variable ($y$) is reduced by taking into account predictor $x$ (2) R-squared is the proportion of variance in the observed data that is "explained" by the model. R-squared is between 0 and 1, and, generally speaking, higher is considered to be better because more variance is accounted for ("explained") by the model. Note, however, that R-squared does not indicate whether a regression model is actually good. You can have a low R-squared value for a good model, or a high R-squared value for a model that does not fit the data! One should evaluate the adequacy of a model by looking at R-squared values as well as residual (i.e., observed value - fitted value) plots, other model statistics, and subject area knowledge. The R-squared value for our simple linear regression model is:
# print the R-squared value for the model
fitted_model.rsquared
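To connect the number to its definition (a sketch, assuming the fitted simple-regression model from above), R-squared can also be computed by hand as 1 minus the ratio of the residual to the total sum of squares:

```python
y = data.Sales
residuals = fitted_model.resid              # observed minus fitted values
ss_res = (residuals ** 2).sum()
ss_tot = ((y - y.mean()) ** 2).sum()
print(1 - ss_res / ss_tot)                  # should match fitted_model.rsquared
```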
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Is that a "good" R-squared value? One cannot generally assess that. What a "good" R-squared value is depends on the domain and therefore R-squared is most useful as a tool for comparing different models. Multiple Linear Regression Simple linear regression can be extended to include multiple explanatory variables: $y = \beta_0 + \beta_1x_1 + ... + \beta_nx_n$ Each $x$ represents a different predictor/feature, and each predictor has its own coefficient. In our case: $y = \beta_0 + \beta_1 \times TV + \beta_2 \times Radio + \beta_3 \times Newspaper$ Let's use Statsmodels to estimate these coefficients:
# create a model with all three features
multi_model = sf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data)
fitted_multi_model = multi_model.fit()

# print the coefficients
print(fitted_multi_model.params)
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
How do we interpret the coefficients? For a given amount of Radio and Newspaper ad spending, an increase of one unit ($1000) in TV ad spending is associated with an increase in Sales of 45.765 widgets. Other information is available in the model summary output:
# print a summary of the fitted model
fitted_multi_model.summary()
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
TV and Radio have significant p-values, whereas Newspaper does not. Thus we reject the null hypothesis for TV and Radio (that there is no association between those features and Sales), and fail to reject the null hypothesis for Newspaper. TV and Radio ad spending are both positively associated with Sales, whereas Newspaper ad spending is slightly negatively associated with Sales. This model has a higher R-squared (0.897) than the previous model, which means that this model provides a better fit to the data than a model that only includes TV.
Feature Selection
How do I decide which features to include in a linear model?
- Try different models and check whether the R-squared value goes up when you add new predictors.
What are the drawbacks to this approach?
- Linear models rely upon a lot of assumptions (such as the predictors/features being independent), and if those assumptions are violated (which they usually are), R-squared is less reliable.
- R-squared is susceptible to overfitting, and thus there is no guarantee that a model with a high R-squared value will generalize well to new data. For example:
# only include TV and Radio in the model
model1 = sf.ols(formula='Sales ~ TV + Radio', data=data).fit()
print(model1.rsquared)

# add Newspaper to the model (which we believe has no association with Sales)
model2 = sf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data).fit()
print(model2.rsquared)
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
R-squared will always increase as you add more features to the model, even if they are unrelated to the response. Thus, selecting the model with the highest R-squared is not a reliable approach for choosing the best linear model. There is an alternative to R-squared called adjusted R-squared that penalizes model complexity (to control for overfitting), but this approach has its own set of issues.
Is there a better approach to feature selection? Cross-validation, which provides a more reliable estimate of out-of-sample error, and thus is better at choosing which model will generalize better to out-of-sample data. Cross-validation can be applied to any type of model, not just linear models.
Linear Regression in scikit-learn
The work done using Statsmodels can also be done using scikit-learn:
# create a DataFrame
feature_cols = ['TV', 'Radio', 'Newspaper']
X = data[feature_cols]
y = data.Sales

from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X, y)

# print intercept and coefficients
print(lm.intercept_)
print(lm.coef_)

# pair the feature names with the coefficients
print(list(zip(feature_cols, lm.coef_)))

# predict for a new observation
lm.predict([[100, 25, 25]])

# calculate the R-squared
lm.score(X, y)
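Following up on the feature-selection discussion above, here is a hedged sketch of how cross-validation could be used to compare the models with and without Newspaper, using scikit-learn's cross_val_score and negative mean squared error as the scoring metric:

```python
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score

lm = LinearRegression()

# 10-fold cross-validated MSE with and without Newspaper
for cols in [['TV', 'Radio'], ['TV', 'Radio', 'Newspaper']]:
    scores = cross_val_score(lm, data[cols], data.Sales,
                             scoring='neg_mean_squared_error', cv=10)
    print(cols, -scores.mean())
```

The model with the lower cross-validated error would be the one expected to generalize better.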
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Handling Categorical Predictors with Two Categories What if one of the predictors was categorical? Let's create a new feature called Size, and randomly assign observations to be small or large:
import numpy as np

# create a Series of booleans in which roughly half are True:
# generate len(data) numbers between 0 and 1
numbers = np.random.rand(len(data))

# create an index of 0s and 1s based on whether the corresponding random number
# is greater than 0.5
index_for_large = (numbers > 0.5)

# create a new data column called Size and set its values to 'small'
data['Size'] = 'small'

# change the values of Size to 'large' whenever the corresponding value of the index is 1
data.loc[index_for_large, 'Size'] = 'large'
data.head()
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
When using scikit-learn, we need to represent all data numerically. For example, if the feature we want to represent has only two categories, we create a dummy variable that represents the categories as a binary value:
# create a new Series called IsLarge
data['IsLarge'] = data.Size.map({'small': 0, 'large': 1})
data.head()
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
The multiple linear regression including the IsLarge predictor:
# create X and y
feature_cols = ['TV', 'Radio', 'Newspaper', 'IsLarge']
X = data[feature_cols]
y = data.Sales

# instantiate, fit
lm = LinearRegression()
lm.fit(X, y)

# print coefficients
list(zip(feature_cols, lm.coef_))
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
How do we interpret the coefficient of IsLarge? For a given amount of TV/Radio/Newspaper ad spending, a large market is associated with an average increase in Sales of 51.55 widgets (as compared to sales in a small market). If we reverse the 0/1 encoding and create the feature 'IsSmall', the coefficient would be the same in absolute value, but negative instead of positive. All that changes is the interpretation of the coefficient.
Handling Categorical Predictors with More than Two Categories
Let's create a new feature called Area, and randomly assign observations to be rural, suburban, or urban:
# set a seed for reproducibility
np.random.seed(123456)

# assign roughly one third of observations to each group
nums = np.random.rand(len(data))
mask_suburban = (nums > 0.33) & (nums < 0.66)
mask_urban = nums > 0.66
data['Area'] = 'rural'
data.loc[mask_suburban, 'Area'] = 'suburban'
data.loc[mask_urban, 'Area'] = 'urban'
data.head()
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
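To back up the sign-flip claim above, here is a quick hypothetical check; IsSmall is a helper column introduced only for this illustration and is not used later.
# hypothetical check: reversing the 0/1 encoding only flips the coefficient's sign
data['IsSmall'] = data.Size.map({'small': 1, 'large': 0})
lm_check = LinearRegression()
lm_check.fit(data[['TV', 'Radio', 'Newspaper', 'IsSmall']], data.Sales)
list(zip(['TV', 'Radio', 'Newspaper', 'IsSmall'], lm_check.coef_))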
We have to represent Area numerically, but an encoding such as 0=rural, 1=suburban, 2=urban would not work because it would imply an ordered relationship between the areas. Instead, we create two more dummy variables.
# create three dummy variables using get_dummies, then exclude the first dummy column
area_dummies = pd.get_dummies(data.Area, prefix='Area').iloc[:, 1:]

# concatenate the dummy variable columns onto the original DataFrame (axis=0 means rows, axis=1 means columns)
data = pd.concat([data, area_dummies], axis=1)
data.head()
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
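A small aside, assuming pandas 0.18 or newer: get_dummies can drop the reference level itself via drop_first=True, which is equivalent to the iloc[:, 1:] step above.
# equivalent sketch: let get_dummies drop the first (reference) level for us
pd.get_dummies(data.Area, prefix='Area', drop_first=True).head()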
rural is coded as Area_suburban=0 and Area_urban=0; suburban is coded as Area_suburban=1 and Area_urban=0; urban is coded as Area_suburban=0 and Area_urban=1. Only two dummies are needed to capture all of the information about the Area feature. (In general, for a categorical feature with k levels, we create k-1 dummy variables.) Let's include the two new dummy variables in the model:
# read data into a DataFrame
# data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)

# create X and y
feature_cols = ['TV', 'Radio', 'Newspaper', 'IsLarge', 'Area_suburban', 'Area_urban']
X = data[feature_cols]
y = data.Sales

# instantiate, fit
lm = LinearRegression()
lm.fit(X, y)

# print coefficients
list(zip(feature_cols, lm.coef_))
Data Science UA - September 2017/Lecture 05 - Modeling Techniques and Regression/Linear_Regression.ipynb
iRipVanWinkle/ml
mit
Hat potential The following potential is often used in physics and other fields to describe symmetry breaking, and is known as the "hat potential": $$ V(x) = -a x^2 + b x^4 $$ Write a function hat(x,a,b) that returns the value of this function:
def hat(x, a, b):
    return -a*x**2 + b*x**4

assert hat(0.0, 1.0, 1.0) == 0.0
assert hat(-1.0, 10.0, 1.0) == -9.0
assert hat(1.0, 10.0, 1.0) == -9.0
assignments/assignment11/OptimizationEx01.ipynb
brettavedisian/phys202-2015-work
mit
Plot this function over the range $x\in\left[-3,3\right]$ with $b=1.0$ and $a=5.0$:
a = 5.0
b = 1.0

x = np.linspace(-3, 3, 100)
plt.figure(figsize=(9, 6))
plt.xlabel('Range'), plt.ylabel('V(x)'), plt.title('Hat Potential')
plt.plot(x, hat(x, a, b))
plt.box(False)
plt.grid(True)
plt.tick_params(axis='x', top='off', direction='out')
plt.tick_params(axis='y', right='off', direction='out');

assert True  # leave this to grade the plot
assignments/assignment11/OptimizationEx01.ipynb
brettavedisian/phys202-2015-work
mit
Write code that finds the two local minima of this function for $b=1.0$ and $a=5.0$. Use scipy.optimize.minimize to find the minima. You will have to think carefully about how to get this function to find both minima. Print the x values of the minima. Plot the function as a blue line. On the same axes, show the minima as red circles. Customize your visualization to make it beautiful and effective.
res1 = opt.minimize_scalar(hat, bounds=(-3, 0), args=(a, b), method='bounded')
res2 = opt.minimize_scalar(hat, bounds=(0, 3), args=(a, b), method='bounded')
print('Local minima: %f, %f' % (res1.x, res2.x))

plt.figure(figsize=(9, 6))
plt.xlabel('Range'), plt.ylabel('V(x)')
plt.plot(x, hat(x, a, b), label="Potential")
plt.scatter(res1.x, res1.fun, marker="o", color="r")
plt.scatter(res2.x, res2.fun, marker="o", color="r")
plt.title('Finding Local Minima of Hat Potential')
plt.box(False), plt.grid(True), plt.xlim(-2.5, 2.5), plt.ylim(-8, 4)
plt.tick_params(axis='x', top='off', direction='out')
plt.tick_params(axis='y', right='off', direction='out')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.);

assert True  # leave this for grading the plot
assignments/assignment11/OptimizationEx01.ipynb
brettavedisian/phys202-2015-work
mit
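The cell above uses minimize_scalar with bounds; since the exercise text mentions scipy.optimize.minimize, here is a hedged sketch of the same idea using minimize with two different starting guesses so that each local minimum is found once.
# sketch: start minimize on either side of zero to land in each well of the hat potential
from scipy.optimize import minimize

left = minimize(lambda v: hat(v[0], a, b), x0=[-1.0])
right = minimize(lambda v: hat(v[0], a, b), x0=[1.0])
print(left.x[0], right.x[0])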
MDR: and this needs my GPU-fan code, too...
import utils_MDR
from utils_MDR import *
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
Setup data We're going to look at the IMDB dataset, which contains movie reviews from IMDB, along with their sentiment. Keras comes with some helpers for this dataset.
from keras.datasets import imdb

idx = imdb.get_word_index()
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
This is the word list:
idx_arr = sorted(idx, key=idx.get)
idx_arr[:10]
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
...and this is the mapping from id to word
## idx2word = {v: k for k, v in idx.iteritems()}  ## Py 2.7
idx2word = {v: k for k, v in idx.items()}  ## Py 3.x
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
We download the reviews using code copied from keras.datasets:
path = get_file('imdb_full.pkl',
                origin='https://s3.amazonaws.com/text-datasets/imdb_full.pkl',
                md5_hash='d091312047c43cf9e4e38fef92437263')
f = open(path, 'rb')
(x_train, labels_train), (x_test, labels_test) = pickle.load(f)
len(x_train)
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
Here's the 1st review. As you see, the words have been replaced by ids. The ids can be looked up in idx2word.
', '.join(map(str, x_train[0]))
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
The first word of the first review is 23022. Let's see what that is.
idx2word[23022]
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
Here's the whole review, mapped from ids to words.
' '.join([idx2word[o] for o in x_train[0]])
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
The labels are 1 for positive, 0 for negative.
labels_train[:10]
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
Reduce vocab size by setting rare words to max index.
vocab_size = 5000

trn = [np.array([i if i < vocab_size-1 else vocab_size-1 for i in s]) for s in x_train]
test = [np.array([i if i < vocab_size-1 else vocab_size-1 for i in s]) for s in x_test]
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
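Just a sketch of an equivalent way to express the clipping above: capping each word id at vocab_size-1 with np.minimum gives the same result as the conditional inside the list comprehension.
# equivalent sketch: cap rare word ids at the top index with np.minimum
trn_alt = [np.minimum(np.array(s), vocab_size - 1) for s in x_train]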
Look at distribution of lengths of sentences.
## create an array of 'len'...gths
# lens = np.array(map(len, trn))  ## only works in Py2.x, not 3.x ...
## 'map' in Python 3 returns an iterator, while 'map' in Python 2 returns a list
## (https://stackoverflow.com/questions/35691489/error-in-python-3-5-cant-add-map-results-together)
# This is a quick fix - not really a proper Py3.x approach.
lens = np.array(list(map(len, trn)))  ## wrapped a list around it
(lens.max(), lens.min(), lens.mean())
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
Pad (with zero) or truncate each sentence to make consistent length.
seq_len = 500

trn = sequence.pad_sequences(trn, maxlen=seq_len, value=0)
test = sequence.pad_sequences(test, maxlen=seq_len, value=0)
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
This results in nice rectangular matrices that can be passed to ML algorithms. Reviews shorter than 500 words are pre-padded with zeros; longer reviews are truncated (a toy illustration of this follows the next cell).
trn.shape
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
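To make the pre-padding and truncating behaviour concrete, here is a tiny toy illustration (made-up sequences, not the IMDB data); by default pad_sequences pads and truncates at the start of each sequence.
# toy illustration: short sequences are pre-padded with zeros, long ones pre-truncated
from keras.preprocessing import sequence
toy = [[1, 2, 3], [4, 5, 6, 7, 8, 9]]
sequence.pad_sequences(toy, maxlen=5, value=0)
# -> array([[0, 0, 1, 2, 3],
#           [5, 6, 7, 8, 9]])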
Create simple models Single hidden layer NN The simplest model that tends to give reasonable results is a single hidden layer net. So let's try that. Note that we can't expect to get any useful results by feeding word ids directly into a neural net - so instead we use an embedding to replace them with a vector of 32 (initially random) floats for each word in the vocab.
model = Sequential([
    Embedding(vocab_size, 32, input_length=seq_len),
    Flatten(),
    Dense(100, activation='relu'),
    Dropout(0.7),
    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
model.summary()

set_gpu_fan_speed(90)
model.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=2, batch_size=64)
set_gpu_fan_speed(0)
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0
The Stanford paper that this dataset is from cites a state-of-the-art accuracy (without unlabelled data) of 0.883, so we're short of that, but on the right track. Single conv layer with max pooling A CNN is likely to work better, since it's designed to take advantage of ordered data. We'll need to use a 1D CNN, since a sequence of words is 1D.
conv1 = Sequential([
    Embedding(vocab_size, 32, input_length=seq_len, dropout=0.2),
    Dropout(0.2),
    Convolution1D(64, 5, border_mode='same', activation='relu'),
    Dropout(0.2),
    MaxPooling1D(),
    Flatten(),
    Dense(100, activation='relu'),
    Dropout(0.7),
    Dense(1, activation='sigmoid')])
conv1.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])

set_gpu_fan_speed(90)
conv1.fit(trn, labels_train, validation_data=(test, labels_test), nb_epoch=4, batch_size=64)
set_gpu_fan_speed(0)
deeplearning1/nbs/lesson5.ipynb
Mdround/fastai-deeplearning1
apache-2.0