Columns: markdown — string (0 to 37k chars); code — string (1 to 33.3k chars); path — string (8 to 215 chars); repo_name — string (6 to 77 chars); license — string (15 classes)
CelebA The CelebFaces Attributes (CelebA) dataset contains over 200,000 celebrity images with annotations. Since you're going to be generating faces, you won't need the annotations. You can view the first show_n_images examples by changing the value of show_n_images.
show_n_images = 25 """ DON'T MODIFY ANYTHING IN THIS CELL """ mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB') pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB')) mnist_images.shape
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Preprocess the Data Since the project's main focus is on building the GANs, we'll preprocess the data for you. The values of the MNIST and CelebA datasets will be in the range of -0.5 to 0.5, with 28x28-dimensional images. The CelebA images will be cropped to remove parts of the image that don't include a face, then resized down to 28x28. The MNIST images are black and white images with a single [color channel](https://en.wikipedia.org/wiki/Channel_(digital_image%29) while the CelebA images have [3 color channels (RGB color channel)](https://en.wikipedia.org/wiki/Channel_(digital_image%29#RGB_Images). Build the Neural Network You'll build the components necessary to build a GAN by implementing the following functions: - model_inputs - discriminator - generator - model_loss - model_opt - train Check the Version of TensorFlow and Access to GPU This will check to make sure you have the correct version of TensorFlow and access to a GPU.
""" DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__) print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Input Implement the model_inputs function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Real input images placeholder with rank 4 using image_width, image_height, and image_channels. - Z input placeholder with rank 2 using z_dim. - Learning rate placeholder with rank 0. Return the placeholders in the following tuple: (tensor of real input images, tensor of z data, learning rate).
import problem_unittests as tests def model_inputs(image_width, image_height, image_channels, z_dim): """ Create the model inputs :param image_width: The input image width --> 28 :param image_height: The input image height --> 28 :param image_channels: The number of image channels --> 3 RGB :param z_dim: The dimension of Z --> n :return: Tuple of (tensor of real input images, tensor of z data, learning rate) """ # TODO: Implement Function # def model_inputs(real_dim, z_dim): # real_size = (32,32,3) real_dim = (image_height, image_width, image_channels) # hxwxc input_real = tf.placeholder(dtype=tf.float32, shape=(None, *real_dim), name='input_real') input_z = tf.placeholder(dtype=tf.float32, shape=(None, z_dim), name='input_z') learning_rate = tf.placeholder(dtype=tf.float32, shape=None, name='learning_rate') return input_real, input_z, learning_rate """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_inputs(model_inputs)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Discriminator Implement discriminator to create a discriminator neural network that discriminates on images. This function should be able to reuse the variables in the neural network. Use tf.variable_scope with a scope name of "discriminator" to allow the variables to be reused. The function should return a tuple of (tensor output of the discriminator, tensor logits of the discriminator).
def conv2d_xavier(inputs, filters, kernel_size, strides, padding): #, trainable, reuse out_conv = tf.layers.conv2d(inputs, filters, kernel_size, strides, padding, data_format='channels_last', #strides=(1, 1), padding='valid', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, trainable=True, name=None, reuse=None) return out_conv def conv2d_transpose_xavier(inputs, filters, kernel_size, strides, padding): #, trainable, reuse out_conv_T = tf.layers.conv2d_transpose(inputs, filters, kernel_size, strides, padding, data_format='channels_last', #strides=(1, 1), padding='valid', dilation_rate=(1, 1), activation=None, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer_conv2d(uniform=True, seed=None, dtype=tf.float32), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, trainable=True, name=None, reuse=None) return out_conv_T def dense_xavier(inputs, units): out_dense = tf.layers.dense(inputs, units, activation=None, use_bias=True, kernel_initializer=tf.contrib.layers.xavier_initializer(uniform=True, seed=None, dtype=tf.float32), bias_initializer=tf.zeros_initializer(), kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, trainable=True, name=None, reuse=None) return out_dense def discriminator(images, reuse=False): """ Create the discriminator network :param image: Tensor of input image(s) --> txwxhxc --> tx28x28x3 :param reuse: Boolean if the weights should be reused :return: Tuple of (tensor output of the discriminator, tensor logits of the discriminator) """ # TODO: Implement Function # Implementation for DCGAN-svhn # def discriminator(x, reuse=False, alpha=0.2): alpha=0.2 # for leaky ReLU with tf.variable_scope('discriminator', reuse=reuse): # Input layer is 32x32x3 -> 28x28x3 in this case # x1 = tf.layers.conv2d(inputs=images, filters=64, kernel_size=5, strides=2, padding='same') x1 = conv2d_xavier(inputs=images, filters=64, kernel_size=5, strides=2, padding='same') bn1 = tf.layers.batch_normalization(inputs=x1, training=True) lrelu1 = tf.maximum(alpha * bn1, bn1) # 16x16x64 -> 14x14x64 with stride 2 ans same image # x2 = tf.layers.conv2d(inputs=lrelu1, filters=128, kernel_size=5, strides=2, padding='same') x2 = conv2d_xavier(inputs=lrelu1, filters=128, kernel_size=5, strides=2, padding='same') bn2 = tf.layers.batch_normalization(inputs=x2, training=True) lrelu2 = tf.maximum(alpha * bn2, bn2) # 8x8x128 -> 7x7x128 with stride 2 # x3 = tf.layers.conv2d(inputs=lrelu2, filters=256, kernel_size=7, strides=1, padding='same') x3 = conv2d_xavier(inputs=lrelu2, filters=256, kernel_size=7, strides=1, padding='same') bn3 = tf.layers.batch_normalization(inputs=x3, training=True) lrelu3 = tf.maximum(alpha * bn3, bn3) # 4x4x256 -> 7x7x256 with stride 1 so NO change # Flatten it/ flattening layer # 1st way # flat = tf.reshape(relu3, (-1, 4*4*256)) # 2nd way to flattening # Feedback tip # Excellent work. 
# You can flatten a layer with tf.contrib.layers.flatten() # check out: https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten # flat = tf.contrib.layers.flatten(inputs=relu3) flat = tf.reshape(tensor=lrelu3, shape=(-1, 7*7*256), name=None) # # How to add dropout and WHERE to add the dropout layer # # Dense Layer # pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) # dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) # dropout = tf.layers.dropout( # inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN) # # Logits Layer # logits = tf.layers.dense(inputs=dropout, units=10) flat = tf.layers.dropout(inputs=flat, rate=0.5, training=True) # logits = tf.layers.dense(inputs=flat, units=1) # 1 output unit for 1, 0 or real/fake as the binary output logits = dense_xavier(inputs=flat, units=1) # 1 output unit for 1, 0 or real/fake as the binary output out = tf.sigmoid(x=logits) # prob/confidence for one output/unit classification return out, logits # # 2nd feedback tips # Most of the suggestions are same for both Generator and Discriminator. # Discriminator # 1- Using dropout in discriminator so that it is less prone to learning the data distribution. # 2- Use custom weight initialization. Xavier init is proposed to work best when working with GANs. # Generator # 1- Try setting leak for leaky_relu a bit lower. Did you tried 0.1 ? # 2- Try decreasing the width of layers from 512 -> 64. In context of GANs, # a sharp decline in number of filters for Generator helps produce better results. """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_discriminator(discriminator, tf)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Generator Implement generator to generate an image using z. This function should be able to reuse the variables in the neural network. Use tf.variable_scope with a scope name of "generator" to allow the variables to be reused. The function should return the generated 28 x 28 x out_channel_dim images.
def generator(z, out_channel_dim, is_train=True): """ Create the generator network :param z: Input z :param out_channel_dim: The number of channels in the output image :param is_train: Boolean if generator is being used for training :return: The tensor output of the generator """ # TODO: Implement Function # def generator(z, output_dim, reuse=False, alpha=0.2, training=True): alpha=0.2 reuse = not is_train with tf.variable_scope('generator', reuse=reuse): # First fully connected layer # x1 = tf.layers.dense(z, 4*4*512) # z is z_1x100=mat_txn, t/time/bacth, n/space/num_dim # x1 = tf.layers.dense(inputs=z, units=7*7*512) # start from the last layer of discriminator/classifier/recognizer/perdictor x1 = dense_xavier(inputs=z, units=7*7*512) # start from the last layer of discriminator/classifier/recognizer/perdictor # x1_1x7*7*512 -> 1x7x7x512 # Reshape it to start the convolutional stack x1 = tf.reshape(tensor=x1, shape=(-1, 7, 7, 512), name=None) bn1 = tf.layers.batch_normalization(inputs=x1, training=is_train) lrelu1 = tf.maximum(alpha * bn1, bn1) # 4x4x512 now -> tx7x7x512, t=1 means one sample/record/image in each fwd/bwd prop/pass/path/route # x2 = tf.layers.conv2d_transpose(inputs=lrelu1, filters=256, kernel_size=7, strides=1, padding='same') x2 = conv2d_transpose_xavier(inputs=lrelu1, filters=256, kernel_size=7, strides=1, padding='same') bn2 = tf.layers.batch_normalization(inputs=x2, training=is_train) lrelu2 = tf.maximum(alpha * bn2, bn2) # 8x8x256 now -> 7x7x256 with stride=1 # x3 = tf.layers.conv2d_transpose(inputs=lrelu2, filters=128, kernel_size=5, strides=2, padding='same') x3 = conv2d_transpose_xavier(inputs=lrelu2, filters=128, kernel_size=5, strides=2, padding='same') bn3 = tf.layers.batch_normalization(inputs=x3, training=is_train) lrelu3 = tf.maximum(alpha * bn3, bn3) # 16x16x128 now -> tx14x14x128 # # How to add dropout and WHERE to add the dropout layer # # Dense Layer # pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) # dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) # dropout = tf.layers.dropout( # inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN) # # Logits Layer # logits = tf.layers.dense(inputs=dropout, units=10) lrelu3 = tf.layers.dropout(inputs=lrelu3, rate=0.5, training=is_train) # Output layer # logits = tf.layers.conv2d_transpose(inputs=lrelu3, filters=out_channel_dim, kernel_size=5, strides=2, padding='same') logits = conv2d_transpose_xavier(inputs=lrelu3, filters=out_channel_dim, kernel_size=5, strides=2, padding='same') # 32x32x3 now -> tx28x28x3 the actual image size # prediction and regression -1, +1 out = tf.tanh(logits) return out """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_generator(generator, tf)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Loss Implement model_loss to build the GANs for training and calculate the loss. The function should return a tuple of (discriminator loss, generator loss). Use the following functions you implemented: - discriminator(images, reuse=False) - generator(z, out_channel_dim, is_train=True)
import numpy as np def model_loss(input_real, input_z, out_channel_dim): """ Get the loss for the discriminator and generator :param input_real: Images from the real dataset :param input_z: Z input :param out_channel_dim: The number of channels in the output image :return: A tuple of (discriminator loss, generator loss) """ # TODO: Implement Function g_model = generator(z=input_z, out_channel_dim=out_channel_dim) # alpha is included inside d_model_real, d_logits_real = discriminator(images=input_real) # alpha included d_model_fake, d_logits_fake = discriminator(images=g_model, reuse=True) # alpha included # Label smoothing to add noise to the labels for the fake and real labels d_loss_real = tf.reduce_mean(input_tensor= # tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real))) tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, # should be around 1.0 labels=tf.ones_like(d_model_real) * np.random.uniform(low=0.7, high=1.2))) d_loss_fake = tf.reduce_mean(input_tensor= # tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake))) tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, # should close to 0.0 as noise labels=tf.zeros_like(d_model_fake) + np.random.uniform(low=0.0, high=0.3))) g_loss = tf.reduce_mean(input_tensor= tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake))) d_loss = d_loss_real + d_loss_fake return d_loss, g_loss # 2nd feedback hints/tips # The function model_loss is implemented correctly. # Perfect. # Now that was the trickiest part (and my personal favorite in GAN :) # Tips # 1- Use Label Smoothing for Discriminator loss, will help it generalize better. # If you have two target labels: Real=1 and Fake=0, then for each incoming sample, # if it is real, then replace the label with a random number between 0.7 and 1.2, # and if it is a fake sample, replace it with 0.0 and 0.3 (for example). # A simple change like labels = tf.ones_like(d_logits_real) * np.random.uniform(0.7, 1.2) will help with # the training process. # This is known as label smoothing, typically used with classifiers to improve performance. """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_loss(model_loss)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Optimization Implement model_opt to create the optimization operations for the GANs. Use tf.trainable_variables to get all the trainable variables. Filter the variables with names that are in the discriminator and generator scope names. The function should return a tuple of (discriminator training operation, generator training operation).
def model_opt(d_loss, g_loss, learning_rate, beta1): """ Get optimization operations :param d_loss: Discriminator loss Tensor :param g_loss: Generator loss Tensor :param learning_rate: Learning Rate Placeholder :param beta1: The exponential decay rate for the 1st moment in the optimizer :return: A tuple of (discriminator training operation, generator training operation) """ # TODO: Implement Function # Get weights and bias to update t_vars = tf.trainable_variables() d_vars = [var for var in t_vars if var.name.startswith('discriminator')] g_vars = [var for var in t_vars if var.name.startswith('generator')] # Optimize d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars) g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars) return d_train_opt, g_train_opt """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_model_opt(model_opt, tf)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Train Implement train to build and train the GANs. Use the following functions you implemented: - model_inputs(image_width, image_height, image_channels, z_dim) - model_loss(input_real, input_z, out_channel_dim) - model_opt(d_loss, g_loss, learning_rate, beta1) Use the show_generator_output to show generator output while you train. Running show_generator_output for every batch will drastically increase training time and increase the size of the notebook. It's recommended to print the generator output every 100 batches.
def train(epoch_count, batch_size, z_dim, learning_rate, beta1, get_batches, data_shape, data_image_mode): """ Train the GAN :param epoch_count: Number of epochs :param batch_size: Batch Size :param z_dim: Z dimension :param learning_rate: Learning Rate :param beta1: The exponential decay rate for the 1st moment in the optimizer :param get_batches: Function to get batches :param data_shape: Shape of the data :param data_image_mode: The image mode to use for images ("RGB" or "L") """ # TODO: Build Model # data_shape, e.g. mnist_dataset.shape == txhxwxc == shape== 0, 1, 2, 3 input_real, input_z, lr = model_inputs(image_width=data_shape[1], image_height=data_shape[2], image_channels=data_shape[3], z_dim=z_dim) # 1st and 2nd Feedback tip # Great success has been shown if you wrap g_train_opt with # tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)): # More details can be found here: http://ruishu.io/2016/12/27/batchnorm/ # tf.control_dependencies() is used in the batch normalization lessons, # and is necessary to get the normalization layers created # with tf.layers.batch_normalization to update their population statistics, # which we need when performing inference. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): # Ensures that we execute the update_ops before performing the train_step # train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss) d_loss, g_loss = model_loss(input_real=input_real, input_z=input_z, out_channel_dim=data_shape[3]) d_opt, g_opt = model_opt(d_loss=d_loss, g_loss=g_loss, learning_rate=lr, beta1=beta1) steps = 0 # for printing the generator output every 100 batches show_every = 100 # show the generated images print_every = 10 # printing out the loss for g and d with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(epoch_count): # batches_txn: t== number of minibatches, # n== batch_size/ number of smaples/records/images in each minibatch for batch_images in get_batches(batch_size): # TODO: Train Model # 1st and 2nd Feedback tip # Since the images are scaled to -0.5:0.5 and we are using tanh in the generator, # you will likely see improved performance # if you perform batch_images *= 2 in order to scale it to -1:1 batch_images *= 2 # for x, y in dataset.batches(batch_size): steps += 1 # Sample random noise for G batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim)) # txn: t==batch_size, n==z_dim # Run optimizers _ = sess.run(d_opt, feed_dict={input_real: batch_images, input_z: batch_z, lr: learning_rate}) _ = sess.run(g_opt, feed_dict={input_z: batch_z, lr: learning_rate}) # 1st time # 1st feedback tip # Another tip, while not required, # you can try to run the optimization for the generator twice --> g_opt # to make sure that the discriminator loss does not go to zero. _ = sess.run(g_opt, feed_dict={input_z: batch_z, lr: learning_rate}) # 2nd time # Feedback tip: 1st one # You're close! # You may need to tune your discriminator and generator a bit, # but first follow my advice below. # Additionally, you should print the loss after each batch for both of your networks, # that will give you some hints on what you need to change to get good output. # For example, if your discriminator loss goes to zero, # you know you may need to make your generator stronger, or # run the optimizer for it again for each batch. 
if steps % print_every == 0: # At the end of each epoch, get the losses and print them out train_loss_d = d_loss.eval(feed_dict={input_z: batch_z, input_real: batch_images, lr: learning_rate}) train_loss_g = g_loss.eval(feed_dict={input_z: batch_z, lr: learning_rate}) print("Epoch {}/{}...".format(epoch_i+1, epoch_count), "stpes: {:.4f}...".format(steps), "Discriminator Loss: {:.4f}...".format(train_loss_d), "Generator Loss: {:.4f}".format(train_loss_g)) # show generator output every 100 minibatches training in each epoch # one epoch, t minibatches, each minibatch for training/updating the params in gnet and dnet if steps % show_every == 0: show_n_images = 4 # nrows=5 , ncols=5, nrows*ncols=25 show_generator_output(sess=sess, n_images=show_n_images, input_z=input_z, out_channel_dim=data_shape[3], image_mode=data_image_mode)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
Setting up and fine-tuning the hyperparameters for both datasets
# Hyperparameters for both dataset GANs training, valid, and testing batch_size = 128 z_dim = 100 learning_rate = 0.0002 # 2/128 = 1/64 beta1 = 0.5 # The hyperparameters in DCGAN SVHN impl recom. # real_size = (32,32,3) -> (28, 28, 3) in this case for both mnist and celebA datasets # z_size = 100 # learning_rate = 0.0002 # batch_size = 128 # epochs = 25 # alpha = 0.2 # beta1 = 0.5 # 2nd feedback comments/hints/tips # Given your network architecture, the choice of hyper-parameter are reasonable. # Tips # 1- You selected a good value for beta1. # Here's a good post explaining the importance of beta values and which value might be empirically better. # Also try lowering it even further, ~0.1 might even produce better results. # 2- An important point to note is, batch size and learning rate are linked. # If the batch size is too small then the gradients will become more unstable and would need to # reduce the learning rate. # Batch size used is on higher side. Try setting a value around 32/64. # We know that larger batch sizes might speed up the training but can degrade the quality of the model at the same time. # This link might help you. # You can also read about this in the hyperparameters module in your classroom.
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
MNIST Test your GANs architecture on MNIST. After 2 epochs, the GANs should be able to generate images that look like handwritten digits. Make sure the loss of the generator is lower than the loss of the discriminator or close to 0.
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ epochs = 2 mnist_dataset = helper.Dataset('mnist', glob(os.path.join(data_dir, 'mnist/*.jpg'))) with tf.Graph().as_default(): train(epochs, batch_size, z_dim, learning_rate, beta1, mnist_dataset.get_batches, mnist_dataset.shape, mnist_dataset.image_mode)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
CelebA Run your GANs on CelebA. It will take around 20 minutes on the average GPU to run one epoch. You can run the whole epoch or stop when it starts to generate realistic faces.
""" DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ epochs = 1 celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))) with tf.Graph().as_default(): train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)
udacity-dl/GAN/dlnd_face_generation.ipynb
arasdar/DL
unlicense
JSON format Basic MySQL database operations Command-line operations
# Connect to the database mysql -u root -p # -u is the username; -p means a password is required to log in # Show the databases show databases; # Select a database use database_name; # Show the tables in the database show tables; # Show the structure of a table desc table_name; # Show the data in a table select * from table_name; # Show data with a limit on the number of rows select * from table_name limit 10;
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Database management tool: Sequel Pro. Link: http://www.sequelpro.com/ How MySQL differs from Excel | Name | Gender | Age | Class | Exam | Chinese | Math | English | Physics | Chemistry | Biology | | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | | 高海 | M | 18 | Senior 3, Class 1 | First mock exam | 90 | 126 | 119 | 75 | 59 | 89 | | 高海 | M | 18 | Senior 3, Class 1 | Second mock exam | 80 | 120 | 123 | 85 | 78 | 87 | | 秦佳艺 | F | 18 | Senior 3, Class 2 | First mock exam | 78 | 118 | 140 | 89 | 80 | 78 | | 秦佳艺 | F | 18 | Senior 3, Class 2 | Second mock exam | 79 | 120 | 140 | 83 | 78 | 82 | Operating the database from the command line Create a database: create database Examination_copy; Drop a database: drop database Examination_copy; Create a database with a specified character set and collation: create database Examination_copy default charset utf8mb4 collate utf8mb4_general_ci; Create a table: CREATE TABLE class ( id int(11) unsigned NOT NULL AUTO_INCREMENT, name varchar(80) NOT NULL, PRIMARY KEY (id) ); Converting decimal fractions to binary
# 3 converts to binary as 11 (the integer part). For the fractional part, 0.4 ≈ 0*0.5 + 1*0.25 + 1*0.125, so the first bits after the binary point are 0 1 1; the full expansion (0.0110 0110...) repeats and never terminates.
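To make the hand calculation above concrete, here is a small Python sketch that expands the fractional part of a number into binary by repeated doubling. The function name and the 8-digit cutoff are choices made here for illustration, not something from the original notes.

```python
def frac_to_binary(x, digits=8):
    """Expand the fractional part of x into binary digits by repeated doubling."""
    bits = []
    for _ in range(digits):
        x *= 2
        bit = int(x)          # the integer part is the next binary digit
        bits.append(str(bit))
        x -= bit
    return ''.join(bits)

# 3.4 -> integer part 3 is '11' in binary; the fractional part 0.4 repeats
print(bin(3)[2:], '.', frac_to_binary(0.4))   # 11 . 01100110
```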
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
MySQL data types http://www.runoob.com/mysql/mysql-data-types.html Inserting data
insert into `class`(`id`, `name`) values(1, '高一三班');
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Updating data
update `class` set `name` = '高一五班' where `name` = '高一三班';
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Deleting data
delete from `class` where `id` = 6;
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Using Python to operate the database Installing third-party Python libraries: 1. pip; for example: pip install pymysql 2. conda; for example: conda install pymysql
import MySQLdb DATABASE = { 'host': '127.0.0.1', # for a remote database, put the remote server's IP address here 'database': 'Examination', 'user': 'root', 'password': 'wangwei', 'charset': 'utf8mb4' } db = MySQLdb.connect(host='localhost', user='root', password='wangwei', db='Examination') # equivalent to db = MySQLdb.connect('localhost', 'root', 'wangwei', 'Examination') # equivalent to db = MySQLdb.connect(**DATABASE) # db now represents our database connection
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Cursors
cursor = db.cursor()
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Querying data
sql = "select * from student where id <= 20 limit 4" cursor.execute(sql) results = cursor.fetchall() for row in results: print(row)
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Inserting data
sql = "insert into `class`(`name`) values('高一五班');" cursor = db.cursor() cursor.execute(sql) cursor.execute(sql) db.commit()
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Deleting data
sql = "delete from `class` where `name`='高一五班'" cursor = db.cursor() cursor.execute(sql) db.commit()
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Updating data
sql = "update `class` set `name`='高一十四班' where `id`=4;" cursor = db.cursor() cursor.execute(sql) db.commit()
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Catching exceptions
a = 10 b = a + 'hello' try: a = 10 b = a + 'hello' except TypeError as e: print(e)
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Open issue: the database rollback operation fails
try: sql = "insert into `class`(`name`) values('高一十六班')" cursor = db.cursor() cursor.execute(sql) error = 10 + 'sdfsdf' db.commit() except Exception as e: print(e) db.rollback()
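A likely cause of the rollback issue above (this is an assumption on my part, not something stated in the notebook) is that the `class` table uses a non-transactional storage engine such as MyISAM, or that the connection commits implicitly. A minimal sketch of an explicitly transactional retry, reusing the `db` connection created earlier:

```python
# Sketch only: assumes `db` is the MySQLdb connection from the earlier cells and that
# the table uses a transactional engine (e.g. after: ALTER TABLE `class` ENGINE=InnoDB;)
db.autocommit(False)               # make sure nothing is committed implicitly
cursor = db.cursor()
try:
    cursor.execute("insert into `class`(`name`) values('高一十六班')")
    error = 10 + 'sdfsdf'          # deliberately raise a TypeError before commit
    db.commit()
except Exception as e:
    print(e)
    db.rollback()                  # the pending insert is discarded
```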
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Web scraping Python libraries: 1. requests — used to fetch page content 2. BeautifulSoup Installation pip install requests pip install bs4
import time import MySQLdb import requests from bs4 import BeautifulSoup # Database configuration; everyone's setup is different, so fill in your own values DATABASE = { 'host': '127.0.0.1', # for a remote database, put the remote server's IP address here 'database': '', 'user': '', 'password': '', 'charset': 'utf8mb4' } # Fetch the page content at url and return a soup object def get_page(url): response = requests.get(url) soup = BeautifulSoup(response.text, 'lxml') return soup # Wrapped as a function: collect the links to all rental listing pages on a list page and return them as a list def get_links(link_url): soup = get_page(link_url) links_div = soup.find_all('div', class_="pic-panel") links = [div.a.get('href') for div in links_div] return links def get_house_info(house_url): soup = get_page(house_url) price = soup.find('span', class_='total').text unit = soup.find('span', class_='unit').text.strip() house_info = soup.find_all('p') area = house_info[0].text[3:] layout = house_info[1].text[5:] floor = house_info[2].text[3:] direction = house_info[3].text[5:] subway = house_info[4].text[3:] community = house_info[5].text[3:] location = house_info[6].text[3:] create_time = house_info[7].text[3:] agent = soup.find('a', class_='name LOGCLICK') agent_name = agent.text agent_id = agent.get('data-el') evaluate = soup.find('div', class_='evaluate') score, number = evaluate.find('span', class_='rate').text.split('/') times = evaluate.find('span', class_='time').text[5:-1] info = { '价格': price, '单位': unit, '面积': area, '户型': layout, '楼层': floor, '朝向': direction, '发布时间': create_time, '地铁': subway, '小区': community, '位置': location, '经纪人名字': agent_name, '经纪人id': agent_id } return info def get_db(setting): return MySQLdb.connect(**setting) def insert(db, house): values = "'{}',"* 10 + "'{}'" sql_values = values.format(house['价格'],house['单位'],house['面积'],house['户型'], house['楼层'],house['朝向'],house['地铁'],house['小区'], house['位置'],house['经纪人名字'],house['经纪人id']) sql = """ insert into `house`(`price`, `unit`, `area`, `layout`, `floor`, `direction`, `subway`, `community`, `location`, `agent_name`, `agent_id`) values({}) """.format(sql_values) print(sql) cursor = db.cursor() cursor.execute(sql) db.commit() db = get_db(DATABASE) links = get_links('https://bj.lianjia.com/zufang/') for link in links: time.sleep(2) print('Fetched one house listing successfully') house = get_house_info(link) insert(db, house)
0809下午python第三课课件.ipynb
superliaoyong/plist-forsource
apache-2.0
Performance data from a web server The data for this exercise were collected from a web server that hosts a website. The observations are per-minute averages of the following variables: - Duracao_media_ms: average processing time of an HTTP request (in milliseconds); - Perc_medio_CPU: average CPU usage (percent); - Load_avg_minute: total processor load; - Requests_média: average number of requests processed.
df = pd.read_csv('servidor.csv') df.head() df.info() df.describe() results = smf.ols('Duracao_media_ms ~ Perc_medio_CPU + Load_avg_minute + Requests_média', data=df).fit() results.summary() X = df.drop('Duracao_media_ms',axis=1) Xe = sm.add_constant(X,prepend=True) vif = [variance_inflation_factor(Xe.values, i) for i in range(Xe.shape[1])] vif_s =pd.Series(vif, index =Xe.columns) print(vif_s) name = ['Lagrange multiplier statistic', 'p-value', 'f-value', 'f p-value'] testeH = het_breuschpagan(results.resid, results.model.exog) lzip(name, testeH)
book/capt10/server_load.ipynb
cleuton/datascience
apache-2.0
In the paper there are two suggested kernels for modelling the covariance of the Kepler data (Eqs. 55 & 56). In the paper the authors fit Eq 56 - here we are going to fit Eq. 55. We can do this using a combination of kernels from the george library. Exponential Squared Kernel: $$ k_1(x_i,x_j)=h_1 \exp(−\frac{(x_i-x_j)^2}{2 \sigma^2}) $$ Exp-Sine-Squared Kernel: $$ k_2(x_i,x_j)=h_2 \exp \left( \Gamma \sin^2 \left[ \frac{\pi}{P}|x_i - x_j|\right] \right) $$ Our combined kernel is therefore: $$ k(x_i,x_j)=h \exp(−\frac{(x_i-x_j)^2}{2 \sigma^2})\exp \left( \Gamma \sin^2 \left[ \frac{\pi}{P}|x_i - x_j|\right] \right) $$ where $h = h_1 h_2$. However, following https://arxiv.org/pdf/1706.05459.pdf, we are also going to add a white noise kernel in: $$ k_3(x_i,x_j)=c \delta_{ij} $$ So our final kernel will be: $$ k = (k_1k_2) + k_3 $$
# h =1.0; sigma = 1.0; Gamma = 2.0/1.0^2; P = 3.8 k1 = 1.0**2 * kernels.ExpSquaredKernel(1.0**2) \ * kernels.ExpSine2Kernel(2.0 / 1.0**2, 1.0) k2 = kernels.WhiteKernel(0.01) kernel = k1+k2 # first we feed our combined kernel to the George library: gp = george.GP(kernel, mean=0.0) # then we compute the covariance matrix: gp.compute(time) t = np.arange(np.min(time),np.max(time),0.1) # calculate expectation and variance at each point: mu, cov = gp.predict(value, t) std = np.sqrt(np.diag(cov)) ax = pl.subplot(111) pl.plot(t,mu) ax.fill_between(t,mu-std,mu+std,facecolor='lightblue', lw=0, interpolate=True) pl.scatter(time,value,s=2) pl.axis([0.,60.,-1.,1.]) pl.ylabel("Relative flux [ppt]") pl.xlabel("Time [days]") pl.show()
CDT-KickOff/TUTORIAL/KeplerLightCurve.ipynb
as595/AllOfYourBases
gpl-3.0
Now we have the tools to solve the email problem.
# Original character string string = '"Carl Friedrich Gauss" <[email protected]>, "Leonhard Euler" <[email protected]>, "Bernhard Riemann" <[email protected]>' # Remove <, >, and " from string and overwrite and print the result # Create a new variable called string_formatted with the commas replaced by the new line character '\n' # Print string_formatted
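One possible completion of the exercise cell above, using the replace() method; the string is copied verbatim from the cell (including the obfuscated addresses as they appear in this copy):

```python
# Original character string
string = '"Carl Friedrich Gauss" <[email protected]>, "Leonhard Euler" <[email protected]>, "Bernhard Riemann" <[email protected]>'

# Remove <, >, and " from string, overwrite, and print the result
for ch in ['<', '>', '"']:
    string = string.replace(ch, '')
print(string)

# Create string_formatted with the commas replaced by the newline character '\n'
string_formatted = string.replace(', ', '\n')

# Print string_formatted
print(string_formatted)
```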
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
A related problem might be to extract only the email addresses from the original string. To do this, we can use the replace() method to remove the '<', '>', and ',' characters. Then we use the split() method to break the string apart at the spaces. Then we loop over the resulting list of strings and take only the strings with '@' characters in them.
string = '"Carl Friedrich Gauss" <[email protected]>, "Leonhard Euler" <[email protected]>, "Bernhard Riemann" <[email protected]>'
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Numpy NumPy is a powerful Python module for scientific computing. Among other things, NumPy defines an N-dimensional array object that is especially convenient to use for plotting functions and for simulating and storing time series data. NumPy also defines many useful mathematical functions like, for example, the sine, cosine, and exponential functions, and has excellent functions for probability and statistics including random number generators and many cumulative density functions and probability density functions. Importing NumPy The standard way to import NumPy is with the namespace np, for the sake of brevity. NumPy arrays A NumPy ndarray is a homogeneous multidimensional array. Here, homogeneous means that all of the elements of the array have the same type. An ndarray is a table of numbers (like a matrix but with possibly more dimensions) indexed by a tuple of positive integers. The dimensions of NumPy arrays are called axes and the number of axes is called the rank. For this course, we will work almost exclusively with 1-dimensional arrays that are effectively vectors. Occasionally, we might run into a 2-dimensional array. Basics The most straightforward way to create a NumPy array is to call the array() function, which takes a list as an argument. For example:
# Create a variable called a1 equal to a numpy array containing the numbers 1 through 5 # Find the type of a1 # find the shape of a1 # Use ndim to find the rank or number of dimensions of a1 # Create a variable called a2 equal to a 2-dimensionl numpy array containing the numbers 1 through 4 # find the shape of a2 # Use ndim to find the rank or number of dimensions of a2 # Create a variable called c an empty numpy array # find the shape of a3 # Use ndim to find the rank or number of dimensions of a3
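One possible completion of the exercise comments above; the variable names follow the comments, and the empty-array case uses a3 for consistency with the shape/ndim checks:

```python
import numpy as np

# Create a variable called a1 equal to a numpy array containing the numbers 1 through 5
a1 = np.array([1, 2, 3, 4, 5])
print(type(a1))   # <class 'numpy.ndarray'>
print(a1.shape)   # (5,)
print(a1.ndim)    # 1

# Create a variable called a2 equal to a 2-dimensional numpy array containing the numbers 1 through 4
a2 = np.array([[1, 2], [3, 4]])
print(a2.shape)   # (2, 2)
print(a2.ndim)    # 2

# Create a variable called a3 equal to an empty numpy array
a3 = np.array([])
print(a3.shape)   # (0,)
print(a3.ndim)    # 1
```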
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Special functions for creating arrays Numpy has several built-in functions that can assist you in creating certain types of arrays: arange(), zeros(), and ones(). Of these, arange() is probably the most useful because it allows you to create an array of numbers by specifying the initial value in the array, the maximum value in the array, and a step size between elements. arange() has three arguments: start, stop, and step: arange([start,] stop[, step,]) The stop argument is required. The default for start is 0 and the default for step is 1. Note that the values in the created array will stop one increment below stop. That is, if arange() is called with stop equal to 9 and step equal to 0.5, then the last value in the returned array will be 8.5.
# Create a variable called b that is equal to a numpy array containing the numbers 1 through 5 # Create a variable called c that is equal to a numpy array containing the numbers 0 through 10
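A possible completion using arange(), including a check of the "one increment below stop" behaviour described above:

```python
import numpy as np

# Numbers 1 through 5
b = np.arange(1, 6)
print(b)                          # [1 2 3 4 5]

# Numbers 0 through 10
c = np.arange(0, 11)
print(c)                          # [ 0  1  2 ... 10]

# stop=9 with step=0.5: the last value is 8.5, one increment below stop
print(np.arange(0, 9, 0.5)[-1])   # 8.5
```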
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
The zeros() and ones() functions take as an argument the desired shape of the array to be returned and fill that array with zeros or ones, respectively.
# Construct a 1x5 array of zeros # Construct a 2x2 array of ones
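A possible completion for the zeros() and ones() exercise; note that a multi-dimensional shape is passed as a tuple:

```python
import numpy as np

# Construct a 1x5 array of zeros
print(np.zeros(5))         # [0. 0. 0. 0. 0.]

# Construct a 2x2 array of ones
print(np.ones((2, 2)))     # [[1. 1.]
                           #  [1. 1.]]
```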
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Math with NumPy arrays A nice aspect of NumPy arrays is that they are optimized for mathematical operations. The following standard Python arithmetic operators +, -, *, /, and ** operate element-wise on NumPy arrays as the following examples indicate.
# Define two 1-dimensional arrays A = np.array([2,4,6]) B = np.array([3,2,1]) C = np.array([-1,3,2,-4]) # Multiply A by a constant # Exponentiate A # Add A and B together # Exponentiate A with B # Add A and C together
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
The error in the preceding example arises because addition is element-wise and A and C don't have the same shape.
# Compute the sine of the values in A
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Iterating through Numpy arrays NumPy arrays are iterable objects just like lists, strings, tuples, and dictionaries which means that you can use for loops to iterate through the elements of them.
# Use a for loop with a NumPy array to print the numbers 0 through 4
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Example: Basel problem One of my favorite math equations is: \begin{align} \sum_{n=1}^{\infty} \frac{1}{n^2} & = \frac{\pi^2}{6} \end{align} We can use an iteration through a NumPy array to approximate the lefthand-side and verify the validity of the expression.
# Set N equal to the number of terms to sum # Initialize a variable called summation equal to 0 # loop over the numbers 1 through N # Print the approximation and the exact solution
winter2017/econ129/python/Econ129_Class_03.ipynb
letsgoexploring/teaching
mit
Import neuroimaging data using nilearn. We recover a few MRI datapoints...
n_subjects = 4 dataset_files = datasets.fetch_oasis_vbm(n_subjects=n_subjects) gm_imgs = np.array(dataset_files.gray_matter_maps)
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
... and plot their gray matter densities.
for i in range(n_subjects): plotting.plot_epi(gm_imgs[i]) plt.show()
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Represent data as histograms We normalize those gray matter densities so that they sum to 1, and check their size.
a = jnp.array(get_data(gm_imgs)).transpose((3, 0, 1, 2)) grid_size = a.shape[1:4] a = a.reshape((n_subjects, -1)) + 1e-2 a = a / np.sum(a,axis=1)[:, np.newaxis] print('Grid size: ', grid_size)
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Instantiate a grid geometry to compute $W_p^p$ We instantiate the grid geometry corresponding to these data points, living in a space of dimension $91 \times 109 \times 91$, for a total dimension $d=902629$. Rather than stretch these voxel histograms and put them in the $[0,1]^3$ hypercube, we use a simpler rescaled grid, $[0, 0.9] \times [0, 1.08] \times [0, 0.9]$, with increments of 1/100. We endow points on that 3D grid with the Custom cost function defined below: we use a $p$-norm, with $p$ slightly larger than 1, following previous works [1, 2] on brain signals. We use an 𝜀 scheduler that will decrease the regularization strength from 0.1 down to 1e-4 with a decay factor of 0.95.
@jax.tree_util.register_pytree_node_class class Custom(ott.geometry.costs.CostFn): """Custom function.""" def pairwise(self, x, y): return jnp.sum(jnp.abs(x-y) ** 1.1) # Instantiate Grid Geometry of suitable size, epsilon parameter and cost. g_grid = grid.Grid(x = [jnp.arange(0, n)/100 for n in grid_size], cost_fns=[Custom()], epsilon=ott.geometry.epsilon_scheduler.Epsilon( target=1e-4, init=1e-1, decay=0.95))
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Compute their regularized $W_p^p$ iso-barycenter A small trick: If we jit and run the discrete_barycenter function with a small 𝜀 directly, it takes ages because it's both solving a hard problem and jitting the function at the same time. It's slightly more efficient to jit it with an easy problem, and run next the problem with the 𝜀 target we need.
%%time g_grid._epsilon.target=1 barycenter = discrete_barycenter.discrete_barycenter(g_grid, a) %%time g_grid._epsilon.target=1e-4 barycenter = discrete_barycenter.discrete_barycenter(g_grid, a)
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Plot decrease of marginal error The computation of the barycenter of $N$ histograms involves [3] the resolution of $N$ OT problems pointing towards the same, but unknown, marginal. The convergence of that algorithm can be monitored by evaluating the distance between the marginals of these different transport matrices w.r.t. that same, common marginal. Upon convergence that should be close to 0.
plt.figure(figsize=(8,5)) errors = barycenter.errors[:-1] plt.plot(np.arange(errors.size) * 10, errors, lw=3) plt.title('Marginal error decrease in barycenter computation') plt.yscale("log") plt.xlabel('Iterations') plt.ylabel('Marginal Error') plt.show()
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Plot the barycenter itself
def data_to_nii(x): return nilearn.image.new_img_like( gm_imgs[0], data=np.array(x.reshape(grid_size))) plotting.plot_epi(data_to_nii(barycenter.histogram)) plt.show()
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Euclidean barycenter, for reference
plotting.plot_epi(data_to_nii(np.mean(a,axis=0)))
docs/notebooks/Sinkhorn_Barycenters.ipynb
google-research/ott
apache-2.0
Next we'll set up our data sources and acquire the data via OPeNDAP using xarray.
API_key = open('APIKEY').readlines()[0].strip() #'<YOUR API KEY HERE>' dataset_key = 'noaa_ndbc_swden_stations' variables = 'spectral_wave_density,mean_wave_dir,principal_wave_dir,wave_spectrum_r1,wave_spectrum_r2' # OpenDAP URLs for each product now = datetime.datetime.now() ndbc_rt_url='http://dods.ndbc.noaa.gov/thredds/dodsC/data/swden/41047/41047w9999.nc' ndbc_url = 'http://dods.ndbc.noaa.gov/thredds/dodsC/data/swden/41047/41047w' + str(now.year) + '.nc' planetos_filename = download_data_station(dataset_key,API_key,'41047','2018-01-01T00:00:00',datetime.datetime.strftime(now,'%Y-%m-%dT%H:%M:%S') ,variables,'41047') #planetos_tds_url = 'https://api.planetos.com/v1/datasets/noaa_ndbc_swden_stations/stations/41047?origin=dataset-details&station=46237&apikey=8428878e4b944abeb84790e832c633fc&_ga=2.215365009.721611707.1530692788-133742091.1504032768&netcdf=true'#'http://thredds.planetos.com/thredds/dodsC/dpipe//rel_0_8x03_dataset/transform/ns=/noaa_ndbc_swden_stations/scheme=/http/authority=/dods.ndbc.noaa.gov/path=/thredds/dodsC/data/swden/41047/41047w9999.nc/chunk=/1/1/data' # acquire OpenDAP datasets ds_ndbc_rt = xr.open_dataset(ndbc_rt_url) ds_ndbc = xr.open_dataset(ndbc_url) ds_planetos = xr.open_dataset(planetos_filename) # Let's focus on a specific hour of interest... time = str((datetime.datetime.now() - datetime.timedelta(days=60)).strftime('%Y-%m-%d')) + ' 00:00:00' #'2014-08-09 00:00:00' # Select the specific hour for each dataset ds_ndbc_rt_hour = ds_ndbc_rt.sel(time=time).isel(latitude=0, longitude=0) ds_ndbc_hour = ds_ndbc.sel(time=time).isel(latitude=0, longitude=0) ds_planetos_hour = ds_planetos.sel(time=time).isel(latitude=0, longitude=0)
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
Product Inspection: Planet OS / NDBC Realtime / NDBC 2014 Historical For each of our three data products, we'll create an associated Dataframe for analysis.
# First, the Planet OS data which is acquired from the NDBC realtime station file. df_planetos = ds_planetos_hour.to_dataframe().drop(['context_time_latitude_longitude_frequency','mx_dataset','mx_creator_institution'], axis=1) df_planetos.head(8) # Second, the NDBC realtime station data. df_ndbc_rt = ds_ndbc_rt_hour.to_dataframe() df_ndbc_rt.head(8) # Finally, the 2014 archival data. df_ndbc = ds_ndbc_hour.to_dataframe() df_ndbc.head(8)
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
Based on the sample outputs above, it appears that the Planet OS data matches the NDBC realtime file that it is acquired from. We will further verify this below by performing an equality test against the two Dataframes. We can also see that the historical data is indeed different, with frequency bins that are neatly rounded and values for wave direction and wave spectrum even when spectral wave density is 0. Using the describe() method we can explore the statistical characteristics of each in more detail below. Note that the NaN values present in the Planet OS and NDBC realtime datasets will raise warnings for percentile calculations.
df_planetos.describe() df_ndbc_rt.describe() df_ndbc.describe()
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
Confirm Planet OS Equality to NDBC Realtime To confirm that the Planet OS and NDBC realtime Dataframes are indeed equal, we'll perform a diff. Note that NaN != NaN evaluates as True, so NaN values will be raised as inconsistent across the dataframes. This could be resolved using fillna() and an arbitrary fill value such as -9999.99.
# function below requires identical index structure def df_diff(df1, df2): ne_stacked = (df1 != df2).stack() changed = ne_stacked[ne_stacked] difference_locations = np.where(df1 != df2) changed_from = df1.values[difference_locations] changed_to = df2.values[difference_locations] return pd.DataFrame({'df1': changed_from, 'df2': changed_to}, index=changed.index) # Compare the NDBC realtime to Planet OS data # Note that NaN != NaN evaluates as True, so NaN values will be raised as inconsistent across the dataframes # We could use fillna() to fix this issue, however this is not implemented here. df_diff(df_ndbc_rt, df_planetos)
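As noted above, the NaN mismatches could be suppressed by filling both frames with the same sentinel before diffing. A sketch, assuming the dataframes and df_diff from the previous cells are in scope; the -9999.99 sentinel is the arbitrary fill value suggested in the text:

```python
# Fill NaNs with a common sentinel so that NaN vs. NaN no longer registers as a difference
fill_value = -9999.99
df_diff(df_ndbc_rt.fillna(fill_value), df_planetos.fillna(fill_value))
```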
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
The df_diff results are as expected: only NaN values differ between the two datasets. Spectral Wave Density Plot Let's plot the spectral wave density for all three datasets across the frequency coverage to see how they differ.
plt.figure(figsize=(20,10)) ds_ndbc_rt_hour.spectral_wave_density.plot(label='NDBC Realtime') ds_ndbc_hour.spectral_wave_density.plot(label='NDBC ' + str(now.year)) ds_planetos_hour.spectral_wave_density.plot(label='Planet OS') plt.legend() plt.show()
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
There is a very slight discrepancy between the 2014 NDBC product and the Planet OS product, but no difference between the realtime NDBC product and Planet OS product. Wave Spectrum Plots
vars = ['wave_spectrum_r1','wave_spectrum_r2'] df_planetos.loc[:,vars].plot(label="Planet OS", figsize=(18,6)) df_ndbc_rt.loc[:,vars].plot(label="NDBC Realtime", figsize=(18,6)) df_ndbc.loc[:,vars].plot(label="NDBC " + str(now.year), figsize=(18,6)) plt.show()
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
Wave Direction Plots
vars = ['principal_wave_dir','mean_wave_dir'] df_planetos.loc[:,vars].plot(label="Planet OS", figsize=(18,6)) df_ndbc_rt.loc[:,vars].plot(label="NDBC Realtime", figsize=(18,6)) df_ndbc.loc[:,vars].plot(label="NDBC " + str(now.year), figsize=(18,6)) plt.show()
api-examples/ndbc-spectral-wave-density-data-validation.ipynb
planet-os/notebooks
mit
Strategy 2: Implementation of the CM sketch
import sys import random import numpy as np import heapq import json import time BIG_PRIME = 9223372036854775783 def random_parameter(): return random.randrange(0, BIG_PRIME - 1) class Sketch: def __init__(self, delta, epsilon, k): """ Setup a new count-min sketch with parameters delta, epsilon and k The parameters delta and epsilon control the accuracy of the estimates of the sketch Cormode and Muthukrishnan prove that for an item i with count a_i, the estimate from the sketch a_i_hat will satisfy the relation a_hat_i <= a_i + epsilon * ||a||_1 with probability at least 1 - delta, where a is the the vector of all all counts and ||x||_1 is the L1 norm of a vector x Parameters ---------- delta : float A value in the unit interval that sets the precision of the sketch epsilon : float A value in the unit interval that sets the precision of the sketch k : int A positive integer that sets the number of top items counted Examples -------- >>> s = Sketch(10**-7, 0.005, 40) Raises ------ ValueError If delta or epsilon are not in the unit interval, or if k is not a positive integer """ if delta <= 0 or delta >= 1: raise ValueError("delta must be between 0 and 1, exclusive") if epsilon <= 0 or epsilon >= 1: raise ValueError("epsilon must be between 0 and 1, exclusive") if k < 1: raise ValueError("k must be a positive integer") self.w = int(np.ceil(np.exp(1) / epsilon)) self.d = int(np.ceil(np.log(1 / delta))) print("cm data Structure: {} hashes x {} cells = {} counters".format(self.d, self.w, self.d*self.w)) self.k = k self.hash_functions = [self.__generate_hash_function() for i in range(self.d)] self.count = np.zeros((self.d, self.w), dtype='int32') self.heap, self.top_k = [], {} # top_k => [estimate, key] pairs def update(self, key, increment): """ Updates the sketch for the item with name of key by the amount specified in increment Parameters ---------- key : string The item to update the value of in the sketch increment : integer The amount to update the sketch by for the given key Examples -------- >>> s = Sketch(10**-7, 0.005, 40) >>> s.update('http://www.cnn.com/', 1) """ for row, hash_function in enumerate(self.hash_functions): column = hash_function(abs(hash(key))) self.count[row, column] += increment self.update_heap(key) def update_heap(self, key): """ Updates the class's heap that keeps track of the top k items for a given key For the given key, it checks whether the key is present in the heap, updating accordingly if so, and adding it to the heap if it is absent Parameters ---------- key : string The item to check against the heap """ estimate = self.get(key) if not self.heap or estimate >= self.heap[0][0]: if key in self.top_k: old_pair = self.top_k.get(key) old_pair[0] = estimate heapq.heapify(self.heap) else: if len(self.top_k) < self.k: heapq.heappush(self.heap, [estimate, key]) self.top_k[key] = [estimate, key] else: new_pair = [estimate, key] old_pair = heapq.heappushpop(self.heap, new_pair) if new_pair[1] != old_pair[1]: del self.top_k[old_pair[1]] self.top_k[key] = new_pair self.top_k[key] = new_pair def get(self, key): """ Fetches the sketch estimate for the given key Parameters ---------- key : string The item to produce an estimate for Returns ------- estimate : int The best estimate of the count for the given key based on the sketch Examples -------- >>> s = Sketch(10**-7, 0.005, 40) >>> s.update('http://www.cnn.com/', 1) >>> s.get('http://www.cnn.com/') 1 """ value = sys.maxsize for row, hash_function in enumerate(self.hash_functions): column = hash_function(abs(hash(key))) 
value = min(self.count[row, column], value) return value def __generate_hash_function(self): """ Returns a hash function from a family of pairwise-independent hash functions """ a, b = random_parameter(), random_parameter() return lambda x: (a * x + b) % BIG_PRIME % self.w ! head CM_small.txt ! cat CM_small.txt | sort | uniq -c | sort -n f = open('CM_small.txt') results_exact = sorted(exact_top_users(f)) print("\n".join(results_exact)) # define a function to return a list of the estimated top users, sorted by count def CM_top_users(f, s, top_n = 10): for user_name in f: s.update(user_name.rstrip('\n'),1) results = [] counter = 0 for value in reversed(sorted(s.top_k.values())): if counter >= top_n: break results.append('{1},{0}'.format(str(value[0]),str(value[1]))) counter += 1 return results # note that the output format is '[user] [count]' # instantiate a Sketch object s = Sketch(10**-3, 0.1, 10) f = open('CM_small.txt') results_CM = sorted(CM_top_users(f,s)) print("\n".join(results_CM)) for item in zip(results_exact,results_CM): print(item)
count-min-101/CountMinSketch.ipynb
DrSkippy/Data-Science-45min-Intros
unlicense
Is it possible to make the sketch so coarse that its estimates are wrong even for this data set?
s = Sketch(0.9, 0.9, 10) f = open('CM_small.txt') results_coarse_CM = CM_top_users(f,s) print("\n".join(results_coarse_CM))
count-min-101/CountMinSketch.ipynb
DrSkippy/Data-Science-45min-Intros
unlicense
Yes! (if you try enough) Why? The 'w' parameter goes like $\lceil e/\epsilon \rceil$, which is always >= 3 for $\epsilon < 1$. The 'd' parameter goes like $\lceil \ln(1/\delta) \rceil$, which is always >= 1. So, you're dealing with a table with minimum size 3 x 1. With 10 records, it's possible that all 4 users map their counts to the same cell. So it's possible to see an estimate as high as 10, in this case. Now for a larger data set...
! wc -l CM_large.txt ! cat CM_large.txt | sort | uniq | wc -l ! cat CM_large.txt | sort | uniq -c | sort -rn f = open('CM_large.txt') %time results_exact = exact_top_users(f) print("\n".join(results_exact)) # this could take a few minutes f = open('CM_large.txt') s = Sketch(10**-4, 10**-4, 10) %time results_CM = CM_top_users(f,s) print("\n".join(results_CM))
count-min-101/CountMinSketch.ipynb
DrSkippy/Data-Science-45min-Intros
unlicense
For this precision and dataset size, the CM algo takes much longer than the exact solution. In fact, the crossover point at which the CM sketch can achieve reasonable accuracy in the same time as the exact solution is a very large number of entries.
for item in zip(results_exact,results_CM): print(item) # the CM sketch gets the top entry (an outlier) correct but doesn't do well # estimating the order of the more degenerate counts # let's decrease the precision via both the epsilon and delta parameters, # and see whether it still gets the "heavy-hitter" correct f = open('CM_large.txt') s = Sketch(10**-3, 10**-2, 50) %time results_CM = CM_top_users(f,s) print("\n".join(results_CM)) # nope...sketch is too coarse, too many collisions, and the prominence of user 'user_0 129' is obscured for item in zip(results_exact,results_CM): print(item)
count-min-101/CountMinSketch.ipynb
DrSkippy/Data-Science-45min-Intros
unlicense
Contents Add Video IPython.display.YouTubeVideo lets you play YouTube videos directly in the notebook. Library support is available to play Vimeo and local videos as well.
from IPython.display import YouTubeVideo YouTubeVideo('ooOLl4_H-IE')
docs/source/getting_started/jupyter_notebooks_advanced_features.ipynb
cathalmccabe/PYNQ
bsd-3-clause
Video Link with image display <a href="https://www.youtube.com/watch?v=ooOLl4_H-IE"> <img src="http://img.youtube.com/vi/ooOLl4_H-IE/0.jpg" width="400" height="400" align="left"></a> Contents Add webpages as Interactive Frames Embed an entire page from another site in an iframe; for example this is the PYNQ documentation page on readthedocs
from IPython.display import IFrame IFrame('https://pynq.readthedocs.io/en/latest/getting_started.html', width='100%', height=500)
docs/source/getting_started/jupyter_notebooks_advanced_features.ipynb
cathalmccabe/PYNQ
bsd-3-clause
set up pipeline This class creates a simple pipeline that writes all found items to a JSON file, where each line contains one JSON element.
class JsonWriterPipeline(object): def open_spider(self, spider): self.file = open('quoteresult.jl', 'w') def close_spider(self, spider): self.file.close() def process_item(self, item, spider): line = json.dumps(dict(item)) + "\n" self.file.write(line) return item
Scrapy_nb/Quotes base case.ipynb
CLEpy/CLEpy-MotM
mit
Define Spider The QuotesSpider class defines from which URLs to start crawling and which values to retrieve. I set the logging level of the crawler to warning, otherwise the notebook is overloaded with DEBUG messages about the retrieved data.
class QuotesSpider(scrapy.Spider): name = "quotes" start_urls = [ 'http://quotes.toscrape.com/page/1/', 'http://quotes.toscrape.com/page/2/', ] custom_settings = { 'LOG_LEVEL': logging.WARNING, 'ITEM_PIPELINES': {'__main__.JsonWriterPipeline': 1}, # Used for pipeline 1 'FEED_FORMAT':'json', # Used for pipeline 2 'FEED_URI': 'quoteresult.json' # Used for pipeline 2 } def parse(self, response): #A Response object represents an HTTP response, which is usually downloaded (by the Downloader) # and fed to the Spiders for processing. for quote in response.css('div.quote'): yield { 'text': quote.css('span.text::text').extract_first(), 'author': quote.css('span small::text').extract_first(), 'tags': quote.css('div.tags a.tag::text').extract(), }
Scrapy_nb/Quotes base case.ipynb
CLEpy/CLEpy-MotM
mit
Start the crawler
process = CrawlerProcess({ 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)' }) process.crawl(QuotesSpider) process.start()
Scrapy_nb/Quotes base case.ipynb
CLEpy/CLEpy-MotM
mit
The majority of tagged questions have one or two tags, with four tags at most. I'm curious about the relationships and connections between different tags, so for now we'll limit our scope to questions with four tags and to the 1,000 most popular tags.
tag_counts = tags.groupby("Id")["Tag"].count() many_tags = tag_counts[tag_counts > 3].index popular_tags = tags.Tag.value_counts().iloc[:1000].index tags = tags[tags["Id"].isin(many_tags)] #getting questions with 4 tags tags = tags[tags["Tag"].isin(popular_tags)] #getting only top 1000 tags tags.shape tags.head(20)
other notebook/Tags statistics.ipynb
sjqgithub/rquestions
mit
Creating a Bag of Tags: Now I am going to create a bag of tags and apply dimensionality reduction to it. To do this I'll have to spread the tags and create one column for each tag. Using pd.pivot works but it's very memory-intensive. Instead I'll take advantage of the sparsity and use scipy sparse matrices to create the bag of tags. This sparse bag idea was inspired by dune_dweller's script.
from sklearn.preprocessing import LabelEncoder from sklearn.decomposition import TruncatedSVD from sklearn.manifold import TSNE from sklearn.pipeline import make_pipeline #let's integer encode the id's and tags: tag_encoder = LabelEncoder() question_encoder = LabelEncoder() tags["Tag"] = tag_encoder.fit_transform(tags["Tag"]) tags["Id"] = question_encoder.fit_transform(tags["Id"]) tags.head() tag_num = np.max(tags["Tag"]) + 1 print (tag_num) id_num = np.max(tags["Id"]) + 1 print (id_num)
other notebook/Tags statistics.ipynb
sjqgithub/rquestions
mit
Now build the sparse bag-of-tags matrix: one row for each question, one column for each tag.
X = csr_matrix((np.ones(tags.shape[0]), (tags.Id, tags.Tag))) X.shape #one row for each question, one column for each tag tags.shape
other notebook/Tags statistics.ipynb
sjqgithub/rquestions
mit
Dimensionality Reduction using SVD: Now we will project our bag-of-tags matrix into a 3-dimensional subspace that captures as much of the variance as possible. Hopefully this will help us better understand the connections between the tags.
model = TruncatedSVD(n_components=3) model.fit(X) two_components = pd.DataFrame(model.transform(X),\ columns=["one", "two", "three"]) two_components.plot(x = "one", y = "two",kind = "scatter",\ title = "2D PCA projection components 1 and 2") two_components.plot(x = "two", y = "three", kind = "scatter", \ title = "2D PCA projection - components 2 and 3") tagz = popular_tags[:20] tag_ids = tag_encoder.transform(tagz) n = len(tag_ids) print (n) X_new = csr_matrix((np.ones(n), (pd.Series(range(n)), tag_ids)),\ shape = (n, 998)) proj = pd.DataFrame(model.transform(X_new)[:,:2], index=tagz, \ columns = ["one", "two"]) proj["tag"] = proj.index from ggplot import * #ggplot! plt = (ggplot(proj, aes(x = "one", y = "two", label = "tag")) + geom_point() + geom_text()) plt.show() sm_proj = proj[proj["one"] < 0.2][proj["two"] < 0.2] plt = (ggplot(sm_proj, aes(x = "one", y = "two", label = "tag")) + geom_point() + geom_text() + xlim(0, 0.1) + ylim(-0.1, 0.2)) plt.show()
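To see how much of the variance those three components actually capture, the fitted TruncatedSVD model exposes explained_variance_ratio_ (a quick check on the model fitted above):

print(model.explained_variance_ratio_)        # per-component share of variance
print(model.explained_variance_ratio_.sum())  # total captured by the 3D projection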
other notebook/Tags statistics.ipynb
sjqgithub/rquestions
mit
64k Particle LJ System This benchmark is designed to closely follow the HOOMD-BLUE LJ benchmark here.
!nvidia-smi
notebooks/lj_benchmark.ipynb
google/jax-md
apache-2.0
Prepare the system
lattice_constant = 1.37820 N_rep = 40 box_size = N_rep * lattice_constant # Using float32 for positions / velocities, but float64 for reductions. dtype = np.float32 # Specify the format of the neighbor list. # Options are Dense, Sparse, or OrderedSparse. format = partition.OrderedSparse displacement, shift = space.periodic(box_size) R = [] for i in range(N_rep): for j in range(N_rep): for k in range(N_rep): R += [[i, j, k]] R = np.array(R, dtype=dtype) * lattice_constant N = R.shape[0] phi = N / (lattice_constant * N_rep) ** 3 print(f'Created a system of {N} LJ particles with number density {phi:.3f}')
notebooks/lj_benchmark.ipynb
google/jax-md
apache-2.0
Benchmark using a fixed-size neighbor list.
neighbor_fn, energy_fn = energy.lennard_jones_neighbor_list(displacement, box_size, r_cutoff=3.0, dr_threshold=1., format=format) init, apply = simulate.nvt_nose_hoover(energy_fn, shift, 5e-3, kT=1.2) key = random.PRNGKey(0) # We pick an "extra capacity" to ensure ahead of time that the neighbor # list will have enough capacity. Since sparse neighbor lists are more # robust to changes in the number of particles, in this case we only # need to actually add more capacity for dense neighbor lists. if format is partition.Dense: nbrs = neighbor_fn.allocate(R, extra_capacity=55) else: nbrs = neighbor_fn.allocate(R) state = init(key, R, neighbor=nbrs) def step(i, state_and_nbrs): state, nbrs = state_and_nbrs nbrs = nbrs.update(state.position) return apply(state, neighbor=nbrs), nbrs # Run once to make sure the JIT cache is occupied. new_state, new_nbrs = lax.fori_loop(0, 10000, step, (state, nbrs)) new_state.position.block_until_ready() # Check to make sure the neighbor list didn't overflow. new_nbrs.did_buffer_overflow %%timeit new_state, new_nbrs = lax.fori_loop(0, 10000, step, (state, nbrs)) new_state.position.block_until_ready()
notebooks/lj_benchmark.ipynb
google/jax-md
apache-2.0
On an A100 this comes out to 22.4 s per loop, which is 2.24 ms per step.
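The per-step figure is just the wall time divided by the number of steps in the fori_loop above (a quick back-of-the-envelope check; the 22.4 s figure is hardware-dependent):

steps = 10000          # matches the fori_loop bound above
loop_seconds = 22.4    # measured wall time per loop on an A100
print(loop_seconds / steps * 1e3, 'ms / step')  # -> 2.24 ms / step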
renderer.render( box_size, {'particles': renderer.Sphere(new_state.position)} )
notebooks/lj_benchmark.ipynb
google/jax-md
apache-2.0
Define a function to identify outliers using Tukey's boxplot method. The method is very simple: we define the interquartile range as $IQR = Q3 - Q1$ and the whiskers as $Whiskers = (Q1 - \beta \cdot IQR,\ Q3 + \beta \cdot IQR)$. Points outside the $Whiskers$ interval are outliers. The "magic number" for $\beta$ is $1.5$. This method can be extended to accommodate subject-matter experts' knowledge: for example, if we know that a data point cannot be negative, we can force the lower whisker never to drop below $0$; similarly, if we know that we are dealing with a distribution of probabilities, we know that the upper whisker cannot be higher than $1$. The function IdentifyBoxplotWhiskers below computes the whiskers and allows the user to set the lowest_possible and highest_possible values.
def IdentifyBoxplotWhiskers(myvector, beta=1.5, lowest_possible=None, highest_possible=None):
    pctls = np.percentile(myvector, q=(25, 75))
    if VERBOSE: print pctls
    iqr = pctls[1] - pctls[0]
    if VERBOSE: print iqr
    whiskers = [(pctls[0] - beta * iqr), (pctls[1] + beta * iqr)]
    if lowest_possible is not None:
        whiskers[0] = max(whiskers[0], lowest_possible)   # clamp the lower whisker
    if highest_possible is not None:
        whiskers[1] = min(whiskers[1], highest_possible)  # clamp the upper whisker
    return whiskers

def IdentifyOutlierIndices(myvector, beta=1.5, lowest_possible=None, highest_possible=None):
    whiskers = IdentifyBoxplotWhiskers(myvector, beta, lowest_possible, highest_possible)
    if VERBOSE: print whiskers
    hi_ol_indices = np.where(myvector > whiskers[1])
    lo_ol_indices = np.where(myvector < whiskers[0])
    olIndices = np.hstack((hi_ol_indices, lo_ol_indices))
    return olIndices
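As a quick sanity check of the whisker arithmetic, here is the same calculation spelled out on a tiny array with one obvious outlier (a standalone sketch that does not use the helper above, so it does not depend on the VERBOSE flag):

import numpy as np

demo = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 100])
q1, q3 = np.percentile(demo, (25, 75))
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
print((lower, upper))                          # (-3.5, 14.5)
print(demo[(demo < lower) | (demo > upper)])   # [100] is flagged as an outlier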
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
Generate a bunch of random numbers and call the function defined above
np.random.seed(1234567890) thedata = np.random.exponential(10, 1000) outliers = np.random.exponential(30, 50) mydata = np.hstack((thedata, outliers)) print mydata.shape mydata_outliers = IdentifyOutlierIndices(mydata, 1.5, 0.0) print "Found %d outliers" % (mydata_outliers.shape[1]) plt.boxplot(x=mydata, sym='*', vert=True,manage_xticks=True, meanline = True, showmeans=True) plt.ylabel("Response Times") plt.show()
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
How will it work with a pair of linearly correlated variables?
############################################################################################################ X = np.arange (0, 100, 0.1) #print X.shape Y = 15 + 0.15*X #print Y.shape plt.scatter (X, Y) plt.show()
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
A regression through these data is trivial and not interesting. Let's "Make Some Noise"
Yblur = np.random.exponential(10, X.shape[0]) Y_fuzzy = Y + Yblur ## Some numpy-required manupulations here X_regr =X[:, np.newaxis] Y_regr = Y_fuzzy[:, np.newaxis] print X_regr.__class__ print Y_regr.__class__ ## And now let's fit the LinearRegression lr = LinearRegression() lr.fit(X_regr, Y_regr) print "LinearRegression: ", lr print "Y = %.3f * X + %.3f" %(lr.coef_[0], lr.intercept_) print "Regression RSq:", lr.score(X_regr, Y_regr) pred = lr.predict(X_regr)
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
Why is the RSq so bad? Plot the noisy data with the regression line
plt.plot(X_regr, pred, color = 'red', label="Model")
plt.scatter(X_regr, Y_regr, cmap='b', label="Observed", marker='*')
plt.legend(loc='best')
plt.show()

#######################################################################
## Graphically Analyze Residuals:
#######################################################################
myResid = Y_regr - pred
#print myResid.shape

fig, axes = plt.subplots(nrows=1, ncols=2, sharex=False, sharey=True)
ax1 = axes[0]
ax2 = axes[1]
ax1.scatter(X_regr, myResid)
ax2.boxplot (x=myResid, sym='*', vert=True,manage_xticks=True, meanline = True, showmeans=True)
ax1.set_title("Scatterplot of residuals")
ax2.set_title("Distribution of residuals")
plt.show()
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
We see that: (1) Residuals are independent of X - this is good news: it means we've got the right $Y(X)$ relationship. (2) Residuals are not normally distributed - this is bad news: it means we've chosen the wrong model for prediction. Let's try to build a 95% confidence interval for the residuals. For a normal distribution, about 99.7% of values fall within $\pm 3\sigma$ (95% corresponds to roughly $\pm 2\sigma$), so a $\pm 3\sigma$ band is a conservative 95% interval.
resid_3sd = 3*np.std(myResid) ## The lines corresponding to the 3-sigma confidence interval will be: plus3SD = pred + resid_3sd minus3SD = pred - resid_3sd print "The 95-pct CI for residuals = +/- %.3f" %(resid_3sd) ## Now rebuild the scatter plot with the regression line, adding the confidence interval: plt.plot(X_regr, pred, color = 'red', label="Model") plt.scatter(X_regr, Y_regr, cmap='b', label="Observed", marker='*') plt.plot(X_regr, plus3SD, '-', color = "orange", label = "+/-3SD") plt.plot(X_regr, minus3SD, '-', color = "orange") plt.legend(loc='best') plt.show()
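As a quick check on those coverage figures (assuming scipy is available alongside numpy in this environment):

from scipy.stats import norm

for k in (1.96, 2.0, 3.0):
    coverage = norm.cdf(k) - norm.cdf(-k)
    print((k, round(coverage, 4)))   # ~0.95 at 1.96 sigma, ~0.9973 at 3 sigma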
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
This is a nonsensical result: we know that the data cannot drop below zero, since we built it to sit above the $Y = 15 + 0.15*X$ line, and yet we cannot say with 95% confidence that it will not happen, especially at low values of X. We need to redefine the confidence interval if we are dealing with non-normally distributed data. One - very convenient - solution comes from the boxplot: we can define confidence intervals as the whiskers of the boxplot. Downside: we will not know the "p-value". Upside: we'll be able to draw the outlier boundary lines (and the p-value is overrated, anyway).
myResidWhiskers = IdentifyBoxplotWhiskers(myResid) print myResidWhiskers loBound = pred + myResidWhiskers[0] hiBound = pred + myResidWhiskers[1] print "Outlier Boundaries on Residuals = ", myResidWhiskers print "The 95-pct CI for residuals = +/- %.3f" %(resid_3sd) ## Now rebuild the scatter plot with the regression line, adding the confidence interval: plt.plot(X_regr, pred, color = 'red', label="Model") plt.scatter(X_regr, Y_regr, cmap='b', label="Observed", marker='*') plt.plot(X_regr, plus3SD, '-', color = "orange", label = "+/-3SD") plt.plot(X_regr, minus3SD, '-', color = "orange") plt.plot(X_regr, loBound, '-', color = "black", label = "Outlier Boundaries") plt.plot(X_regr, hiBound, '-', color = "black") plt.legend(loc='best') plt.show()
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
Now looking at the outlier boundaries (the black lines in the plot above), we see that any value of $Y$ that happens to be negative will be an outlier. If we know that the lowest value of myResid is the lowest possible value, we can force the outlier boundary never to cross that line (BE VERY CAUTIOUS WHEN MAKING SUCH A DECISION). For the scenario we are looking into, the modified outlier boundaries are shown in the plot below:
## Check min(myResid) and set the low whisker to its value if it is < minresid: minresid = min(myResid) print minresid if myResidWhiskers[0] < minresid: myResidWhiskers[0] = minresid ## Predict the low and high boundaries: loBound = pred + myResidWhiskers[0] hiBound = pred + myResidWhiskers[1] print "Outlier Boundaries on Residuals = ", myResidWhiskers print "The 95-pct CI for residuals = +/- %.3f" %(resid_3sd) ## Now rebuild the scatter plot with the regression line, adding the confidence interval: plt.plot(X_regr, pred, color = 'red', label="Model") plt.scatter(X_regr, Y_regr, cmap='b', label="Observed", marker='*') plt.plot(X_regr, plus3SD, '-', color = "orange", label = "+/-3SD") plt.plot(X_regr, minus3SD, '-', color = "orange") plt.plot(X_regr, loBound, '-', color = "black", label = "Corrected Outlier Boundaries") plt.plot(X_regr, hiBound, '-', color = "black") plt.legend(loc='best') plt.show()
reference/Outliers.ipynb
jbocharov-mids/W207-Machine-Learning
apache-2.0
Retrieving training and test data The MNIST data set already contains both training and test data. There are 55,000 data points of training data, and 10,000 points of test data. Each MNIST data point has: 1. an image of a handwritten digit and 2. a corresponding label (a number 0-9 that identifies the image) We'll call the images, which will be the input to our neural network, X and their corresponding labels Y. We're going to want our labels as one-hot vectors, which are vectors that hold mostly 0's and one 1. It's easiest to see this in an example. As a one-hot vector, the number 0 is represented as [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], and 4 is represented as [0, 0, 0, 0, 1, 0, 0, 0, 0, 0]. Flattened data For this example, we'll be using flattened data or a representation of MNIST images in one dimension rather than two. So, each handwritten number image, which is 28x28 pixels, will be represented as a one-dimensional array of 784 pixel values. Flattening the data throws away information about the 2D structure of the image, but it simplifies our data so that all of the training data can be contained in one array whose shape is [55000, 784]; the first dimension is the number of training images and the second dimension is the number of pixels in each image. This is the kind of data that is easy to analyze using a simple neural network.
# Retrieve the training and test data trainX, trainY, testX, testY = mnist.load_data(one_hot=True) trainX.shape trainY.shape
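To make the one-hot and flattening ideas concrete, here is a tiny standalone illustration (independent of the mnist loader above; the image is just random pixels standing in for a 28x28 digit):

import numpy as np

label = 4
one_hot = np.eye(10)[label]        # -> [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
print(one_hot)

image = np.random.rand(28, 28)     # stand-in for a single 28x28 image
flat = image.reshape(-1)           # flattened to a 784-element vector
print(flat.shape)                  # (784,), matching trainX's second dimension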
tutorials/intro-to-tflearn/TFLearn_Digit_Recognition.ipynb
liumengjun/cn-deep-learning
mit
Building the network TFLearn lets you build the network by defining the layers in that network. For this example, you'll define: The input layer, which tells the network the number of inputs it should expect for each piece of MNIST data. Hidden layers, which recognize patterns in data and connect the input to the output layer, and The output layer, which defines how the network learns and outputs a label for a given image. Let's start with the input layer; to define the input layer, you'll define the type of data that the network expects. For example, net = tflearn.input_data([None, 100]) would create a network with 100 inputs. The number of inputs to your network needs to match the size of your data. For this example, we're using 784-element-long vectors to encode our input data, so we need 784 input units. Adding layers To add new hidden layers, you use net = tflearn.fully_connected(net, n_units, activation='ReLU') This adds a fully connected layer where every unit (or node) in the previous layer is connected to every unit in this layer. The first argument net is the network you created in the tflearn.input_data call; it designates the input to the hidden layer. You can set the number of units in the layer with n_units, and set the activation function with the activation keyword. You can keep adding layers to your network by repeatedly calling tflearn.fully_connected(net, n_units). Then, to set how you train the network, use: net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1, loss='categorical_crossentropy') Again, this is passing in the network you've been building. The keywords: optimizer sets the training method, here stochastic gradient descent learning_rate is the learning rate loss determines how the network error is calculated. In this example, with categorical cross-entropy. Finally, you put all this together to create the model with tflearn.DNN(net). Exercise: Below in the build_model() function, you'll put together the network using TFLearn. You get to choose how many layers to use, how many hidden units, etc. Hint: The final output layer must have 10 output nodes (one for each digit 0-9). It's also recommended to use a softmax activation layer as your final output layer.
# Define the neural network def build_model(): # This resets all parameters and variables, leave this here tf.reset_default_graph() #### Your code #### # Include the input layer, hidden layer(s), and set how you want to train the model net = tflearn.input_data([None, trainX.shape[1]]) # Hidden layer(s) net = tflearn.fully_connected(net, 196, activation='ReLU') net = tflearn.fully_connected(net, 28, activation='ReLU') # Output layer and training model net = tflearn.fully_connected(net, 10, activation='softmax') net = tflearn.regression(net, optimizer='sgd', learning_rate=0.05, loss='categorical_crossentropy') # This model assumes that your network is named "net" model = tflearn.DNN(net) return model # Build the model model = build_model()
tutorials/intro-to-tflearn/TFLearn_Digit_Recognition.ipynb
liumengjun/cn-deep-learning
mit
Import directives
import collections
python_collections_en.ipynb
jdhp-docs/python-notebooks
mit
Ordered dictionaries See https://docs.python.org/3/library/collections.html#collections.OrderedDict
d = collections.OrderedDict() d["2"] = 2 d["3"] = 3 d["1"] = 1 print(d) print(type(d.keys())) print(list(d.keys())) print(type(d.values())) print(list(d.values())) for k, v in d.items(): print(k, v)
python_collections_en.ipynb
jdhp-docs/python-notebooks
mit
There shouldn't be any output from that cell, but if you get any error messages, it's most likely because you don't have one or more of these modules installed on your system. Running pip3 install pandas matplotlib numpy seaborn bokeh from the command line should take care of that. If not, holler and I'll try to help you. As well as running your code, hitting shift-return in that first cell should have automatically created an empty cell below it. In that cell, we're going to use the read_csv method provided by pandas to, um, read our CSV. When pandas reads data from a CSV file, it automagically puts it into something called a dataframe. It's not important at this point to understand what a dataframe is or how it differs from other Python data structures. All you need to know for now is that it's an object containing structured data that's stored in memory for the duration of your notebook session. We'll also assign our new dataframe to another variable—df—so we can do things with it down the line. We do all of this like so (remember to hit shift-return):
url = 'https://raw.githubusercontent.com/davidbjourno/finding-stories-in-data/master/data/leave-demographics.csv' # Pass in the URL of the CSV file: df = pd.read_csv(url)
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
See how easy that was? Now let's check that df is in fact a dataframe. Using the .head(n=[number]) method on any dataframe will return the first [number] rows of that dataframe. Let's take a look at the first ten:
df.head(n=10)
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Looks good! (FYI: .tail(n=[number]) will give you the last [number] rows.) By now, you may have noticed that some of the column headers in this CSV aren't particularly descriptive (var1, var2 etc.). This is the game: by the end of this tutorial, you should be able to identify the variables that correlated most strongly with the percentage of ‘leave’ votes (the leave column), i.e. which factors were the most predictive of people voting ‘leave’. At the end of the meetup, before we all go down the pub, you can tell me which variables you think correlated most strongly and I'll tell you what each of them is 😁 2. Explore the data The main advantage of the workflow we're using here is that it enables us to inspect a dataset visually, which can often be the quickest way to identify patterns, trends or outliers in data. A common first step in this process is to use scatter plots to visualise the relationship, if any, between two variables. So let's use Matplotlib to create a first, super basic scatter plot:
# Configure Matplotlib's pyplot method (plt) to plot at a size of 8x8 inches and # a resolution of 72 dots per inch plt.figure( figsize=(8, 8), dpi=72 ) # Plot the data as a scatter plot g = plt.scatter( x=df['var1'], # The values we want to plot along the x axis y=df['leave'], # The values we want to plot along the y axis s=50, # The size… c='#0571b0', # …colour… alpha=0.5 # …and opacity we want the data point markers to be )
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Yikes, not much of a relationship there. Let's try a different variable:
plt.figure( figsize=(8, 8), dpi=72 ) g = plt.scatter( x=df['var2'], # Plotting var2 along the x axis this time y=df['leave'], s=50, c='#0571b0', alpha=0.5 )
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Hmm, that distribution looks better—there's a stronger, negative correlation there—but it's still a little unclear what we're looking at. Let's add some context. We know from our provisional data-munging (that we didn't do) that many of the boroughs of London were among the strongest ‘remain’ areas in the country. We can add an additional column called is_london to our dataframe and set the values of that column to either True or False depending on whether the value in the row's region_name column is London:
df['is_london'] = np.where(df['region_name'] == 'London', True, False) # Print all the rows in the dataframe in which is_london is equal to True df[df['is_london'] == True]
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Those names should look familiar. That's numpy's .where method coming in handy there to help us generate a new column of data based on the values of another column—in this case, region_name. At this point, we're going to abandon Matplotlib like merciless narcissists and turn our attention to the younger, hotter Seaborn. Though it sounds like one of the factions from Game of Thrones, it's actually another plotting module that includes some handy analytical shortcuts and statistical methods. One of those analytical shortcuts is the FacetGrid. If you've ever used OpenRefine, you're probably familiar with the concept of faceting. I'll fumblingly describe it here as a method whereby data is apportioned into distinct matrices according to the values of a single field. You get the idea. Right now, we're going to facet on the is_london column so that we can distinguish the London boroughs from the rest of the UK:
# Set the chart background colour (completely unnecessary, I just don't like the # default) sns.set_style('darkgrid', { 'axes.facecolor': '#efefef' }) # Tell Seaborn that what we want from it is a FacetGrid, and assign this to the # variable ‘fg’ fg = sns.FacetGrid( data=df, # Use our dataframe as the input data hue='is_london', # Highlight the data points for which is_london == True palette=['#0571b0', '#ca0020'], # Define a tasteful blue/red colour combo size=7 # Make the plots size 7, whatever that means ) # Tell Seaborn that what we want to do with our FacetGrid (fg) is visualise it # as a scatter plot fg.map( plt.scatter, 'var2', # Values to plot along the x axis 'leave', # Values to plot along the y axis alpha=0.5 )
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Now we're cooking with gas! We can see a slight negative correlation in the distribution of the data points and we can see how London compares to all the other regions of the country. Whatever var2 is, we now know that the London boroughs generally have higher levels of it than most of the rest of the UK, and that it has a (weak) negative correlation with ‘leave’ vote percentage. So what's to stop you faceting on is_london but with a different variable plotted along the x axis? The answer is: nothing! Try doing that exact thing right now:
# Plot the chart above with a different variable along the x axis.
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
What's more, faceting isn't limited to just highlighting specific data points. We can also pass FacetGrid a col (column) argument with the name of a column that we'd like to use to further segment our data. So let's create another True/False (Boolean) column to flag the areas with the largest populations—the ones with electorates of 100,000 people or more—and plot a new facet grid:
df['is_largest'] = np.where(df['electorate'] >= 100000, True, False) g = sns.FacetGrid( df, hue='is_london', col='is_largest', palette=['#0571b0', '#ca0020'], size=7 ) g.map( plt.scatter, 'var2', 'leave', alpha=0.5 )
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Now we're able to make the following statements based solely on a visual inspection of this facet grid: Most of the less populous areas (electorate < 100,000) voted ‘leave’ Most of the less populous areas had var2 levels below 35. Only two—both London boroughs—had levels higher than 35 There is a stronger correlation between the strength of the ‘leave’ vote and the level of var2 among the more populous areas So you see how faceting can come in handy when you come to a dataset cold and need to start to understand it quickly. As yet, we still don't have much of a story, just a few observations—not exactly Pulitzer material. The next and most important step is to narrow down which of the variables in the dataset were the most indicative of ‘leave’ vote percentage. The good news is that we don't have to repeat the facet grid steps above for every variable, because Seaborn provides another useful analytical shortcut called a PairGrid. 3. Optimise for efficiency Apparently there's an equivalent to the pair grid in R called a correlogram or something (I wouldn't know). But the pair grid is super sweet because it allows us to check for correlations across a large number of variables at once. By passing the PairGrid function an array of column headers from our dataset, we can plot each of those variables against every other variable in one amazing ultra-grid:
# Just adding the first four variables, plus leave, to start with—you'll see why columns = [ 'var1', 'var2', 'var3', 'var4', 'leave', 'is_london' ] g = sns.PairGrid( data=df[columns], hue='is_london', palette=['#0571b0', '#ca0020'] ) g.map_offdiag(plt.scatter);
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Try passing the remaining variables (var5-var9) to the pair grid. You should be able to see which of the variables in the dataset correlate most strongly with ‘leave’ vote percentage and whether the correlations are positive or negative. 4. Go into detail Seaborn also provides a heatmap method that we can use to quickly compare the correlation coefficient of each pair of variables (the value between -1 and 1 that describes the strength of the relationship between them). We can pass all the columns we're interested in to the heatmap in one go, because heatmaps are easier to read than pair grids:
plt.figure( figsize=(15, 15), dpi=72 ) columns = [ # ALL THE COLUMNS 'var1', 'var2', 'var3', 'var4', 'var5', 'var6', 'var7', 'var8', 'var9', 'leave' ] # Calculate the standard correlation coefficient of each pair of columns correlations = df[columns].corr(method='pearson') sns.heatmap( data=correlations, square=True, xticklabels=correlations.columns.values, yticklabels=correlations.columns.values, # The Matplotlib colormap to use # (https://matplotlib.org/examples/color/colormaps_reference.html) cmap='plasma' )
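If you only want the coefficient for one pair of columns rather than the whole matrix, you can pull it out directly (a small aside; var2 is just used as an example column here):

pair_corr = df['var2'].corr(df['leave'])   # Pearson by default
print(round(pair_corr, 3))

# the same number sits in the heatmap's matrix computed above:
print(correlations.loc['var2', 'leave'])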
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
By now, you should have a pretty good idea which variables are worth reporting as being significant demographic factors in the ‘leave’ vote. If you wanted to take your analysis even further, you could also report on whether London boroughs returned higher or lower ‘leave’ vote percentages than we would expect based on the values of any correlating variable. A convenient way to do this would be to use Seaborn's built-in linear regression plotting:
columns = ['var2', 'leave'] g = sns.lmplot( data=df, x=columns[0], y=columns[1], hue='is_london', palette=['#0571b0', '#ca0020'], size=7, fit_reg=False, ) sns.regplot( data=df, x=columns[0], y=columns[1], scatter=False, color='#0571b0', ax=g.axes[0, 0] )
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Reading this plot, we're able to say that, all things being equal, most of the London boroughs have lower ‘leave’ vote percentages than we would expect based on their levels of var2 alone. This suggests—rightly—that variables other than var2 are in play in determining London's lower-than-expected levels of ‘leave’ voting. 5. Make a graphic and get it out of the notebook Everyone knows that data journalism without pretty graphics is just boring. While the Matplotlib and Seaborn scatter plots get the job done, they're not exactly 😍 For that, we need Bokeh. You can pretty much throw a stone and hit a data visualisation library these days, but Bokeh is a good fit for Jupyter notebooks because it's made for Python and can work with dataframes and all that other good stuff we've got going on in here. So let's fire it up by telling it that, like Matplotlib, we want it to plot in the notebook:
output_notebook()
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Because we want this to be our output graphic, we're going to be much fussier about how it looks, so there's quite a bit of configuration involved here:
color_map = {False: '#0571b0', True: '#ca0020'}

# Instantiate our plot
p = figure(
    plot_width=600,
    plot_height=422,
    background_fill_color='#d3d3d3',
    title='Leave demographics'
)

# Add a circle renderer to the plot
p.circle(
    x=df['var2'],
    y=df['leave'],
    # Size the markers according to the size of the electorate (scaled down)
    size=df['electorate'] / 20000,
    fill_color=df['is_london'].map(color_map),
    line_color=df['is_london'].map(color_map),
    line_width=1,
    alpha=0.5
)

# Configure the plot's x axis
p.xaxis.axis_label = 'var2'
p.xgrid.grid_line_color = None

# Configure the plot's y axis
p.yaxis.axis_label = 'Percentage voting leave'
p.ygrid.grid_line_color = '#999999'
p.ygrid.grid_line_alpha = 1
p.ygrid.grid_line_dash = [6, 4]

# Show the plot
show(p)
finding-stories-in-data.ipynb
davidbjourno/finding-stories-in-data
mit
Normalization Q1. Apply l2_normalize to x.
_x = np.arange(1, 11) epsilon = 1e-12 x = tf.convert_to_tensor(_x, tf.float32)
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Q2. Calculate the mean and variance of x based on the sufficient statistics.
_x = np.arange(1, 11) x = tf.convert_to_tensor(_x, tf.float32)
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Q3. Calculate the mean and variance of x.
tf.reset_default_graph() _x = np.arange(1, 11) x = tf.convert_to_tensor(_x, tf.float32)
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Q4. Calculate the mean and variance of x using unique_x and counts.
tf.reset_default_graph() x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32) # From `x` mean, variance = tf.nn.moments(x, [0]) with tf.Session() as sess: print(sess.run([mean, variance])) # From unique elements and their counts unique_x, _, counts = tf.unique_with_counts(x) mean, variance = ... with tf.Session() as sess: print(sess.run([mean, variance]))
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Q5. The code below is to implement the mnist classification task. Complete it by adding batch normalization.
# Load data from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=False) # build graph class Graph: def __init__(self, is_training=False): # Inputs and labels self.x = tf.placeholder(tf.float32, shape=[None, 784]) self.y = tf.placeholder(tf.int32, shape=[None]) # Layer 1 w1 = tf.get_variable("w1", shape=[784, 100], initializer=tf.truncated_normal_initializer()) output1 = tf.matmul(self.x, w1) output1 = tf.contrib.layers.batch_norm(...) #Layer 2 w2 = tf.get_variable("w2", shape=[100, 10], initializer=tf.truncated_normal_initializer()) logits = tf.matmul(output1, w2) preds = tf.to_int32(tf.arg_max(logits, dimension=1)) # training loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=logits) self.train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss) self.acc = tf.reduce_mean(tf.to_float(tf.equal(self.y, preds))) # Training tf.reset_default_graph() g = Graph(is_training=True) init_op = tf.global_variables_initializer() with tf.Session() as sess: sess.run(init_op) saver = tf.train.Saver() for i in range(1, 10000+1): batch = mnist.train.next_batch(60) sess.run(g.train_op, {g.x: batch[0], g.y: batch[1]}) # Evaluation if i % 100 == 0: print("training steps=", i, "Acc. =", sess.run(g.acc, {g.x: mnist.test.images, g.y: mnist.test.labels})) save_path = saver.save(sess, './my-model') # Inference tf.reset_default_graph() g2 = Graph(is_training=False) with tf.Session() as sess: saver = tf.train.Saver() saver.restore(sess, save_path) hits = 0 for i in range(100): hits += sess.run(g2.acc, {g2.x: [mnist.test.images[i]], g2.y: [mnist.test.labels[i]]}) print(hits)
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Losses Q6. Compute half the L2 norm of x without the sqrt.
tf.reset_default_graph() x = tf.constant([1, 1, 2, 2, 2, 3], tf.float32)
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0
Classification Q7. Compute softmax cross entropy between logits and labels. Note that the rank of them is not the same.
tf.reset_default_graph() logits = tf.random_normal(shape=[2, 5, 10]) labels = tf.convert_to_tensor(np.random.randint(0, 10, size=[2, 5]), tf.int32) output = tf.nn.... with tf.Session() as sess: print(sess.run(output))
programming/Python/tensorflow/exercises/Neural_Network_Part2.ipynb
diegocavalca/Studies
cc0-1.0