hexsha | size | ext | lang | max_stars_repo_path | max_stars_repo_name | max_stars_repo_head_hexsha | max_stars_repo_licenses | max_stars_count | max_stars_repo_stars_event_min_datetime | max_stars_repo_stars_event_max_datetime | max_issues_repo_path | max_issues_repo_name | max_issues_repo_head_hexsha | max_issues_repo_licenses | max_issues_count | max_issues_repo_issues_event_min_datetime | max_issues_repo_issues_event_max_datetime | max_forks_repo_path | max_forks_repo_name | max_forks_repo_head_hexsha | max_forks_repo_licenses | max_forks_count | max_forks_repo_forks_event_min_datetime | max_forks_repo_forks_event_max_datetime | avg_line_length | max_line_length | alphanum_fraction | cells | cell_types | cell_type_groups |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7dde148b0ae70ed5d0367871c023c9085747eb9 | 128,231 | ipynb | Jupyter Notebook | Matplotlib_1_Basics.ipynb | Buckminster007/Matplotlib | 106ec4fec6479cbaad0a5d3d0cf4de425b316d4c | [
"MIT"
] | null | null | null | Matplotlib_1_Basics.ipynb | Buckminster007/Matplotlib | 106ec4fec6479cbaad0a5d3d0cf4de425b316d4c | [
"MIT"
] | null | null | null | Matplotlib_1_Basics.ipynb | Buckminster007/Matplotlib | 106ec4fec6479cbaad0a5d3d0cf4de425b316d4c | [
"MIT"
] | null | null | null | 657.594872 | 78,282 | 0.945263 | [
[
[
"from matplotlib import pyplot as plt",
"_____no_output_____"
],
[
"import numpy as np\nages_x=list(np.arange(25,36))",
"_____no_output_____"
],
[
"ages_x",
"_____no_output_____"
],
[
"py_dev_y=[45372, 48567, 53784, 57896, 63874, 65259, 70003, 70000, 73256, 78546, 83142]",
"_____no_output_____"
],
[
"js_dev_y = [37810, 43515, 46823, 49293, 53437,\n 56373, 62375, 66674, 68745, 68746, 74583]",
"_____no_output_____"
],
[
"\ndev_y = [34368, 38496, 42000, 46752, 49320, 53200, 56000, 62316, 64928, 67317, 68748]",
"_____no_output_____"
],
[
"plt.plot(ages_x, js_dev_y, linestyle='--', linewidth=3, label='Javascript')\n\nplt.plot(ages_x, dev_y, color='#444444', linestyle='--', label='All Devs')\n\nplt.plot(ages_x, py_dev_y, label='Python')\n\nplt.title('Median Salary by Age(USD)')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary(USD)')\n\nplt.legend()\n# to save the figure\nplt.grid(True)\nplt.show()",
"_____no_output_____"
],
[
"#plt.style.available ---> to check the available styles for the plots. \n#choose any style \n#plt.style.use('style_name') --->to use that style.\n#plt.xkcd() ---> to make the plot look like hand-drawn \n#plt.rcdefaults() ---> to remove the hand drawn effect of plt.xckd()",
"_____no_output_____"
],
[
"plt.rcdefaults()\nplt.plot(ages_x, dev_y, label='All Devs')\n#plt.style.available\nplt.grid(True)\nplt.legend()\nplt.title('Median Salary by Age(USD)')\nplt.xlabel('Ages')\nplt.ylabel('Median Salary(USD)')\nplt.style.use('fivethirtyeight')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
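A minimal sketch (hypothetical salary data; standard Matplotlib calls) of the style-selection ordering used in the notebook record above — `plt.style.use` only affects figures created after it is called:

```python
from matplotlib import pyplot as plt

plt.style.use('fivethirtyeight')   # pick any name from plt.style.available *before* plotting
ages = list(range(25, 36))
salaries = [34368, 38496, 42000, 46752, 49320, 53200,
            56000, 62316, 64928, 67317, 68748]
plt.plot(ages, salaries, label='All Devs')
plt.legend()
plt.grid(True)
plt.show()
plt.rcdefaults()                   # restore the default style afterwards
```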
e7dde232093d9d97654c1f930299dcc888ab032c | 138,571 | ipynb | Jupyter Notebook | Studienarbeit/04_NN_Training_and_Evaluation/.ipynb_checkpoints/CreateDataSet-128x128-checkpoint.ipynb | anish-pratheepkumar/AI-Machine-Learning-and-Deep-Learning | 3d1873e2ac70b9ee6cca309e96759e1714bc73f1 | [
"MIT"
] | null | null | null | Studienarbeit/04_NN_Training_and_Evaluation/.ipynb_checkpoints/CreateDataSet-128x128-checkpoint.ipynb | anish-pratheepkumar/AI-Machine-Learning-and-Deep-Learning | 3d1873e2ac70b9ee6cca309e96759e1714bc73f1 | [
"MIT"
] | null | null | null | Studienarbeit/04_NN_Training_and_Evaluation/.ipynb_checkpoints/CreateDataSet-128x128-checkpoint.ipynb | anish-pratheepkumar/AI-Machine-Learning-and-Deep-Learning | 3d1873e2ac70b9ee6cca309e96759e1714bc73f1 | [
"MIT"
] | null | null | null | 219.605388 | 93,708 | 0.912608 | [
[
[
"#import essential libraries\nimport math\nimport os\nimport cv2\nimport numpy as np\nimport h5py\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
],
[
"#Defining path for data loading\nPATH = '/home/anish/anaconda_py3_copelia'\ndata_path = PATH + '/NN_DATA_CLASSES' #Name of the folder that contains the data\ndata_dir_list = os.listdir(data_path)\n\nprint(data_path)\nprint(data_dir_list)",
"/home/anish/anaconda_py3_copelia/NN_DATA_CLASSES\n['L', 'R', 'F']\n"
],
[
"#plot a sample image\nimg_path = data_path+'/L/1.jpg'\nimg = cv2.imread(img_path)\nprint(img_path)\n\nplt.imshow(img)",
"/home/anish/anaconda_py3_copelia/NN_DATA_CLASSES/L/1.jpg\n"
],
[
"#plot resized image\nprint(img.shape)\nimg1 =cv2.resize(img,(256,256))\nprint(img1.shape)\n\nplt.imshow(img1)",
"(512, 512, 3)\n(256, 256, 3)\n"
],
[
"#plot resized image\nprint(img.shape)\nimg1 =cv2.resize(img,(128,128))\nprint(img1.shape)\n\nplt.imshow(img1)",
"(512, 512, 3)\n(128, 128, 3)\n"
],
[
"#Defining input image dimensions\nimg_Height=int(512/4) #Image Height\nimg_Width=int(512/4) #Image Width\nnum_channel=3 #Number of Chanels\nprint(img_Height, img_Width)",
"128 128\n"
],
[
"#Creating an empty vector to save the resized images\nimg_data_list=[]\n\n#Initialize number of images per folder to 0\nnumber_of_images = 0\ntotal_number_of_images = 0\n\n# Load the images\nfor data_dir in data_dir_list:\n img_list=os.listdir(data_path+'/'+ data_dir)\n print ('Images loaded from: {}'.format(data_dir))\n for img in img_list:\n input_img=cv2.imread(data_path+'/'+ data_dir + '/'+ img)\n #input_img=cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)\n input_img_resize=cv2.resize(input_img,(img_Width,img_Height))\n img_data_list.append(input_img_resize)\n number_of_images = number_of_images + 1\n total_number_of_images += 1\n print('Number of images in the folder:{}'.format(number_of_images))\n print('Total number of images:{}'.format(total_number_of_images) + '\\n')\n number_of_images = 0\n\n#convert the image list into a numpy array\nimg_data = np.array(img_data_list) \nprint ('Total image shape:', img_data.shape)",
"Images loaded from: L\nNumber of images in the folder:5199\nTotal number of images:5199\n\nImages loaded from: R\nNumber of images in the folder:5153\nTotal number of images:10352\n\nImages loaded from: F\nNumber of images in the folder:5252\nTotal number of images:15604\n\nTotal image shape: (15604, 128, 128, 3)\n"
],
[
"#Defining the number of classes\nnum_classes = 3\n\n#creating a numpy array which corresponds to the label of each image\nnum_of_samples = img_data.shape[0]\nlabels = np.ones((num_of_samples,),dtype='int64')\n\nlabels[0:5199]=0 #images belonging to class 'Forward Left' assigned to class label 0\nlabels[5199:10352]=1 #images belonging to class 'Forward Right' assigned to class label 1\nlabels[10352:15604]=2 #images belonging to class 'Forward Straight' assigned to class label 2\n\nclasses = [0, 1, 2]\n\nprint('Total labels shape:',labels.shape)",
"Total labels shape: (15604,)\n"
],
[
"#Shuffle the dataset\nx,y = shuffle(img_data,labels, random_state=2)\n\n#Split the dataset to Train set and (Validation+Test)set\nX_train_orig, X_valtest_orig, Y_train_orig, Y_valtest_orig = train_test_split(x, y, test_size=0.2, random_state=2)\n#Split the (Validation+Test)set to validation and test set\nX_val_orig, X_test_orig, Y_val_orig, Y_test_orig = train_test_split(X_valtest_orig, Y_valtest_orig, test_size=0.5, random_state=2)\n\nprint(X_train_orig.shape, Y_train_orig.shape) #shape of training data and its labels\nprint(X_val_orig.shape, Y_val_orig.shape) #shape of training data and its labels\nprint(X_test_orig.shape, Y_test_orig.shape ) #shape of training data and its labels",
"(12483, 128, 128, 3) (12483,)\n(1560, 128, 128, 3) (1560,)\n(1561, 128, 128, 3) (1561,)\n"
],
[
"#Create HDF5(binary data format)file of the training data \nwith h5py.File(PATH + '/Dataset_Red/train_set.h5', 'w') as hdf: #w means write(create a file)\n hdf.create_dataset('list_classes', data=classes)\n hdf.create_dataset('train_set_x', data=X_train_orig)\n hdf.create_dataset('train_set_y', data=Y_train_orig) ",
"_____no_output_____"
],
[
"#Check if training data was saved correctly\nwith h5py.File(PATH + '/Dataset_Red/train_set.h5', \"r\") as hdf:\n ls = list(hdf.keys())\n print('List of datasets in this file:{}'.format(ls)+'\\n')\n \n dataset1 = np.array(hdf.get('list_classes'))\n print('Shape of list_classes:{}'.format(dataset1.shape)+'\\n')\n \n dataset2 = np.array(hdf.get('train_set_x'))\n print('Shape of train_set_x: {}'.format(dataset2.shape)+'\\n')\n\n dataset3 = np.array(hdf.get('train_set_y'))\n print('Shape of train_set_y: {}'.format(dataset3.shape)+'\\n')",
"List of datasets in this file:['list_classes', 'train_set_x', 'train_set_y']\n\nShape of list_classes:(3,)\n\nShape of train_set_x: (12483, 128, 128, 3)\n\nShape of train_set_y: (12483,)\n\n"
],
[
"#Create HDF5(binary data format)file of the validation data \nwith h5py.File(PATH + '/Dataset_Red/val_set.h5', 'w') as hdf:\n hdf.create_dataset('list_classes', data=classes)\n hdf.create_dataset('val_set_x', data=X_val_orig)\n hdf.create_dataset('val_set_y', data=Y_val_orig) ",
"_____no_output_____"
],
[
"#Check if validation data was saved correctly\nwith h5py.File(PATH + '/Dataset_Red/val_set.h5', \"r\") as hdf:\n ls = list(hdf.keys())\n print('List of datasets in this file:{}'.format(ls)+'\\n')\n \n dataset1 = np.array(hdf.get('list_classes'))\n print('Shape of list_classes:{}'.format(dataset1.shape)+'\\n')\n \n dataset2 = np.array(hdf.get('val_set_x'))\n print('Shape of val_set_x: {}'.format(dataset2.shape)+'\\n')\n\n dataset3 = np.array(hdf.get('val_set_y'))\n print('Shape of val_set_y: {}'.format(dataset3.shape)+'\\n')",
"List of datasets in this file:['list_classes', 'val_set_x', 'val_set_y']\n\nShape of list_classes:(3,)\n\nShape of val_set_x: (1560, 128, 128, 3)\n\nShape of val_set_y: (1560,)\n\n"
],
[
"#Create HDF5(binary data format)file of the test data \nwith h5py.File(PATH + '/Dataset_Red/test_set.h5', 'w') as hdf:\n hdf.create_dataset('list_classes', data=classes)\n hdf.create_dataset('test_set_x', data=X_test_orig)\n hdf.create_dataset('test_set_y', data=Y_test_orig) ",
"_____no_output_____"
],
[
"#Check if validation data was saved correctly\nwith h5py.File(PATH + '/Dataset_Red/test_set.h5', \"r\") as hdf:\n ls = list(hdf.keys())\n print('List of datasets in this file:{}'.format(ls))\n \n dataset1 = np.array(hdf.get('list_classes'))\n print('Shape of list_classes:{}'.format(dataset1.shape))\n \n dataset2 = np.array(hdf.get('test_set_x'))\n print('Shape of test_set_x: {}'.format(dataset2.shape))\n\n dataset3 = np.array(hdf.get('test_set_y'))\n print('Shape of test_set_y: {}'.format(dataset3.shape))",
"List of datasets in this file:['list_classes', 'test_set_x', 'test_set_y']\nShape of list_classes:(3,)\nShape of test_set_x: (1561, 128, 128, 3)\nShape of test_set_y: (1561,)\n"
],
[
"hdf = h5py.File(PATH + '/Dataset_Red/test_set.h5', \"r\")\nprint(hdf['list_classes'][:])",
"[0 1 2]\n"
],
[
"#function to load dataset\ndef load_dataset():\n train_dataset = h5py.File(PATH + '/Dataset_Red/train_set.h5', \"r\")\n train_set_x_orig = np.array(train_dataset['train_set_x'][:]) #train set features(images)\n train_set_y_orig = np.array(train_dataset['train_set_y'][:]) #train set labels\n\n val_dataset = h5py.File(PATH + '/Dataset_Red/val_set.h5', \"r\")\n val_set_x_orig = np.array(val_dataset['val_set_x'][:]) #val set features(images)\n val_set_y_orig = np.array(val_dataset['val_set_y'][:]) #val set labels\n \n test_dataset = h5py.File(PATH + '/Dataset_Red/test_set.h5', \"r\")\n test_set_x_orig = np.array(test_dataset['test_set_x'][:]) #test set features(images)\n test_set_y_orig = np.array(test_dataset['test_set_y'][:]) #test set labels\n\n classes = np.array(test_dataset[\"list_classes\"][:]) #list of classes\n \n #reshape labels into row vectors(sinle dimension vector)\n train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0])) \n val_set_y_orig = val_set_y_orig.reshape((1, val_set_y_orig.shape[0])) \n test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))\n \n return train_set_x_orig, train_set_y_orig, val_set_x_orig, val_set_y_orig, test_set_x_orig, test_set_y_orig, classes",
"_____no_output_____"
],
[
"#function for one hot encoding\ndef convert_to_one_hot(Y, C): #C = no of labels\n Y = np.eye(C)[Y.reshape(-1)].T #reshape(-1) will convert it into a single dimension vector\n return Y\n",
"_____no_output_____"
],
[
"#load dataset\nX_train_orig, Y_train_orig, X_val_orig, Y_val_orig, X_test_orig, Y_test_orig, classes = load_dataset()\n\n#Data Preprocessing\n# Normalize the image vectors\nX_train = X_train_orig/255.\nX_val = X_val_orig/255.\nX_test = X_test_orig/255.\n\n# Convert training, validation and test labels to one hot matrices\nY_train = convert_to_one_hot(Y_train_orig, 3).T\nY_val = convert_to_one_hot(Y_val_orig, 3).T\nY_test = convert_to_one_hot(Y_test_orig, 3).T\n\nprint (\"number of training samples = \" + str(X_train.shape[0]))\nprint (\"number of validation samples = \" + str(X_val.shape[0]))\nprint (\"number of test samples = \" + str(X_test.shape[0]))\n\nprint (\"X_train shape: \" + str(X_train.shape))\nprint (\"Y_train shape: \" + str(Y_train.shape))\n\nprint (\"X_val shape: \" + str(X_val.shape))\nprint (\"Y_val shape: \" + str(Y_val.shape))\n\nprint (\"X_test shape: \" + str(X_test.shape))\nprint (\"Y_test shape: \" + str(Y_test.shape))",
"number of training samples = 12483\nnumber of validation samples = 1560\nnumber of test samples = 1561\nX_train shape: (12483, 128, 128, 3)\nY_train shape: (12483, 3)\nX_val shape: (1560, 128, 128, 3)\nY_val shape: (1560, 3)\nX_test shape: (1561, 128, 128, 3)\nY_test shape: (1561, 3)\n"
],
[
"fig=plt.figure(figsize=(20, 20))\ncolumns = 5\nrows = 4\nfor i in range(1, columns*rows +1):\n img = X_train_orig[i]\n fig.add_subplot(rows, columns, i)\n plt.xlabel(\"y = \" + str(np.squeeze(Y_train_orig[:, i])))\n plt.imshow(img)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
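A minimal sketch (hypothetical labels) of the `np.eye`-based one-hot encoding defined in the notebook record above: indexing the C x C identity matrix with the flattened label vector picks out one one-hot row per sample.

```python
import numpy as np

def convert_to_one_hot(Y, C):
    # Y: array of integer class labels, C: number of classes
    return np.eye(C)[Y.reshape(-1)].T

Y = np.array([[0, 2, 1, 2]])        # hypothetical labels for four samples, three classes
print(convert_to_one_hot(Y, 3).T)   # one row per sample, with a single 1 in the label's column
```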
e7ddeb42611d5578bf56898d3e10b9bc7f5189cc | 57,965 | ipynb | Jupyter Notebook | 1_Preliminaries.ipynb | lanhhv84/Image-Captioning | f6be131e8f00a0d3497d3f5a0a38b5ba5aaf49be | [
"MIT"
] | null | null | null | 1_Preliminaries.ipynb | lanhhv84/Image-Captioning | f6be131e8f00a0d3497d3f5a0a38b5ba5aaf49be | [
"MIT"
] | null | null | null | 1_Preliminaries.ipynb | lanhhv84/Image-Captioning | f6be131e8f00a0d3497d3f5a0a38b5ba5aaf49be | [
"MIT"
] | null | null | null | 52.791439 | 2,181 | 0.633675 | [
[
[
"# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder",
"_____no_output_____"
],
[
"<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. Be patient - it may take a couple of minutes to run!",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('./cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\ncocoapi_loc = '/mnt/data2/Project/Image-Captioning/'\n\n# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n cocoapi_loc=cocoapi_loc,\n vocab_from_file=False)",
"Requirement already satisfied: nltk in /home/hvlpr/anaconda3/lib/python3.7/site-packages (3.4.1)\nRequirement already satisfied: six in /home/hvlpr/anaconda3/lib/python3.7/site-packages (from nltk) (1.12.0)\nloading annotations into memory...\nDone (t=0.46s)\ncreating index...\n"
]
],
[
[
"When you ran the code cell above, the data loader was stored in the variable `data_loader`. \n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.",
"_____no_output_____"
]
],
[
[
"sample_caption = 'A person doing a trick on a rail while riding a skateboard.'",
"_____no_output_____"
]
],
[
[
"In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.",
"_____no_output_____"
]
],
[
[
"import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)",
"['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n"
]
],
[
[
"In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.",
"_____no_output_____"
]
],
[
[
"sample_caption = []\n\nstart_word = data_loader.dataset.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader.dataset.vocab(start_word))\nprint(sample_caption)",
"Special start word: <start>\n[0]\n"
]
],
[
[
"In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.",
"_____no_output_____"
]
],
[
[
"sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])\nprint(sample_caption)",
"[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18]\n"
]
],
[
[
"In **`line 5`**, we append a final integer to mark the end of the caption. \n\nIdentical to the case of the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.",
"_____no_output_____"
]
],
[
[
"end_word = data_loader.dataset.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader.dataset.vocab(end_word))\nprint(sample_caption)",
"Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]\n"
]
],
[
[
"Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).",
"_____no_output_____"
]
],
[
[
"import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)",
"tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3,\n 753, 18, 1])\n"
]
],
[
[
"And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor. All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above. \n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function. The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**. \n\n```python\ndef __call__(self, word):\n if not word in self.word2idx:\n return self.word2idx[self.unk_word]\n return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.",
"_____no_output_____"
]
],
[
[
"# Preview the word2idx dictionary.\ndict(list(data_loader.dataset.vocab.word2idx.items())[:10])",
"_____no_output_____"
]
],
[
[
"We also print the total number of keys.",
"_____no_output_____"
]
],
[
[
"# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))",
"Total number of tokens in vocabulary: 8856\n"
]
],
[
[
"As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. ",
"_____no_output_____"
]
],
[
[
"# Modify the minimum word count threshold.\nvocab_threshold = 4\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n cocoapi_loc=cocoapi_loc,\n vocab_from_file=False)",
"loading annotations into memory...\nDone (t=0.42s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\nDone (t=0.43s)\ncreating index...\n"
],
[
"# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))",
"Total number of tokens in vocabulary: 9955\n"
]
],
[
[
"There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.",
"_____no_output_____"
]
],
[
[
"unk_word = data_loader.dataset.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))",
"Special unknown word: <unk>\nAll unknown words are mapped to this integer: 2\n"
]
],
[
[
"Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions. ",
"_____no_output_____"
]
],
[
[
"print(data_loader.dataset.vocab('jfkafejw'))\nprint(data_loader.dataset.vocab('ieowoqjf'))",
"2\n2\n"
]
],
[
[
"The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. \n\nBut once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.",
"_____no_output_____"
]
],
[
[
"# Obtain the data loader (from file). Note that it runs much faster than before!\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n cocoapi_loc=cocoapi_loc,\n vocab_from_file=True)",
"Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\n"
]
],
[
[
"In the next section, you will learn how to use the data loader to obtain batches of training data.",
"_____no_output_____"
],
[
"<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. ",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader.dataset.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))",
"value: 10 --- count: 86332\nvalue: 11 --- count: 79945\nvalue: 9 --- count: 71935\nvalue: 12 --- count: 57639\nvalue: 13 --- count: 37648\nvalue: 14 --- count: 22335\nvalue: 8 --- count: 20769\nvalue: 15 --- count: 12842\nvalue: 16 --- count: 7729\nvalue: 17 --- count: 4842\nvalue: 18 --- count: 3103\nvalue: 19 --- count: 2015\nvalue: 7 --- count: 1597\nvalue: 20 --- count: 1451\nvalue: 21 --- count: 999\nvalue: 22 --- count: 683\nvalue: 23 --- count: 534\nvalue: 24 --- count: 383\nvalue: 25 --- count: 277\nvalue: 26 --- count: 215\nvalue: 27 --- count: 159\nvalue: 28 --- count: 115\nvalue: 29 --- count: 86\nvalue: 30 --- count: 58\nvalue: 31 --- count: 49\nvalue: 32 --- count: 44\nvalue: 34 --- count: 39\nvalue: 37 --- count: 32\nvalue: 33 --- count: 31\nvalue: 35 --- count: 31\nvalue: 36 --- count: 26\nvalue: 38 --- count: 18\nvalue: 39 --- count: 18\nvalue: 43 --- count: 16\nvalue: 44 --- count: 16\nvalue: 48 --- count: 12\nvalue: 45 --- count: 11\nvalue: 42 --- count: 10\nvalue: 40 --- count: 9\nvalue: 49 --- count: 9\nvalue: 46 --- count: 9\nvalue: 47 --- count: 7\nvalue: 50 --- count: 6\nvalue: 51 --- count: 6\nvalue: 41 --- count: 6\nvalue: 52 --- count: 5\nvalue: 54 --- count: 3\nvalue: 56 --- count: 2\nvalue: 6 --- count: 2\nvalue: 53 --- count: 2\nvalue: 55 --- count: 2\nvalue: 57 --- count: 1\n"
]
],
[
[
"To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which then is used to retrieve the corresponding data points. The pre-processed images and captions in the batch are stored in `images` and `captions`.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader.dataset.get_train_indices()\nprint('sampled indices:', indices)\n\n# Create and assign a batch sampler to retrieve a batch with the sampled indices.\nnew_sampler = data.sampler.SubsetRandomSampler(indices=indices)\ndata_loader.batch_sampler.sampler = new_sampler\n \n# Obtain the batch.\nimages, captions = next(iter(data_loader))\n \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.\n# print('images:', images)\n# print('captions:', captions)",
"sampled indices: [233186, 219334, 248528, 332607, 300925, 24377, 336380, 59426, 23758, 306722]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 10])\n"
]
],
[
[
"Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.",
"_____no_output_____"
],
[
"<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ",
"_____no_output_____"
]
],
[
[
"# Watch for any changes in model.py, and re-load it automatically.\n%load_ext autoreload\n%autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nfrom model import EncoderCNN, DecoderRNN",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
],
[
"%reload_ext model",
"_____no_output_____"
]
],
[
[
"In the next code cell we define a `device` that you will use move PyTorch tensors to GPU (if CUDA is available). Run this code cell before continuing.",
"_____no_output_____"
]
],
[
[
"device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"_____no_output_____"
]
],
[
[
"Run the code cell below to instantiate the CNN encoder in `encoder`. \n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.",
"_____no_output_____"
]
],
[
[
"# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. (Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n \n# Move last batch of images (from Step 2) to GPU if CUDA is available. \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! :D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"",
"Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /home/hvlpr/.cache/torch/checkpoints/resnet50-19c8e357.pth\n100%|██████████| 102502400/102502400 [00:09<00:00, 11202810.52it/s]\n"
]
],
[
[
"The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). \n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.",
"_____no_output_____"
],
[
"<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. \n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. \n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.",
"_____no_output_____"
]
],
[
[
"from model import EncoderCNN, DecoderRNN",
"[autoreload of model failed: Traceback (most recent call last):\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 244, in check\n superreload(m, reload, self.old_objects)\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 394, in superreload\n update_generic(old_obj, new_obj)\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 331, in update_generic\n update(a, b)\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 289, in update_class\n if update_generic(old_obj, new_obj): continue\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 331, in update_generic\n update(a, b)\n File \"/home/hvlpr/anaconda3/lib/python3.7/site-packages/IPython/extensions/autoreload.py\", line 265, in update_function\n setattr(old, name, getattr(new, name))\nValueError: __init__() requires a code object with 0 free vars, not 1\n]\n"
],
[
"# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 1) to GPU if CUDA is available \ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! :D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"",
"torch.Size([10, 1, 256])\n"
]
],
[
[
"When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
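A minimal sketch of the six-line caption pre-processing walked through in the notebook record above, assuming `nltk` (with the `punkt` tokenizer) is installed and `vocab` follows that notebook's `Vocabulary` interface (callable, with `start_word`/`end_word` attributes):

```python
import nltk
import torch

def caption_to_tensor(caption, vocab):
    tokens = nltk.tokenize.word_tokenize(str(caption).lower())  # line 1: lowercase and tokenize
    ids = [vocab(vocab.start_word)]                             # lines 2-3: start token (id 0)
    ids.extend(vocab(token) for token in tokens)                # line 4: map tokens to integer ids
    ids.append(vocab(vocab.end_word))                           # line 5: end token (id 1)
    return torch.Tensor(ids).long()                             # line 6: cast to a long tensor
```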
e7ddee4f4f870720a119611715ecc6f0d73c67bf | 443,211 | ipynb | Jupyter Notebook | Request/.ipynb_checkpoints/sin-checkpoint.ipynb | mrpal39/Python_Tutorials | b245f0d219684eaf09c26a13442ad25c7a986dd2 | [
"MIT"
] | 1,040 | 2016-08-07T02:27:28.000Z | 2022-03-30T02:31:40.000Z | Request/.ipynb_checkpoints/sin-checkpoint.ipynb | mrpal39/Python_Tutorials | b245f0d219684eaf09c26a13442ad25c7a986dd2 | [
"MIT"
] | 16 | 2017-09-18T05:39:52.000Z | 2022-01-28T20:40:43.000Z | Request/.ipynb_checkpoints/sin-checkpoint.ipynb | mrpal39/Python_Tutorials | b245f0d219684eaf09c26a13442ad25c7a986dd2 | [
"MIT"
] | 1,102 | 2016-08-07T02:27:24.000Z | 2022-03-31T16:18:48.000Z | 598.125506 | 208,792 | 0.927087 | [
[
[
"%pylab inline\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"helix = pd.read_csv('helix_parameters.csv')\nhelix.head() # just seeing that data was imported properly by outputing first 5 cells",
"_____no_output_____"
],
[
"# checking what the columns are\nhelix.columns",
"_____no_output_____"
],
[
"# selecting a couple columns\ncouple_columns = helix[['Energy', 'helix3 phase','helix 2 phase', 'helix1 phase']]\ncouple_columns.head()",
"_____no_output_____"
],
[
"# selecting same columns a different way\nhelix.ix[:,['Energy', 'helix3 phase','helix 2 phase', 'helix1 phase']].head()",
"_____no_output_____"
],
[
"# Correlations\nfrom pandas.tools.plotting import scatter_matrix",
"_____no_output_____"
],
[
"# correlations. you can also have hist on the diagonal\nscatter_matrix(couple_columns,alpha=0.03, figsize=(10, 10), diagonal='kde');",
"_____no_output_____"
],
[
"# Now Subplots",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(nrows = 3, ncols = 1, figsize = (15,5));\n#fig.suptitle('Price Variation', size = 15, x = .515, y=1.02)\n# 'Energy', 'helix3 phase','helix 2 phase', 'helix1 phase'\n\naxes[0].scatter('helix1 phase', 'Energy', data = couple_columns, alpha = .05);\naxes[0].set_xlabel('helix1 phase', fontsize=10);\n\naxes[1].scatter('helix 2 phase', 'Energy', data = couple_columns, alpha = .05);\naxes[1].set_xlabel('helix 2 phase', fontsize=10);\n\naxes[2].scatter('helix3 phase', 'Energy', data = couple_columns, alpha = .05);\naxes[2].set_xlabel('helix3 phase', fontsize=10);",
"_____no_output_____"
],
[
"from mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\n\n\nfig = plt.figure(figsize=(10, 10))\nax = fig.add_subplot(111, projection='3d')\n\n# c is the variable to color map\n\n# reference for cmap. note cmap and c are different!\n# http://matplotlib.org/examples/color/colormaps_reference.html\nax.scatter(couple_columns[['helix1 phase']], couple_columns[['helix 2 phase']], couple_columns[['Energy']], marker='o', c=couple_columns[['Energy']], cmap=\"RdBu\");\n\nax.set_xlabel('helix1 phase');\nax.set_ylabel('helix 2 phase');\nax.set_zlabel('Energy');\n\n# change view angle \n# http://infamousheelfilcher.blogspot.com/2013/02/changing-viewing-angle-of-matplotlib.html\nax.view_init(azim = 180+40,elev = 10)",
"_____no_output_____"
],
[
"# now filtering data to helix 2 at 100 and helix 0\nnp.clip(randn(250, 250), -1, 1)",
"_____no_output_____"
]
],
[
[
"## Was this Cell below what you wanted? ",
"_____no_output_____"
]
],
[
[
"# remove the mean for the c\nlow = (couple_columns[['Energy']] - couple_columns[['Energy']].mean()).min()[0]\nhigh = (couple_columns[['Energy']] - couple_columns[['Energy']].mean()).max()[0]\nplt.scatter(couple_columns[['helix1 phase']], couple_columns[['helix 2 phase']], c=(couple_columns[['Energy']] - couple_columns[['Energy']].mean()), edgecolors='none',vmin = low, vmax = high, cmap = 'Blues', marker = 's',s = 190)\nplt.xlabel('helix1 phase');\nplt.ylabel('helix 2 phase');",
"_____no_output_____"
],
[
"low, high",
"_____no_output_____"
],
[
"np.unique(couple_columns[['helix1 phase']].values)",
"_____no_output_____"
],
[
"(couple_columns[['Energy']] - couple_columns[['Energy']].mean()).head()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
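A minimal sketch (random stand-in data) of the mean-centred colour scale used for the 2-D scatter plot in the notebook record above:

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.DataFrame({'helix1 phase': np.random.uniform(0, 180, 100),
                   'helix 2 phase': np.random.uniform(0, 180, 100),
                   'Energy': np.random.randn(100)})
c = df['Energy'] - df['Energy'].mean()   # centre Energy so the colour map is symmetric about zero
plt.scatter(df['helix1 phase'], df['helix 2 phase'], c=c,
            vmin=c.min(), vmax=c.max(), cmap='Blues', marker='s', s=190)
plt.xlabel('helix1 phase')
plt.ylabel('helix 2 phase')
plt.colorbar(label='Energy (mean-centred)')
plt.show()
```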
e7de0c1585b4808125334150244923d609a633a8 | 61,607 | ipynb | Jupyter Notebook | Chapter 2 Reservoir Fluid Properties/Examples/Chapter 2 Examples.ipynb | boomitsheth/Reservoir-Engineering-Handbook- | 0008cd92c400008f521aed154843a9e37a6bddba | [
"MIT"
] | 5 | 2020-07-21T00:25:58.000Z | 2021-12-14T16:12:58.000Z | Chapter 2 Reservoir Fluid Properties/Examples/Chapter 2 Examples.ipynb | boomitsheth/Reservoir-Engineering-Handbook- | 0008cd92c400008f521aed154843a9e37a6bddba | [
"MIT"
] | null | null | null | Chapter 2 Reservoir Fluid Properties/Examples/Chapter 2 Examples.ipynb | boomitsheth/Reservoir-Engineering-Handbook- | 0008cd92c400008f521aed154843a9e37a6bddba | [
"MIT"
] | 1 | 2021-07-29T13:14:01.000Z | 2021-07-29T13:14:01.000Z | 28.761438 | 144 | 0.340838 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt ",
"_____no_output_____"
],
[
"import sys,os ",
"_____no_output_____"
]
],
[
[
"# Example 2.1 Calculation of Volume of Gas Using Ideal Gas Behaviour ",
"_____no_output_____"
]
],
[
[
"\"Question is to calculate the volume of 3pounds of N-Butane gas using ideal gas behaviour\"\n\n#known \nmass = 58.123 #lbs\ntemp = 120 #Fdegrees \npressure = 60 #psia\nm =3 #lbs\nR = 10.73\n\nV= (m/mass)*(R*(temp+460)/pressure) #ft3\n\nprint(\"Volume of Gas using Ideal Gas Behaviour is:\", V,\"ft3\")\n",
"Volume of Gas using Ideal Gas Behaviour is: 5.353646577086525 ft3\n"
]
],
[
[
"# Example 2.2 Calculation of Density of N-Butane",
"_____no_output_____"
]
],
[
[
"\"Question is to calculate the density of N-Butane\"\n\nden = m/V\n\nprint(\"Density of N-Butane is:\", den,\"lb/ft3\")\n",
"Density of N-Butane is: 0.5603657164893786 lb/ft3\n"
]
],
[
[
"# Example 2.3 Calculation of Density & Molecular Weight ",
"_____no_output_____"
]
],
[
[
"\"Calcualte apparent molecular weight of gas and gas density, \"\n\nspg= 0.65 \nQg= 1.1 #Mmscf/d\npressure= 1500 #psia\ntemp= 150 #Fdegree\n\nMolweight= 28.96 * spg #lbs\n\nprint(\"Molecular Weight of the gas is:\", Molweight,\"lbs\")\n\nGasden = (pressure * Molweight) / (10.73 * (temp+460)) #lb/ft3 \n\nprint(\"Gas Density is:\",Gasden,\"lbs/ft3\")\n\n\n \n ",
"Molecular Weight of the gas is: 18.824 lbs\nGas Density is: 4.3139351901364344 lbs/ft3\n"
]
],
[
[
"# Example 2.4 Calculation of Specific Gravity and Molecular Weight ",
"_____no_output_____"
]
],
[
[
"component= pd.read_csv(\"F:\\Tarek Ahmed Reservoir Engineering Data\\Chapter 2 Reservoir Fluid Properties\\example2.4.csv\")\ncomponent",
"_____no_output_____"
],
[
"Mi= [44.01,16.04,30.07,44.11]\ncomponent[ 'Mi' ] = Mi\ncomponent",
"_____no_output_____"
],
[
"component['yiMi'] = component['yi'] * component['Mi']\ncomponent ",
"_____no_output_____"
],
[
"Ma = component.sum(axis=0)\nMa\n",
"_____no_output_____"
],
[
"\"Hence the apparent weight is 18.042\"",
"_____no_output_____"
],
[
"Ma= 18.042 \nspecificgravity= Ma/28.96\nspecificgravity",
"_____no_output_____"
]
],
[
[
"# Example 2.5 Calculation of Gas Compressibility Factor ",
"_____no_output_____"
]
],
[
[
"components = pd.read_csv(\"F:\\Tarek Ahmed Reservoir Engineering Data\\Chapter 2 Reservoir Fluid Properties\\example2.5.csv\")\ncomponents",
"_____no_output_____"
],
[
"pressure = 3000 #psia\ntemp = 180 #Fdegree\nTci =[547.91,227.49,343.33,549.92,666.06,734.46,765.62]\ncomponents['Tci'] = Tci\ncomponents",
"_____no_output_____"
],
[
"components['yiTci'] = components['yi'] * components['Tci']\ncomponents",
"_____no_output_____"
],
[
"Pci=[1071,493.1,666.4,706.5,616.4,527.9,550.6]\ncomponents['Pci']= Pci\ncomponents",
"_____no_output_____"
],
[
"components['yiPci'] = components['yi'] * components['Pci']\ncomponents ",
"_____no_output_____"
],
[
"components.sum(axis=0)",
"_____no_output_____"
]
],
[
[
"# Example 2.17 Calculate Specific Gravity of Separated Gas",
"_____no_output_____"
]
],
[
[
"\"Separator tests were conducted on a crude oil sample. Results of the test in terms of GOR and Gas Specific Gravity are calculated. \"\n\nresults = pd.read_csv(\"F:\\Tarek Ahmed Reservoir Engineering Data\\Chapter 2 Reservoir Fluid Properties\\example2.17.csv\")\nresults ",
"_____no_output_____"
],
[
"results['GORGasSg']= results['GOR'] * results['GasSg']\nresults",
"_____no_output_____"
],
[
"results.sum()",
"_____no_output_____"
],
[
"Sg = 806.212 / 984 \nSg",
"_____no_output_____"
]
],
[
[
"# Example 2.18 Using Standing Correlation, Estimate Gas Solubility at Pb ",
"_____no_output_____"
]
],
[
[
"table= pd.read_csv(\"F:\\Tarek Ahmed Reservoir Engineering Data\\Chapter 2 Reservoir Fluid Properties\\example2.18.csv\")\ntable",
"_____no_output_____"
],
[
"#T=Reservoir Temperature\n#Pb=Bubble Point Pressure\n#Bo=Oil Formation Volume Factor\n#Psep=Sperator Pressure\n#Tsep=separator Temperature\n#Co=Isothermal compressibility coeffeicient of the oil ",
"_____no_output_____"
],
[
"table['x']= (0.0125*table['API'].astype(float)) - 0.00091*table['T'].astype(float) \ntable",
"_____no_output_____"
],
[
"table['10^x'] = pow(10,table['x'])\ntable",
"_____no_output_____"
],
[
"table['Predicted Rs']= table['SG'] * pow((((table['Pb'].astype(float)/18.2)+1.4) * table['10^x'] ),1.2048) \ntable \n#Standing Corelation used to calculate the predicted Solubility ",
"_____no_output_____"
],
[
"#to calculate the absolute average error in prediction of solubility at bubble point pressure\ntable['%error'] = (table['Predicted Rs'] - table['Rs']) * 100 / table['Predicted Rs']\ntable",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7de0d6e32344d6ac6883cf36f53b732f40c6f45 | 17,404 | ipynb | Jupyter Notebook | notebooks/Chapter 1 - Mining Twitter.ipynb | KaranamVijayKumar/Mining-the-Social-Web-3rd-Edition | 02bd88b11bce27b471ccd42a11013fe376446887 | [
"BSD-2-Clause"
] | 1 | 2019-06-02T16:38:23.000Z | 2019-06-02T16:38:23.000Z | notebooks/Chapter 1 - Mining Twitter.ipynb | KaranamVijayKumar/Mining-the-Social-Web-3rd-Edition | 02bd88b11bce27b471ccd42a11013fe376446887 | [
"BSD-2-Clause"
] | null | null | null | notebooks/Chapter 1 - Mining Twitter.ipynb | KaranamVijayKumar/Mining-the-Social-Web-3rd-Edition | 02bd88b11bce27b471ccd42a11013fe376446887 | [
"BSD-2-Clause"
] | 1 | 2020-02-10T02:55:04.000Z | 2020-02-10T02:55:04.000Z | 28.161812 | 692 | 0.552517 | [
[
[
"# Mining Twitter\n\nTwitter implements OAuth 1.0A as its standard authentication mechanism, and in order to use it to make requests to Twitter's API, you'll need to go to https://developer.twitter.com/en/apps and create a sample application. There are four primary identifiers you'll need to note for an OAuth 1.0A workflow: consumer key, consumer secret, access token, and access token secret. Note that you will need an ordinary Twitter account in order to login, create an app, and get these credentials.\n\n<img src=\"resources/ch01-twitter/images/Twitter-AppCredentials.png\" width=\"600px\">",
"_____no_output_____"
],
[
"If you are taking advantage of the virtual machine experience for this chapter that is powered by Vagrant, you should just be able to execute the code in this notebook without any worries whatsoever about installing dependencies. If you are running the code from your own development envioronment, however, be advised that these examples in this chapter take advantage of a Python package called [twitter](https://github.com/sixohsix/twitter) to make API calls. You can install this package in a terminal with [pip](https://pypi.python.org/pypi/pip) with the command `pip install twitter`, preferably from within a [Python virtual environment](https://pypi.python.org/pypi/virtualenv). ",
"_____no_output_____"
],
[
"Once installed, you should be able to open up a Python interpreter (or better yet, your [IPython](http://ipython.org/) interpreter) and get rolling.",
"_____no_output_____"
],
[
"## Authorizing an application to access Twitter account data",
"_____no_output_____"
]
],
[
[
"import twitter\n\n# Go to https://developer.twitter.com/en/apps to create an app and get values\n# for these credentials, which you'll need to provide in place of these\n# empty string values that are defined as placeholders.\n# See https://developer.twitter.com/en/docs/basics/authentication/overview/oauth\n# for more information on Twitter's OAuth implementation.\n\nCONSUMER_KEY = ''\nCONSUMER_SECRET = ''\nOAUTH_TOKEN = ''\nOAUTH_TOKEN_SECRET = ''\n\nauth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,\n CONSUMER_KEY, CONSUMER_SECRET)\n\ntwitter_api = twitter.Twitter(auth=auth)\n\n# Nothing to see by displaying twitter_api except that it's now a\n# defined variable\n\nprint(twitter_api)",
"_____no_output_____"
]
],
[
[
"## Retrieving trends",
"_____no_output_____"
]
],
[
[
"# The Yahoo! Where On Earth ID for the entire world is 1.\n# See https://dev.twitter.com/docs/api/1.1/get/trends/place and\n# http://developer.yahoo.com/geo/geoplanet/\n\nWORLD_WOE_ID = 1\nUS_WOE_ID = 23424977\n\n# Prefix ID with the underscore for query string parameterization.\n# Without the underscore, the twitter package appends the ID value\n# to the URL itself as a special case keyword argument.\n\nworld_trends = twitter_api.trends.place(_id=WORLD_WOE_ID)\nus_trends = twitter_api.trends.place(_id=US_WOE_ID)\n\nprint(world_trends)\nprint()\nprint(us_trends)",
"_____no_output_____"
],
[
"for trend in world_trends[0]['trends']:\n print(trend['name'])",
"_____no_output_____"
],
[
"for trend in us_trends[0]['trends']:\n print(trend['name'])",
"_____no_output_____"
],
[
"world_trends_set = set([trend['name'] \n for trend in world_trends[0]['trends']])\n\nus_trends_set = set([trend['name'] \n for trend in us_trends[0]['trends']]) \n\ncommon_trends = world_trends_set.intersection(us_trends_set)\n\nprint(common_trends)",
"_____no_output_____"
]
],
[
[
"## Anatomy of a Tweet",
"_____no_output_____"
]
],
[
[
"import json\n\n# Set this variable to a trending topic, \n# or anything else for that matter. The example query below\n# was a trending topic when this content was being developed\n# and is used throughout the remainder of this chapter.\n\nq = '#MothersDay' \n\ncount = 100\n\n# Import unquote to prevent url encoding errors in next_results\nfrom urllib.parse import unquote\n\n# See https://dev.twitter.com/rest/reference/get/search/tweets\n\nsearch_results = twitter_api.search.tweets(q=q, count=count)\n\nstatuses = search_results['statuses']\n\n\n# Iterate through 5 more batches of results by following the cursor\nfor _ in range(5):\n print('Length of statuses', len(statuses))\n try:\n next_results = search_results['search_metadata']['next_results']\n except KeyError as e: # No more results when next_results doesn't exist\n break\n \n # Create a dictionary from next_results, which has the following form:\n # ?max_id=847960489447628799&q=%23RIPSelena&count=100&include_entities=1\n kwargs = dict([ kv.split('=') for kv in unquote(next_results[1:]).split(\"&\") ])\n \n search_results = twitter_api.search.tweets(**kwargs)\n statuses += search_results['statuses']\n\n# Show one sample search result by slicing the list...\nprint(json.dumps(statuses[0], indent=1))",
"_____no_output_____"
],
[
"for i in range(10):\n print()\n print(statuses[i]['text'])\n print('Favorites: ', statuses[i]['favorite_count'])\n print('Retweets: ', statuses[i]['retweet_count'])",
"_____no_output_____"
]
],
[
[
"## Extracting text, screen names, and hashtags from tweets",
"_____no_output_____"
]
],
[
[
"status_texts = [ status['text'] \n for status in statuses ]\n\nscreen_names = [ user_mention['screen_name'] \n for status in statuses\n for user_mention in status['entities']['user_mentions'] ]\n\nhashtags = [ hashtag['text'] \n for status in statuses\n for hashtag in status['entities']['hashtags'] ]\n\n# Compute a collection of all words from all tweets\nwords = [ w \n for t in status_texts \n for w in t.split() ]\n\n# Explore the first 5 items for each...\n\nprint(json.dumps(status_texts[0:5], indent=1))\nprint(json.dumps(screen_names[0:5], indent=1) )\nprint(json.dumps(hashtags[0:5], indent=1))\nprint(json.dumps(words[0:5], indent=1))",
"_____no_output_____"
]
],
[
[
"## Creating a basic frequency distribution from the words in tweets",
"_____no_output_____"
]
],
[
[
"from collections import Counter\n\nfor item in [words, screen_names, hashtags]:\n c = Counter(item)\n print(c.most_common()[:10]) # top 10\n print()",
"_____no_output_____"
]
],
[
[
"## Using prettytable to display tuples in a nice tabular format",
"_____no_output_____"
]
],
[
[
"from prettytable import PrettyTable\n\nfor label, data in (('Word', words), \n ('Screen Name', screen_names), \n ('Hashtag', hashtags)):\n pt = PrettyTable(field_names=[label, 'Count']) \n c = Counter(data)\n [ pt.add_row(kv) for kv in c.most_common()[:10] ]\n pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment\n print(pt)",
"_____no_output_____"
]
],
[
[
"## Calculating lexical diversity for tweets",
"_____no_output_____"
]
],
[
[
"# A function for computing lexical diversity\ndef lexical_diversity(tokens):\n return len(set(tokens))/len(tokens) \n\n# A function for computing the average number of words per tweet\ndef average_words(statuses):\n total_words = sum([ len(s.split()) for s in statuses ]) \n return total_words/len(statuses)\n\nprint(lexical_diversity(words))\nprint(lexical_diversity(screen_names))\nprint(lexical_diversity(hashtags))\nprint(average_words(status_texts))",
"_____no_output_____"
]
],
[
[
"## Finding the most popular retweets",
"_____no_output_____"
]
],
[
[
"retweets = [\n # Store out a tuple of these three values ...\n (status['retweet_count'], \n status['retweeted_status']['user']['screen_name'],\n status['retweeted_status']['id'],\n status['text']) \n \n # ... for each status ...\n for status in statuses \n \n # ... so long as the status meets this condition.\n if 'retweeted_status' in status.keys()\n ]\n\n# Slice off the first 5 from the sorted results and display each item in the tuple\n\npt = PrettyTable(field_names=['Count', 'Screen Name', 'Tweet ID', 'Text'])\n[ pt.add_row(row) for row in sorted(retweets, reverse=True)[:5] ]\npt.max_width['Text'] = 50\npt.align= 'l'\nprint(pt)",
"_____no_output_____"
]
],
[
[
"## Looking up users who have retweeted a status",
"_____no_output_____"
]
],
[
[
"# Get the original tweet id for a tweet from its retweeted_status node \n# and insert it here\n\n_retweets = twitter_api.statuses.retweets(id=862359093398261760)\nprint([r['user']['screen_name'] for r in _retweets])",
"_____no_output_____"
]
],
[
[
"## Plotting frequencies of words",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nword_counts = sorted(Counter(words).values(), reverse=True)\n\nplt.loglog(word_counts)\nplt.ylabel(\"Freq\")\nplt.xlabel(\"Word Rank\")",
"_____no_output_____"
]
],
[
[
"## Generating histograms of words, screen names, and hashtags",
"_____no_output_____"
]
],
[
[
"for label, data in (('Words', words), \n ('Screen Names', screen_names), \n ('Hashtags', hashtags)):\n\n # Build a frequency map for each set of data\n # and plot the values\n c = Counter(data)\n plt.hist(list(c.values()))\n \n # Add a title and y-label ...\n plt.title(label)\n plt.ylabel(\"Number of items in bin\")\n plt.xlabel(\"Bins (number of times an item appeared)\")\n \n # ... and display as a new figure\n plt.figure()",
"_____no_output_____"
]
],
[
[
"## Generating a histogram of retweet counts",
"_____no_output_____"
]
],
[
[
"# Using underscores while unpacking values in\n# a tuple is idiomatic for discarding them\n\ncounts = [count for count, _, _, _ in retweets]\n\nplt.hist(counts)\nplt.title('Retweets')\nplt.xlabel('Bins (number of times retweeted)')\nplt.ylabel('Number of tweets in bin')",
"_____no_output_____"
]
],
[
[
"## Sentiment Analysis",
"_____no_output_____"
]
],
[
[
"# pip install nltk\nimport nltk\nnltk.download('vader_lexicon')\n\nimport numpy as np\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer",
"_____no_output_____"
],
[
"twitter_stream = twitter.TwitterStream(auth=auth)\niterator = twitter_stream.statuses.sample()",
"_____no_output_____"
],
[
"tweets = []\nfor tweet in iterator:\n try:\n if tweet['lang'] == 'en':\n tweets.append(tweet)\n except:\n pass\n if len(tweets) == 100:\n break",
"_____no_output_____"
],
[
"analyzer = SentimentIntensityAnalyzer()",
"_____no_output_____"
],
[
"analyzer.polarity_scores('Hello')",
"_____no_output_____"
],
[
"analyzer.polarity_scores('I really enjoy this video series.')",
"_____no_output_____"
],
[
"analyzer.polarity_scores('I REALLY enjoy this video series.')",
"_____no_output_____"
],
[
"analyzer.polarity_scores('I REALLY enjoy this video series!!!')",
"_____no_output_____"
],
[
"analyzer.polarity_scores('I REALLY did not enjoy this video series!!!')",
"_____no_output_____"
],
[
"scores = np.zeros(len(tweets))\n\nfor i, t in enumerate(tweets):\n # Extract the text portion of the tweet\n text = t['text']\n \n # Measure the polarity of the tweet\n polarity = analyzer.polarity_scores(text)\n \n # Store the normalized, weighted composite score\n scores[i] = polarity['compound']",
"_____no_output_____"
],
[
"most_positive = np.argmax(scores)\nmost_negative = np.argmin(scores)",
"_____no_output_____"
],
[
"print('{0:6.3f} : \"{1}\"'.format(scores[most_positive], tweets[most_positive]['text']))",
"_____no_output_____"
],
[
"print('{0:6.3f} : \"{1}\"'.format(scores[most_negative], tweets[most_negative]['text']))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7de40df5061157fa42a509be83748c9ebb6a5fa | 15,614 | ipynb | Jupyter Notebook | docs_src/text.ipynb | holmesal/fastai | cbd2a0c91d01842fb2e780072aed510b1325d1e5 | [
"Apache-2.0"
] | 1 | 2019-01-31T08:53:34.000Z | 2019-01-31T08:53:34.000Z | docs_src/text.ipynb | holmesal/fastai | cbd2a0c91d01842fb2e780072aed510b1325d1e5 | [
"Apache-2.0"
] | null | null | null | docs_src/text.ipynb | holmesal/fastai | cbd2a0c91d01842fb2e780072aed510b1325d1e5 | [
"Apache-2.0"
] | null | null | null | 29.185047 | 617 | 0.559306 | [
[
[
"# Text models, data, and training",
"_____no_output_____"
]
],
[
[
"from fastai.gen_doc.nbdoc import *",
"_____no_output_____"
]
],
[
[
"The [`text`](/text.html#text) module of the fastai library contains all the necessary functions to define a Dataset suitable for the various NLP (Natural Language Processing) tasks and quickly generate models you can use for them. Specifically:\n- [`text.transform`](/text.transform.html#text.transform) contains all the scripts to preprocess your data, from raw text to token ids,\n- [`text.data`](/text.data.html#text.data) contains the definition of [`TextDataset`](/text.data.html#TextDataset), which the main class you'll need in NLP,\n- [`text.learner`](/text.learner.html#text.learner) contains helper functions to quickly create a language model or an RNN classifier.\n\nHave a look at the links above for full details of the API of each module, of read on for a quick overview.",
"_____no_output_____"
],
[
"## Quick Start: Training an IMDb sentiment model with *ULMFiT*",
"_____no_output_____"
],
[
"Let's start with a quick end-to-end example of training a model. We'll train a sentiment classifier on a sample of the popular IMDb data, showing 4 steps:\n\n1. Reading and viewing the IMDb data\n1. Getting your data ready for modeling\n1. Fine-tuning a language model\n1. Building a classifier",
"_____no_output_____"
],
[
"### Reading and viewing the IMDb data",
"_____no_output_____"
],
[
"First let's import everything we need for text.",
"_____no_output_____"
]
],
[
[
"from fastai import *\nfrom fastai.text import * ",
"_____no_output_____"
]
],
[
[
"Contrary to images in Computer Vision, text can't directly be transformed into numbers to be fed into a model. The first thing we need to do is to preprocess our data so that we change the raw texts to lists of words, or tokens (a step that is called tokenization) then transform these tokens into numbers (a step that is called numericalization). These numbers are then passed to embedding layers that wil convert them in arrays of floats before passing them through a model.\n\nYou can find on the web plenty of [Word Embeddings](https://en.wikipedia.org/wiki/Word_embedding) to directly convert your tokens into floats. Those word embeddings have generally be trained on a large corpus such as wikipedia. Following the work of [ULMFiT](https://arxiv.org/abs/1801.06146), the fastai library is more focused on using pre-trained Language Models and fine-tuning them. Word embeddings are just vectors of 300 or 400 floats that represent different words, but a pretrained language model not only has those, but has also been trained to get a representation of full sentences and documents.\n\nThat's why the library is structured around three steps:\n\n1. Get your data preprocessed and ready to use in a minimum amount of code,\n1. Create a language model with pretrained weights that you can fine-tune to your dataset,\n1. Create other models such as classifiers on top of the encoder of the language model.\n\nTo show examples, we have provided a small sample of the [IMDB dataset](https://www.imdb.com/interfaces/) which contains 1,000 reviews of movies with labels (positive or negative).",
"_____no_output_____"
]
],
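[
[
"# A minimal sketch of the two steps above (tokenization, then numericalization) on a couple of toy\n# strings, using the Tokenizer and Vocab classes exposed by `from fastai.text import *`. This assumes\n# the fastai 1.x API; exact signatures may differ slightly between releases.\ntexts = [\"This movie was absolutely wonderful !\", \"Worst film I have ever seen .\"]\n\ntok = Tokenizer() # tokenization: raw text -> lists of tokens\ntoks = tok.process_all(texts)\n\nvocab = Vocab.create(toks, max_vocab=1000, min_freq=1) # numericalization: tokens -> ids\nids = [vocab.numericalize(t) for t in toks]\n\ntoks[0], ids[0]",
"_____no_output_____"
]
],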
[
[
"path = untar_data(URLs.IMDB_SAMPLE)\npath",
"_____no_output_____"
]
],
[
[
"Creating a dataset from your raw texts is very simple if you have it in one of those ways\n- organized it in folders in an ImageNet style\n- organized in a csv file with labels columns and a text columns\n\nHere, the sample from imdb is in a texts csv files that looks like this:",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(path/'texts.csv')\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Getting your data ready for modeling",
"_____no_output_____"
]
],
[
[
"for file in ['train_tok.npy', 'valid_tok.npy']:\n if os.path.exists(path/'tmp'/file): os.remove(path/'tmp'/file)",
"_____no_output_____"
]
],
[
[
"To get a [`DataBunch`](/basic_data.html#DataBunch) quickly, there are also several factory methods depending on how our data is structured. They are all detailed in [`text.data`](/text.data.html#text.data), here we'll use the method <code>from_csv</code> of the [`TextLMDataBunch`](/text.data.html#TextLMDataBunch) (to get the data ready for a language model) and [`TextClasDataBunch`](/text.data.html#TextClasDataBunch) (to get the data ready for a text classifier) classes.",
"_____no_output_____"
]
],
[
[
"# Language model data\ndata_lm = TextLMDataBunch.from_csv(path, 'texts.csv')\n# Classifier model data\ndata_clas = TextClasDataBunch.from_csv(path, 'texts.csv', vocab=data_lm.train_ds.vocab, bs=32)",
"_____no_output_____"
]
],
[
[
"This does all the necessary preprocessing behing the scene. For the classifier, we also pass the vocabulary (mapping from ids to words) that we want to use: this is to ensure that `data_clas` will use the same dictionary as `data_lm`.\n\nSince this step can be a bit time-consuming, it's best to save the result with:",
"_____no_output_____"
]
],
[
[
"data_lm.save()\ndata_clas.save()",
"_____no_output_____"
]
],
[
[
"This will create a 'tmp' directory where all the computed stuff will be stored. You can then reload those results with:",
"_____no_output_____"
]
],
[
[
"data_lm = TextLMDataBunch.load(path)\ndata_clas = TextClasDataBunch.load(path, bs=32)",
"_____no_output_____"
]
],
[
[
"Note that you can load the data with different [`DataBunch`](/basic_data.html#DataBunch) parameters (batch size, `bptt`,...)",
"_____no_output_____"
],
[
"### Fine-tuning a language model",
"_____no_output_____"
],
[
"We can use the `data_lm` object we created earlier to fine-tune a pretrained language model. [fast.ai](http://www.fast.ai/) has an English model available that we can download. We can create a learner object that will directly create a model, download the pretrained weights and be ready for fine-tuning.",
"_____no_output_____"
]
],
[
[
"learn = language_model_learner(data_lm, pretrained_model=URLs.WT103, drop_mult=0.5)\nlearn.fit_one_cycle(1, 1e-2)",
"Total time: 00:04\nepoch train_loss valid_loss accuracy\n1 4.720898 4.212008 0.248862 (00:04)\n\n"
]
],
[
[
"Like a computer vision model, we can then unfreeze the model and fine-tune it.",
"_____no_output_____"
]
],
[
[
"learn.unfreeze()\nlearn.fit_one_cycle(1, 1e-3)",
"Total time: 00:22\nepoch train_loss valid_loss accuracy\n1 4.450525 4.127853 0.253167 (00:22)\n\n"
]
],
[
[
"To evaluate your language model, you can run the [`Learner.predict`](/basic_train.html#Learner.predict) method and specify the number of words you want it to guess.",
"_____no_output_____"
]
],
[
[
"learn.predict(\"This is a review about\", n_words=10)",
"Total time: 00:00\n\n"
]
],
[
[
"It doesn't make much sense (we have a tiny vocabulary here and didn't train much on it) but note that it respects basic grammar (which comes from the pretrained model).\n\nFinally we save the encoder to be able to use it for classification in the next section.",
"_____no_output_____"
]
],
[
[
"learn.save_encoder('ft_enc')",
"_____no_output_____"
]
],
[
[
"### Building a classifier",
"_____no_output_____"
],
[
"We now use the `data_clas` object we created earlier to build a classifier with our fine-tuned encoder. The learner object can be done in a single line.",
"_____no_output_____"
]
],
[
[
"learn = text_classifier_learner(data_clas, drop_mult=0.5)\nlearn.load_encoder('ft_enc')\nlearn.fit_one_cycle(1, 1e-2)",
"Total time: 00:26\nepoch train_loss valid_loss accuracy\n1 0.686503 0.632651 0.701493 (00:26)\n\n"
]
],
[
[
"Again, we can unfreeze the model and fine-tune it.",
"_____no_output_____"
]
],
[
[
"learn.freeze_to(-2)\nlearn.fit_one_cycle(1, slice(5e-3/2., 5e-3))",
"Total time: 00:33\nepoch train_loss valid_loss accuracy\n1 0.612237 0.542365 0.706468 (00:33)\n\n"
],
[
"learn.unfreeze()\nlearn.fit_one_cycle(1, slice(2e-3/100, 2e-3))",
"Total time: 00:55\nepoch train_loss valid_loss accuracy\n1 0.510760 0.479997 0.791045 (00:55)\n\n"
]
],
[
[
"Again, we can predict on a raw text by using the [`Learner.predict`](/basic_train.html#Learner.predict) method.",
"_____no_output_____"
]
],
[
[
"learn.predict(\"This was a great movie!\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7de41ea21c5707100dab6cd496c71ce13777a24 | 433,476 | ipynb | Jupyter Notebook | NLP on Financial Statements/project_5_starter.ipynb | saidulislam/AI-for-Trading | 90d6851646f4ddf60b51c98de187267316e516f9 | [
"Apache-2.0"
] | null | null | null | NLP on Financial Statements/project_5_starter.ipynb | saidulislam/AI-for-Trading | 90d6851646f4ddf60b51c98de187267316e516f9 | [
"Apache-2.0"
] | null | null | null | NLP on Financial Statements/project_5_starter.ipynb | saidulislam/AI-for-Trading | 90d6851646f4ddf60b51c98de187267316e516f9 | [
"Apache-2.0"
] | null | null | null | 115.347525 | 91,052 | 0.787529 | [
[
[
"# Project 5: NLP on Financial Statements\n## Instructions\nEach problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.\n\n## Packages\nWhen you implement the functions, you'll only need to you use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.\n\nThe other packages that we're importing are `project_helper` and `project_tests`. These are custom packages built to help you solve the problems. The `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.\n\n### Install Packages",
"_____no_output_____"
]
],
[
[
"import sys\n!{sys.executable} -m pip install -r requirements.txt",
"Collecting alphalens==0.3.2 (from -r requirements.txt (line 1))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/a5/dc/2f9cd107d0d4cf6223d37d81ddfbbdbf0d703d03669b83810fa6b97f32e5/alphalens-0.3.2.tar.gz (18.9MB)\n\u001b[K 100% |████████████████████████████████| 18.9MB 1.8MB/s eta 0:00:01 3% |█▏ | 696kB 9.8MB/s eta 0:00:02 26% |████████▌ | 5.0MB 25.4MB/s eta 0:00:01 37% |███████████▉ | 7.0MB 21.1MB/s eta 0:00:01 43% |█████████████▊ | 8.1MB 23.9MB/s eta 0:00:01 71% |███████████████████████ | 13.5MB 21.4MB/s eta 0:00:01\n\u001b[?25hCollecting nltk==3.3.0 (from -r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/50/09/3b1755d528ad9156ee7243d52aa5cd2b809ef053a0f31b53d92853dd653a/nltk-3.3.0.zip (1.4MB)\n\u001b[K 100% |████████████████████████████████| 1.4MB 8.0MB/s eta 0:00:01 20% |██████▋ | 286kB 19.6MB/s eta 0:00:01\n\u001b[?25hCollecting numpy==1.13.3 (from -r requirements.txt (line 3))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/57/a7/e3e6bd9d595125e1abbe162e323fd2d06f6f6683185294b79cd2cdb190d5/numpy-1.13.3-cp36-cp36m-manylinux1_x86_64.whl (17.0MB)\n\u001b[K 100% |████████████████████████████████| 17.0MB 1.9MB/s eta 0:00:01 2% |▉ | 440kB 19.2MB/s eta 0:00:01 8% |██▋ | 1.4MB 18.8MB/s eta 0:00:01 31% |██████████ | 5.3MB 20.1MB/s eta 0:00:01 37% |████████████ | 6.3MB 20.9MB/s eta 0:00:01 43% |█████████████▊ | 7.3MB 20.5MB/s eta 0:00:01 60% |███████████████████▌ | 10.3MB 21.4MB/s eta 0:00:01 66% |█████████████████████▎ | 11.3MB 20.9MB/s eta 0:00:01 72% |███████████████████████ | 12.2MB 22.4MB/s eta 0:00:01 77% |████████████████████████▊ | 13.1MB 21.0MB/s eta 0:00:01 83% |██████████████████████████▋ | 14.1MB 22.0MB/s eta 0:00:01\n\u001b[?25hCollecting ratelimit==2.2.0 (from -r requirements.txt (line 4))\n Downloading https://files.pythonhosted.org/packages/b5/73/956d739706da2f74891ba46391381ce7e680dce27cce90df7c706512d5bf/ratelimit-2.2.0.tar.gz\nRequirement already satisfied: requests==2.18.4 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 5)) (2.18.4)\nRequirement already satisfied: scikit-learn==0.19.1 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 6)) (0.19.1)\nRequirement already satisfied: six==1.11.0 in /opt/conda/lib/python3.6/site-packages (from -r requirements.txt (line 7)) (1.11.0)\nCollecting tqdm==4.19.5 (from -r requirements.txt (line 8))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/71/3c/341b4fa23cb3abc335207dba057c790f3bb329f6757e1fcd5d347bcf8308/tqdm-4.19.5-py2.py3-none-any.whl (51kB)\n\u001b[K 100% |████████████████████████████████| 61kB 12.4MB/s ta 0:00:01\n\u001b[?25hRequirement already satisfied: matplotlib>=1.4.0 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r requirements.txt (line 1)) (2.1.0)\nRequirement already satisfied: pandas>=0.18.0 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r requirements.txt (line 1)) (0.23.3)\nRequirement already satisfied: scipy>=0.14.0 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r requirements.txt (line 1)) (1.2.1)\nRequirement already satisfied: seaborn>=0.6.0 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r requirements.txt (line 1)) (0.8.1)\nRequirement already satisfied: statsmodels>=0.6.1 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r requirements.txt (line 1)) (0.8.0)\nRequirement already satisfied: IPython>=3.2.3 in /opt/conda/lib/python3.6/site-packages (from alphalens==0.3.2->-r 
requirements.txt (line 1)) (6.5.0)\nRequirement already satisfied: chardet<3.1.0,>=3.0.2 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 5)) (3.0.4)\nRequirement already satisfied: idna<2.7,>=2.5 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 5)) (2.6)\nRequirement already satisfied: urllib3<1.23,>=1.21.1 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 5)) (1.22)\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.6/site-packages (from requests==2.18.4->-r requirements.txt (line 5)) (2019.11.28)\nRequirement already satisfied: python-dateutil>=2.0 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=1.4.0->alphalens==0.3.2->-r requirements.txt (line 1)) (2.6.1)\nRequirement already satisfied: pytz in /opt/conda/lib/python3.6/site-packages (from matplotlib>=1.4.0->alphalens==0.3.2->-r requirements.txt (line 1)) (2017.3)\nRequirement already satisfied: cycler>=0.10 in /opt/conda/lib/python3.6/site-packages/cycler-0.10.0-py3.6.egg (from matplotlib>=1.4.0->alphalens==0.3.2->-r requirements.txt (line 1)) (0.10.0)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /opt/conda/lib/python3.6/site-packages (from matplotlib>=1.4.0->alphalens==0.3.2->-r requirements.txt (line 1)) (2.2.0)\nRequirement already satisfied: backcall in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.1.0)\nRequirement already satisfied: decorator in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (4.0.11)\nRequirement already satisfied: pexpect; sys_platform != \"win32\" in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (4.3.1)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.15 in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (1.0.15)\nRequirement already satisfied: simplegeneric>0.8 in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.8.1)\nRequirement already satisfied: traitlets>=4.2 in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (4.3.2)\nRequirement already satisfied: setuptools>=18.5 in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (38.4.0)\nRequirement already satisfied: pygments in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (2.2.0)\nRequirement already satisfied: pickleshare in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.7.4)\nRequirement already satisfied: jedi>=0.10 in /opt/conda/lib/python3.6/site-packages (from IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.10.2)\nRequirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.6/site-packages (from pexpect; sys_platform != \"win32\"->IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.5.2)\nRequirement already satisfied: wcwidth in /opt/conda/lib/python3.6/site-packages (from prompt-toolkit<2.0.0,>=1.0.15->IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.1.7)\nRequirement already satisfied: ipython-genutils in 
/opt/conda/lib/python3.6/site-packages (from traitlets>=4.2->IPython>=3.2.3->alphalens==0.3.2->-r requirements.txt (line 1)) (0.2.0)\nBuilding wheels for collected packages: alphalens, nltk, ratelimit\n Running setup.py bdist_wheel for alphalens ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/77/1e/9a/223b4c94d7f564f25d94b48ca5b9c53e3034016ece3fd8c8c1\n Running setup.py bdist_wheel for nltk ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/d1/ab/40/3bceea46922767e42986aef7606a600538ca80de6062dc266c\n Running setup.py bdist_wheel for ratelimit ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/a6/2a/13/3c6e42757ca0b6873a60e0697d30f7dd9d521a52874c44f201\nSuccessfully built alphalens nltk ratelimit\n\u001b[31mtensorflow 1.3.0 requires tensorflow-tensorboard<0.2.0,>=0.1.0, which is not installed.\u001b[0m\n\u001b[31mmoviepy 0.2.3.2 has requirement tqdm==4.11.2, but you'll have tqdm 4.19.5 which is incompatible.\u001b[0m\nInstalling collected packages: numpy, alphalens, nltk, ratelimit, tqdm\n Found existing installation: numpy 1.12.1\n Uninstalling numpy-1.12.1:\n"
]
],
[
[
"### Load Packages",
"_____no_output_____"
]
],
[
[
"import nltk\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport pprint\nimport project_helper\nimport project_tests\n\nfrom tqdm import tqdm",
"_____no_output_____"
]
],
[
[
"### Download NLP Corpora\nYou'll need two corpora to run this project: the stopwords corpus for removing stopwords and wordnet for lemmatizing.",
"_____no_output_____"
]
],
[
[
"nltk.download('stopwords')\nnltk.download('wordnet')",
"[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Unzipping corpora/stopwords.zip.\n[nltk_data] Downloading package wordnet to /root/nltk_data...\n[nltk_data] Unzipping corpora/wordnet.zip.\n"
]
],
[
[
"## Get 10ks\nWe'll be running NLP analysis on 10-k documents. To do that, we first need to download the documents. For this project, we'll download 10-ks for a few companies. To lookup documents for these companies, we'll use their CIK. If you would like to run this against other stocks, we've provided the dict `additional_cik` for more stocks. However, the more stocks you try, the long it will take to run.",
"_____no_output_____"
]
],
[
[
"cik_lookup = {\n 'AMZN': '0001018724',\n 'BMY': '0000014272', \n 'CNP': '0001130310',\n 'CVX': '0000093410',\n 'FL': '0000850209',\n 'FRT': '0000034903',\n 'HON': '0000773840'}\n\nadditional_cik = {\n 'AEP': '0000004904',\n 'AXP': '0000004962',\n 'BA': '0000012927', \n 'BK': '0001390777',\n 'CAT': '0000018230',\n 'DE': '0000315189',\n 'DIS': '0001001039',\n 'DTE': '0000936340',\n 'ED': '0001047862',\n 'EMR': '0000032604',\n 'ETN': '0001551182',\n 'GE': '0000040545',\n 'IBM': '0000051143',\n 'IP': '0000051434',\n 'JNJ': '0000200406',\n 'KO': '0000021344',\n 'LLY': '0000059478',\n 'MCD': '0000063908',\n 'MO': '0000764180',\n 'MRK': '0000310158',\n 'MRO': '0000101778',\n 'PCG': '0001004980',\n 'PEP': '0000077476',\n 'PFE': '0000078003',\n 'PG': '0000080424',\n 'PNR': '0000077360',\n 'SYY': '0000096021',\n 'TXN': '0000097476',\n 'UTX': '0000101829',\n 'WFC': '0000072971',\n 'WMT': '0000104169',\n 'WY': '0000106535',\n 'XOM': '0000034088'}",
"_____no_output_____"
]
],
[
[
"### Get list of 10-ks\nThe SEC has a limit on the number of calls you can make to the website per second. In order to avoid hiding that limit, we've created the `SecAPI` class. This will cache data from the SEC and prevent you from going over the limit.",
"_____no_output_____"
]
],
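[
[
"# Illustrative sketch only: the real SecAPI lives in project_helper and may be implemented\n# differently. The idea is simply to cache responses and space out requests so the SEC's\n# per-second call limit is never exceeded.\nimport time\n\nimport requests\n\n\nclass ThrottledCache:\n    def __init__(self, min_interval=0.5):\n        self._cache = {} # url -> response text\n        self._min_interval = min_interval # seconds to wait between uncached requests\n        self._last_call = 0.0\n\n    def get(self, url):\n        if url in self._cache: # cached: no network call at all\n            return self._cache[url]\n        wait = self._min_interval - (time.time() - self._last_call)\n        if wait > 0: # throttle: sleep until the next call is allowed\n            time.sleep(wait)\n        self._last_call = time.time()\n        text = requests.get(url).text\n        self._cache[url] = text\n        return text",
"_____no_output_____"
]
],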
[
[
"sec_api = project_helper.SecAPI()",
"_____no_output_____"
]
],
[
[
"With the class constructed, let's pull a list of filled 10-ks from the SEC for each company.",
"_____no_output_____"
]
],
[
[
"from bs4 import BeautifulSoup\n\ndef get_sec_data(cik, doc_type, start=0, count=60):\n newest_pricing_data = pd.to_datetime('2018-01-01')\n rss_url = 'https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany' \\\n '&CIK={}&type={}&start={}&count={}&owner=exclude&output=atom' \\\n .format(cik, doc_type, start, count)\n sec_data = sec_api.get(rss_url)\n feed = BeautifulSoup(sec_data.encode('ascii'), 'xml').feed\n entries = [\n (\n entry.content.find('filing-href').getText(),\n entry.content.find('filing-type').getText(),\n entry.content.find('filing-date').getText())\n for entry in feed.find_all('entry', recursive=False)\n if pd.to_datetime(entry.content.find('filing-date').getText()) <= newest_pricing_data]\n\n return entries",
"_____no_output_____"
]
],
[
[
"Let's pull the list using the `get_sec_data` function, then display some of the results. For displaying some of the data, we'll use Amazon as an example. ",
"_____no_output_____"
]
],
[
[
"example_ticker = 'AMZN'\nsec_data = {}\n\nfor ticker, cik in cik_lookup.items():\n sec_data[ticker] = get_sec_data(cik, '10-K')\n\npprint.pprint(sec_data[example_ticker][:5])",
"[('https://www.sec.gov/Archives/edgar/data/1018724/000101872417000011/0001018724-17-000011-index.htm',\n '10-K',\n '2017-02-10'),\n ('https://www.sec.gov/Archives/edgar/data/1018724/000101872416000172/0001018724-16-000172-index.htm',\n '10-K',\n '2016-01-29'),\n ('https://www.sec.gov/Archives/edgar/data/1018724/000101872415000006/0001018724-15-000006-index.htm',\n '10-K',\n '2015-01-30'),\n ('https://www.sec.gov/Archives/edgar/data/1018724/000101872414000006/0001018724-14-000006-index.htm',\n '10-K',\n '2014-01-31'),\n ('https://www.sec.gov/Archives/edgar/data/1018724/000119312513028520/0001193125-13-028520-index.htm',\n '10-K',\n '2013-01-30')]\n"
]
],
[
[
"### Download 10-ks\nAs you see, this is a list of urls. These urls point to a file that contains metadata related to each filling. Since we don't care about the metadata, we'll pull the filling by replacing the url with the filling url.",
"_____no_output_____"
]
],
[
[
"raw_fillings_by_ticker = {}\n\nfor ticker, data in sec_data.items():\n raw_fillings_by_ticker[ticker] = {}\n for index_url, file_type, file_date in tqdm(data, desc='Downloading {} Fillings'.format(ticker), unit='filling'):\n if (file_type == '10-K'):\n file_url = index_url.replace('-index.htm', '.txt').replace('.txtl', '.txt') \n \n raw_fillings_by_ticker[ticker][file_date] = sec_api.get(file_url)\n\n\nprint('Example Document:\\n\\n{}...'.format(next(iter(raw_fillings_by_ticker[example_ticker].values()))[:1000]))",
"Downloading AMZN Fillings: 100%|██████████| 22/22 [00:03<00:00, 6.59filling/s]\nDownloading BMY Fillings: 100%|██████████| 27/27 [00:05<00:00, 4.56filling/s]\nDownloading CNP Fillings: 100%|██████████| 19/19 [00:03<00:00, 5.34filling/s]\nDownloading CVX Fillings: 100%|██████████| 25/25 [00:05<00:00, 4.65filling/s]\nDownloading FL Fillings: 100%|██████████| 22/22 [00:03<00:00, 5.65filling/s]\nDownloading FRT Fillings: 100%|██████████| 29/29 [00:03<00:00, 7.95filling/s]\nDownloading HON Fillings: 100%|██████████| 25/25 [00:04<00:00, 5.04filling/s]"
]
],
[
[
"### Get Documents\nWith theses fillings downloaded, we want to break them into their associated documents. These documents are sectioned off in the fillings with the tags `<DOCUMENT>` for the start of each document and `</DOCUMENT>` for the end of each document. There's no overlap with these documents, so each `</DOCUMENT>` tag should come after the `<DOCUMENT>` with no `<DOCUMENT>` tag in between.\n\nImplement `get_documents` to return a list of these documents from a filling. Make sure not to include the tag in the returned document text.",
"_____no_output_____"
]
],
[
[
"import re\n\n\ndef get_documents(text):\n \"\"\"\n Extract the documents from the text\n\n Parameters\n ----------\n text : str\n The text with the document strings inside\n\n Returns\n -------\n extracted_docs : list of str\n The document strings found in `text`\n \"\"\"\n \n # TODO: Implement\n start_doc = re.compile(r'<DOCUMENT>')\n end_doc = re.compile(r'</DOCUMENT>')\n \n start_idx = [x.end() for x in re.finditer(start_doc, text)]\n end_idx = [x.start() for x in re.finditer(end_doc, text)]\n \n extracted_docs = []\n for doc_start, doc_end in zip(start_idx, end_idx):\n extracted_docs.append(text[doc_start:doc_end])\n \n return extracted_docs\n\n\nproject_tests.test_get_documents(get_documents)",
"Tests Passed\n"
]
],
[
[
"With the `get_documents` function implemented, let's extract all the documents.",
"_____no_output_____"
]
],
[
[
"filling_documents_by_ticker = {}\n\nfor ticker, raw_fillings in raw_fillings_by_ticker.items():\n filling_documents_by_ticker[ticker] = {}\n for file_date, filling in tqdm(raw_fillings.items(), desc='Getting Documents from {} Fillings'.format(ticker), unit='filling'):\n filling_documents_by_ticker[ticker][file_date] = get_documents(filling)\n\n\nprint('\\n\\n'.join([\n 'Document {} Filed on {}:\\n{}...'.format(doc_i, file_date, doc[:200])\n for file_date, docs in filling_documents_by_ticker[example_ticker].items()\n for doc_i, doc in enumerate(docs)][:3]))",
"Getting Documents from AMZN Fillings: 100%|██████████| 17/17 [00:00<00:00, 41.88filling/s]\nGetting Documents from BMY Fillings: 100%|██████████| 23/23 [00:01<00:00, 20.93filling/s]\nGetting Documents from CNP Fillings: 100%|██████████| 15/15 [00:00<00:00, 21.72filling/s]\nGetting Documents from CVX Fillings: 100%|██████████| 21/21 [00:00<00:00, 23.42filling/s]\nGetting Documents from FL Fillings: 100%|██████████| 16/16 [00:00<00:00, 27.37filling/s]\nGetting Documents from FRT Fillings: 100%|██████████| 19/19 [00:00<00:00, 34.29filling/s]\nGetting Documents from HON Fillings: 100%|██████████| 20/20 [00:00<00:00, 28.93filling/s]"
]
],
[
[
"### Get Document Types\nNow that we have all the documents, we want to find the 10-k form in this 10-k filing. Implement the `get_document_type` function to return the type of document given. The document type is located on a line with the `<TYPE>` tag. For example, a form of type \"TEST\" would have the line `<TYPE>TEST`. Make sure to return the type as lowercase, so this example would be returned as \"test\".",
"_____no_output_____"
]
],
[
[
"def get_document_type(doc):\n \"\"\"\n Return the document type lowercased\n\n Parameters\n ----------\n doc : str\n The document string\n\n Returns\n -------\n doc_type : str\n The document type lowercased\n \"\"\"\n \n # TODO: Implement\n # (?<= positive lookbehind. matches a group before the main expression\n # without including it in the result\n # \\w alpha numeric and underscore\n # + 1 or more\n # [^\\n]+ 1 or more, anything but new line\n\n regex = re.compile(r'(?<=<TYPE>)\\w+[^\\n]+') \n doc_type = re.search(regex, doc).group(0).lower()\n \n return doc_type\n\n\nproject_tests.test_get_document_type(get_document_type)",
"Tests Passed\n"
]
],
[
[
"With the `get_document_type` function, we'll filter out all non 10-k documents.",
"_____no_output_____"
]
],
[
[
"ten_ks_by_ticker = {}\n\nfor ticker, filling_documents in filling_documents_by_ticker.items():\n ten_ks_by_ticker[ticker] = []\n for file_date, documents in filling_documents.items():\n for document in documents:\n if get_document_type(document) == '10-k':\n ten_ks_by_ticker[ticker].append({\n 'cik': cik_lookup[ticker],\n 'file': document,\n 'file_date': file_date})\n\n\nproject_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['cik', 'file', 'file_date'])",
"[\n {\n cik: '0001018724'\n file: '\\n<TYPE>10-K\\n<SEQUENCE>1\\n<FILENAME>amzn-2016123...\n file_date: '2017-02-10'},\n {\n cik: '0001018724'\n file: '\\n<TYPE>10-K\\n<SEQUENCE>1\\n<FILENAME>amzn-2015123...\n file_date: '2016-01-29'},\n {\n cik: '0001018724'\n file: '\\n<TYPE>10-K\\n<SEQUENCE>1\\n<FILENAME>amzn-2014123...\n file_date: '2015-01-30'},\n {\n cik: '0001018724'\n file: '\\n<TYPE>10-K\\n<SEQUENCE>1\\n<FILENAME>amzn-2013123...\n file_date: '2014-01-31'},\n {\n cik: '0001018724'\n file: '\\n<TYPE>10-K\\n<SEQUENCE>1\\n<FILENAME>d445434d10k....\n file_date: '2013-01-30'},\n]\n"
]
],
[
[
"## Preprocess the Data\n### Clean Up\nAs you can see, the text for the documents are very messy. To clean this up, we'll remove the html and lowercase all the text.",
"_____no_output_____"
]
],
[
[
"def remove_html_tags(text):\n text = BeautifulSoup(text, 'html.parser').get_text()\n \n return text\n\n\ndef clean_text(text):\n text = text.lower()\n text = remove_html_tags(text)\n \n return text",
"_____no_output_____"
]
],
[
[
"Using the `clean_text` function, we'll clean up all the documents.",
"_____no_output_____"
]
],
[
[
"for ticker, ten_ks in ten_ks_by_ticker.items():\n for ten_k in tqdm(ten_ks, desc='Cleaning {} 10-Ks'.format(ticker), unit='10-K'):\n ten_k['file_clean'] = clean_text(ten_k['file'])\n\n\nproject_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_clean'])",
"Cleaning AMZN 10-Ks: 100%|██████████| 17/17 [00:35<00:00, 2.08s/10-K]\nCleaning BMY 10-Ks: 100%|██████████| 23/23 [01:15<00:00, 3.30s/10-K]\nCleaning CNP 10-Ks: 100%|██████████| 15/15 [00:57<00:00, 3.83s/10-K]\nCleaning CVX 10-Ks: 100%|██████████| 21/21 [01:52<00:00, 5.36s/10-K]\nCleaning FL 10-Ks: 100%|██████████| 16/16 [00:25<00:00, 1.61s/10-K]\nCleaning FRT 10-Ks: 100%|██████████| 19/19 [00:55<00:00, 2.93s/10-K]\nCleaning HON 10-Ks: 100%|██████████| 20/20 [01:00<00:00, 3.04s/10-K]"
]
],
[
[
"### Lemmatize\nWith the text cleaned up, it's time to distill the verbs down. Implement the `lemmatize_words` function to lemmatize verbs in the list of words provided.",
"_____no_output_____"
]
],
[
[
"from nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import wordnet\n\n\ndef lemmatize_words(words):\n \"\"\"\n Lemmatize words \n\n Parameters\n ----------\n words : list of str\n List of words\n\n Returns\n -------\n lemmatized_words : list of str\n List of lemmatized words\n \"\"\"\n \n # TODO: Implement\n WNL = WordNetLemmatizer()\n lemmatized_words = [WNL.lemmatize(w, 'v') for w in words]\n \n return lemmatized_words\n\n\nproject_tests.test_lemmatize_words(lemmatize_words)",
"Tests Passed\n"
]
],
[
[
"With the `lemmatize_words` function implemented, let's lemmatize all the data.",
"_____no_output_____"
]
],
[
[
"word_pattern = re.compile('\\w+')\n\nfor ticker, ten_ks in ten_ks_by_ticker.items():\n for ten_k in tqdm(ten_ks, desc='Lemmatize {} 10-Ks'.format(ticker), unit='10-K'):\n ten_k['file_lemma'] = lemmatize_words(word_pattern.findall(ten_k['file_clean']))\n\n\nproject_helper.print_ten_k_data(ten_ks_by_ticker[example_ticker][:5], ['file_lemma'])",
"Lemmatize AMZN 10-Ks: 100%|██████████| 17/17 [00:04<00:00, 3.9110-K/s]\nLemmatize BMY 10-Ks: 100%|██████████| 23/23 [00:09<00:00, 2.4010-K/s]\nLemmatize CNP 10-Ks: 100%|██████████| 15/15 [00:07<00:00, 1.9210-K/s]\nLemmatize CVX 10-Ks: 100%|██████████| 21/21 [00:09<00:00, 2.3310-K/s]\nLemmatize FL 10-Ks: 100%|██████████| 16/16 [00:03<00:00, 4.4210-K/s]\nLemmatize FRT 10-Ks: 100%|██████████| 19/19 [00:05<00:00, 3.3110-K/s]\nLemmatize HON 10-Ks: 100%|██████████| 20/20 [00:05<00:00, 3.6610-K/s]"
]
],
[
[
"### Remove Stopwords",
"_____no_output_____"
]
],
[
[
"from nltk.corpus import stopwords\n\n\nlemma_english_stopwords = lemmatize_words(stopwords.words('english'))\n\nfor ticker, ten_ks in ten_ks_by_ticker.items():\n for ten_k in tqdm(ten_ks, desc='Remove Stop Words for {} 10-Ks'.format(ticker), unit='10-K'):\n ten_k['file_lemma'] = [word for word in ten_k['file_lemma'] if word not in lemma_english_stopwords]\n\n\nprint('Stop Words Removed')",
"Remove Stop Words for AMZN 10-Ks: 100%|██████████| 17/17 [00:01<00:00, 9.2810-K/s]\nRemove Stop Words for BMY 10-Ks: 100%|██████████| 23/23 [00:04<00:00, 5.6110-K/s]\nRemove Stop Words for CNP 10-Ks: 100%|██████████| 15/15 [00:03<00:00, 4.5310-K/s]\nRemove Stop Words for CVX 10-Ks: 100%|██████████| 21/21 [00:03<00:00, 5.2910-K/s]\nRemove Stop Words for FL 10-Ks: 100%|██████████| 16/16 [00:01<00:00, 10.2810-K/s]\nRemove Stop Words for FRT 10-Ks: 100%|██████████| 19/19 [00:02<00:00, 7.5110-K/s]\nRemove Stop Words for HON 10-Ks: 100%|██████████| 20/20 [00:02<00:00, 8.7210-K/s]"
]
],
[
[
"## Analysis on 10ks\n### Loughran McDonald Sentiment Word Lists\nWe'll be using the Loughran and McDonald sentiment word lists. These word lists cover the following sentiment:\n- Negative \n- Positive\n- Uncertainty\n- Litigious\n- Constraining\n- Superfluous\n- Modal\n\nThis will allow us to do the sentiment analysis on the 10-ks. Let's first load these word lists. We'll be looking into a few of these sentiments.",
"_____no_output_____"
]
],
[
[
"import os\n\n\nsentiments = ['negative', 'positive', 'uncertainty', 'litigious', 'constraining', 'interesting']\n\nsentiment_df = pd.read_csv(os.path.join('..', '..', 'data', 'project_5_loughran_mcdonald', 'loughran_mcdonald_master_dic_2016.csv'))\nsentiment_df.columns = [column.lower() for column in sentiment_df.columns] # Lowercase the columns for ease of use\n\n# Remove unused information\nsentiment_df = sentiment_df[sentiments + ['word']]\nsentiment_df[sentiments] = sentiment_df[sentiments].astype(bool)\nsentiment_df = sentiment_df[(sentiment_df[sentiments]).any(1)]\n\n# Apply the same preprocessing to these words as the 10-k words\nsentiment_df['word'] = lemmatize_words(sentiment_df['word'].str.lower())\nsentiment_df = sentiment_df.drop_duplicates('word')\n\n\nsentiment_df.head()",
"_____no_output_____"
]
],
[
[
"### Bag of Words\nusing the sentiment word lists, let's generate sentiment bag of words from the 10-k documents. Implement `get_bag_of_words` to generate a bag of words that counts the number of sentiment words in each doc. You can ignore words that are not in `sentiment_words`.",
"_____no_output_____"
]
],
[
[
"from collections import defaultdict, Counter\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\ndef get_bag_of_words(sentiment_words, docs):\n \"\"\"\n Generate a bag of words from documents for a certain sentiment\n\n Parameters\n ----------\n sentiment_words: Pandas Series\n Words that signify a certain sentiment\n docs : list of str\n List of documents used to generate bag of words\n\n Returns\n -------\n bag_of_words : 2-d Numpy Ndarray of int\n Bag of words sentiment for each document\n The first dimension is the document.\n The second dimension is the word.\n \"\"\"\n \n # TODO: Implement\n # filter out words not in sentiment_words\n vectorizer = CountVectorizer(vocabulary=sentiment_words.values)\n \n \n word_matrix = vectorizer.fit_transform(docs)\n bag_of_words = word_matrix.toarray()\n \n return bag_of_words\n\n\nproject_tests.test_get_bag_of_words(get_bag_of_words)",
"Tests Passed\n"
]
],
[
[
"Using the `get_bag_of_words` function, we'll generate a bag of words for all the documents.",
"_____no_output_____"
]
],
[
[
"sentiment_bow_ten_ks = {}\n\nfor ticker, ten_ks in ten_ks_by_ticker.items():\n lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]\n \n sentiment_bow_ten_ks[ticker] = {\n sentiment: get_bag_of_words(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)\n for sentiment in sentiments}\n\n\nproject_helper.print_ten_k_data([sentiment_bow_ten_ks[example_ticker]], sentiments)",
"[\n {\n negative: '[[0 0 0 ..., 0 0 0]\\n [0 0 0 ..., 0 0 0]\\n [0 0 0...\n positive: '[[16 0 0 ..., 0 0 0]\\n [16 0 0 ..., 0 0 ...\n uncertainty: '[[0 0 0 ..., 1 1 3]\\n [0 0 0 ..., 1 1 3]\\n [0 0 0...\n litigious: '[[0 0 0 ..., 0 0 0]\\n [0 0 0 ..., 0 0 0]\\n [0 0 0...\n constraining: '[[0 0 0 ..., 0 0 2]\\n [0 0 0 ..., 0 0 2]\\n [0 0 0...\n interesting: '[[2 0 0 ..., 0 0 0]\\n [2 0 0 ..., 0 0 0]\\n [2 0 0...},\n]\n"
]
],
[
[
"### Jaccard Similarity\nUsing the bag of words, let's calculate the jaccard similarity on the bag of words and plot it over time. Implement `get_jaccard_similarity` to return the jaccard similarities between each tick in time. Since the input, `bag_of_words_matrix`, is a bag of words for each time period in order, you just need to compute the jaccard similarities for each neighboring bag of words. Make sure to turn the bag of words into a boolean array when calculating the jaccard similarity.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import jaccard_similarity_score\n\n\ndef get_jaccard_similarity(bag_of_words_matrix):\n \"\"\"\n Get jaccard similarities for neighboring documents\n\n Parameters\n ----------\n bag_of_words : 2-d Numpy Ndarray of int\n Bag of words sentiment for each document\n The first dimension is the document.\n The second dimension is the word.\n\n Returns\n -------\n jaccard_similarities : list of float\n Jaccard similarities for neighboring documents\n \"\"\"\n \n # TODO: Implement\n jaccard_similarities = []\n bag_of_words_matrix_bool = bag_of_words_matrix.astype(bool)\n \n # compute jaccard similary for neighboring docs\n for i in range(bag_of_words_matrix.shape[0]-1):\n jaccard_similarities.append(jaccard_similarity_score(bag_of_words_matrix_bool[i], bag_of_words_matrix_bool[i+1]))\n \n return jaccard_similarities\n\n\nproject_tests.test_get_jaccard_similarity(get_jaccard_similarity)",
"Tests Passed\n"
]
],
[
[
"Using the `get_jaccard_similarity` function, let's plot the similarities over time.",
"_____no_output_____"
]
],
[
[
"# Get dates for the universe\nfile_dates = {\n ticker: [ten_k['file_date'] for ten_k in ten_ks]\n for ticker, ten_ks in ten_ks_by_ticker.items()} \n\njaccard_similarities = {\n ticker: {\n sentiment_name: get_jaccard_similarity(sentiment_values)\n for sentiment_name, sentiment_values in ten_k_sentiments.items()}\n for ticker, ten_k_sentiments in sentiment_bow_ten_ks.items()}\n\n\nproject_helper.plot_similarities(\n [jaccard_similarities[example_ticker][sentiment] for sentiment in sentiments],\n file_dates[example_ticker][1:],\n 'Jaccard Similarities for {} Sentiment'.format(example_ticker),\n sentiments)",
"_____no_output_____"
]
],
[
[
"### TFIDF\nusing the sentiment word lists, let's generate sentiment TFIDF from the 10-k documents. Implement `get_tfidf` to generate TFIDF from each document, using sentiment words as the terms. You can ignore words that are not in `sentiment_words`.",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import TfidfVectorizer\n\n\ndef get_tfidf(sentiment_words, docs):\n \"\"\"\n Generate TFIDF values from documents for a certain sentiment\n\n Parameters\n ----------\n sentiment_words: Pandas Series\n Words that signify a certain sentiment\n docs : list of str\n List of documents used to generate bag of words\n\n Returns\n -------\n tfidf : 2-d Numpy Ndarray of float\n TFIDF sentiment for each document\n The first dimension is the document.\n The second dimension is the word.\n \"\"\"\n \n # TODO: Implement\n vectorizer = TfidfVectorizer(vocabulary=sentiment_words.values)\n \n # build tfidf matrix\n tfidf = vectorizer.fit_transform(docs)\n tfidf = tfidf.toarray()\n \n return tfidf\n\n\nproject_tests.test_get_tfidf(get_tfidf)",
"Tests Passed\n"
]
],
[
[
"Using the `get_tfidf` function, let's generate the TFIDF values for all the documents.",
"_____no_output_____"
]
],
[
[
"sentiment_tfidf_ten_ks = {}\n\nfor ticker, ten_ks in ten_ks_by_ticker.items():\n lemma_docs = [' '.join(ten_k['file_lemma']) for ten_k in ten_ks]\n \n sentiment_tfidf_ten_ks[ticker] = {\n sentiment: get_tfidf(sentiment_df[sentiment_df[sentiment]]['word'], lemma_docs)\n for sentiment in sentiments}\n\n \nproject_helper.print_ten_k_data([sentiment_tfidf_ten_ks[example_ticker]], sentiments)",
"[\n {\n negative: '[[ 0. 0. 0. ..., 0. ...\n positive: '[[ 0.22288432 0. 0. ..., 0. ...\n uncertainty: '[[ 0. 0. 0. ..., 0.005...\n litigious: '[[ 0. 0. 0. ..., 0. 0. 0.]\\n [ 0. 0. 0. .....\n constraining: '[[ 0. 0. 0. ..., 0. ...\n interesting: '[[ 0.01673784 0. 0. ..., 0. ...},\n]\n"
]
],
[
[
"### Cosine Similarity\nUsing the TFIDF values, we'll calculate the cosine similarity and plot it over time. Implement `get_cosine_similarity` to return the cosine similarities between each tick in time. Since the input, `tfidf_matrix`, is a TFIDF vector for each time period in order, you just need to computer the cosine similarities for each neighboring vector.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics.pairwise import cosine_similarity\n\n\ndef get_cosine_similarity(tfidf_matrix):\n \"\"\"\n Get cosine similarities for each neighboring TFIDF vector/document\n\n Parameters\n ----------\n tfidf : 2-d Numpy Ndarray of float\n TFIDF sentiment for each document\n The first dimension is the document.\n The second dimension is the word.\n\n Returns\n -------\n cosine_similarities : list of float\n Cosine similarities for neighboring documents\n \"\"\"\n \n # TODO: Implement\n cosine_similarities = list(np.diag(cosine_similarity(tfidf_matrix, tfidf_matrix), k=1))\n \n return cosine_similarities\n\n\nproject_tests.test_get_cosine_similarity(get_cosine_similarity)",
"Tests Passed\n"
]
],
[
[
"Let's plot the cosine similarities over time.",
"_____no_output_____"
]
],
[
[
"cosine_similarities = {\n ticker: {\n sentiment_name: get_cosine_similarity(sentiment_values)\n for sentiment_name, sentiment_values in ten_k_sentiments.items()}\n for ticker, ten_k_sentiments in sentiment_tfidf_ten_ks.items()}\n\n\nproject_helper.plot_similarities(\n [cosine_similarities[example_ticker][sentiment] for sentiment in sentiments],\n file_dates[example_ticker][1:],\n 'Cosine Similarities for {} Sentiment'.format(example_ticker),\n sentiments)",
"_____no_output_____"
]
],
[
[
"## Evaluate Alpha Factors\nJust like we did in project 4, let's evaluate the alpha factors. For this section, we'll just be looking at the cosine similarities, but it can be applied to the jaccard similarities as well.\n### Price Data\nLet's get yearly pricing to run the factor against, since 10-Ks are produced annually.",
"_____no_output_____"
]
],
[
[
"pricing = pd.read_csv('../../data/project_5_yr/yr-quotemedia.csv', parse_dates=['date'])\npricing = pricing.pivot(index='date', columns='ticker', values='adj_close')\n\n\npricing",
"_____no_output_____"
]
],
[
[
"### Dict to DataFrame\nThe alphalens library uses dataframes, so we we'll need to turn our dictionary into a dataframe. ",
"_____no_output_____"
]
],
[
[
"cosine_similarities_df_dict = {'date': [], 'ticker': [], 'sentiment': [], 'value': []}\n\n\nfor ticker, ten_k_sentiments in cosine_similarities.items():\n for sentiment_name, sentiment_values in ten_k_sentiments.items():\n for sentiment_values, sentiment_value in enumerate(sentiment_values):\n cosine_similarities_df_dict['ticker'].append(ticker)\n cosine_similarities_df_dict['sentiment'].append(sentiment_name)\n cosine_similarities_df_dict['value'].append(sentiment_value)\n cosine_similarities_df_dict['date'].append(file_dates[ticker][1:][sentiment_values])\n\ncosine_similarities_df = pd.DataFrame(cosine_similarities_df_dict)\ncosine_similarities_df['date'] = pd.DatetimeIndex(cosine_similarities_df['date']).year\ncosine_similarities_df['date'] = pd.to_datetime(cosine_similarities_df['date'], format='%Y')\n\n\ncosine_similarities_df.head()",
"_____no_output_____"
]
],
[
[
"### Alphalens Format\nIn order to use a lot of the alphalens functions, we need to aligned the indices and convert the time to unix timestamp. In this next cell, we'll do just that.",
"_____no_output_____"
]
],
[
[
"import alphalens as al\n\n\nfactor_data = {}\nskipped_sentiments = []\n\nfor sentiment in sentiments:\n cs_df = cosine_similarities_df[(cosine_similarities_df['sentiment'] == sentiment)]\n cs_df = cs_df.pivot(index='date', columns='ticker', values='value')\n\n try:\n data = al.utils.get_clean_factor_and_forward_returns(cs_df.stack(), pricing, quantiles=5, bins=None, periods=[1])\n factor_data[sentiment] = data\n except:\n skipped_sentiments.append(sentiment)\n\nif skipped_sentiments:\n print('\\nSkipped the following sentiments:\\n{}'.format('\\n'.join(skipped_sentiments)))\nfactor_data[sentiments[0]].head()",
"/opt/conda/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.\n from pandas.core import datetools\n"
]
],
[
[
"### Alphalens Format with Unix Time\nAlphalen's `factor_rank_autocorrelation` and `mean_return_by_quantile` functions require unix timestamps to work, so we'll also create factor dataframes with unix time.",
"_____no_output_____"
]
],
[
[
"unixt_factor_data = {\n factor: data.set_index(pd.MultiIndex.from_tuples(\n [(x.timestamp(), y) for x, y in data.index.values],\n names=['date', 'asset']))\n for factor, data in factor_data.items()}",
"_____no_output_____"
]
],
[
[
"### Factor Returns\nLet's view the factor returns over time. We should be seeing it generally move up and to the right.",
"_____no_output_____"
]
],
[
[
"ls_factor_returns = pd.DataFrame()\n\nfor factor_name, data in factor_data.items():\n ls_factor_returns[factor_name] = al.performance.factor_returns(data).iloc[:, 0]\n\n(1 + ls_factor_returns).cumprod().plot()",
"_____no_output_____"
]
],
[
[
"### Basis Points Per Day per Quantile\nIt is not enough to look just at the factor weighted return. A good alpha is also monotonic in quantiles. Let's looks the basis points for the factor returns.",
"_____no_output_____"
]
],
[
[
"qr_factor_returns = pd.DataFrame()\n\nfor factor_name, data in unixt_factor_data.items():\n qr_factor_returns[factor_name] = al.performance.mean_return_by_quantile(data)[0].iloc[:, 0]\n\n(10000*qr_factor_returns).plot.bar(\n subplots=True,\n sharey=True,\n layout=(5,3),\n figsize=(14, 14),\n legend=False)",
"_____no_output_____"
]
],
[
[
"### Turnover Analysis\nWithout doing a full and formal backtest, we can analyze how stable the alphas are over time. Stability in this sense means that from period to period, the alpha ranks do not change much. Since trading is costly, we always prefer, all other things being equal, that the ranks do not change significantly per period. We can measure this with the **Factor Rank Autocorrelation (FRA)**.",
"_____no_output_____"
]
],
[
[
"ls_FRA = pd.DataFrame()\n\nfor factor, data in unixt_factor_data.items():\n ls_FRA[factor] = al.performance.factor_rank_autocorrelation(data)\n\nls_FRA.plot(title=\"Factor Rank Autocorrelation\")",
"_____no_output_____"
]
],
[
[
"### Sharpe Ratio of the Alphas\nThe last analysis we'll do on the factors will be sharpe ratio. Let's see what the sharpe ratio for the factors are. Generally, a Sharpe Ratio of near 1.0 or higher is an acceptable single alpha for this universe.",
"_____no_output_____"
]
],
[
[
"daily_annualization_factor = np.sqrt(252)\n\n(daily_annualization_factor * ls_factor_returns.mean() / ls_factor_returns.std()).round(2)",
"_____no_output_____"
]
],
[
[
"That's it! You've successfully done sentiment analysis on 10-ks!\n## Submission\nNow that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7de42000a8461f0fa0105ab1045476c69d8c26d | 92,462 | ipynb | Jupyter Notebook | 04 Model_Building_and_Evaluaion/Model_Building.ipynb | Karthikraja-Pandian/Project---House-Prices-Prediction | 3632e3f48a1d33ed1bf1a43ac82bbb6d6d4968e9 | [
"MIT"
] | null | null | null | 04 Model_Building_and_Evaluaion/Model_Building.ipynb | Karthikraja-Pandian/Project---House-Prices-Prediction | 3632e3f48a1d33ed1bf1a43ac82bbb6d6d4968e9 | [
"MIT"
] | null | null | null | 04 Model_Building_and_Evaluaion/Model_Building.ipynb | Karthikraja-Pandian/Project---House-Prices-Prediction | 3632e3f48a1d33ed1bf1a43ac82bbb6d6d4968e9 | [
"MIT"
] | null | null | null | 85.692308 | 32,044 | 0.735318 | [
[
[
"## Machine Learning Model Building Pipeline: Machine Learning Model Build\n\nIn the following notebooks, I will take you through a practical example of each one of the steps in the Machine Learning model building pipeline that I learned throughout my experience and analyzing many kaggle notebooks. There will be a notebook for each one of the Machine Learning Pipeline steps:\n\n1. Data Analysis\n2. Feature Engineering\n3. Feature Selection\n4. Model Building\n\n**This is the notebook for step 4: Building the Final Machine Learning Model**\n\nWe will use the house price dataset available on [Kaggle.com](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/data). See below for more details.\n\n===================================================================================================\n\n## Predicting Sale Price of Houses\n\nThe aim of the project is to build a machine learning model to predict the sale price of homes based on different explanatory variables describing aspects of residential houses. \n\n### Why is this important? \n\nPredicting house prices is useful to identify fruitful investments, or to determine whether the price advertised for a house is over or underestimated, before making a buying judgment.\n\n### What is the objective of the machine learning model?\n\nWe aim to minimise the difference between the real price, and the estimated price by our model. We will evaluate model performance using the mean squared error (mse) and the root squared of the mean squared error (rmse).\n\n### How do I download the dataset?\n\nTo download the House Price dataset go this website:\nhttps://www.kaggle.com/c/house-prices-advanced-regression-techniques/data\n\n====================================================================================================",
"_____no_output_____"
],
[
"## House Prices dataset: Machine Learning Model build\n\nIn the following cells, we will finally build our machine learning models, utilising the engineered data and the pre-selected features. \n\n\n### Setting the seed\n\nIt is important to note that we are engineering variables and pre-processing data with the idea of deploying the model if we find business value in it. Therefore, from now on, for each step that includes some element of randomness, it is extremely important that we **set the seed**. This way, we can obtain reproducibility between our research and our development code.\n\nThis is perhaps one of the most important lessons that I learned from my mistakes is **Always set the seeds**.\n\nLet's go ahead and load the dataset.",
"_____no_output_____"
]
],
[
[
"# to handle datasets\nimport pandas as pd\nimport numpy as np\n\n# for plotting\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n# to build the models\nfrom sklearn.linear_model import Lasso\n\n# to evaluate the models\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n# to visualise al the columns in the dataframe\npd.pandas.set_option('display.max_columns', None)",
"_____no_output_____"
],
[
"# load dataset\n# We load the datasets with the engineered values\n\nX_train = pd.read_csv('xtrain.csv')\nX_test = pd.read_csv('xtest.csv')\n\nX_train.head()",
"_____no_output_____"
],
[
"# capture the target\ny_train = X_train['SalePrice']\ny_test = X_test['SalePrice']",
"_____no_output_____"
],
[
"# load selected features\n\nfeatures = pd.read_csv('selected_features.csv', header=None)\nfeatures = [x for x in features[0]] \n\nfeatures = features[1:]\nfeatures",
"_____no_output_____"
],
[
"# reduce the train and test set to the desired features\n\nX_train = X_train[features]\nX_test = X_test[features]",
"_____no_output_____"
]
],
[
[
"### Regularised linear regression\n\nRemember to set the seed.",
"_____no_output_____"
]
],
[
[
"# train the model\nlin_model = Lasso(alpha=0.005, random_state=0) # remember to set the random_state / seed\nlin_model.fit(X_train, y_train)",
"_____no_output_____"
],
[
"# evaluate the model:\n# remember that we log transformed the output (SalePrice) in our feature engineering notebook\n\n# In order to get the true performance of the Lasso\n# we need to transform both the target and the predictions\n# back to the original house prices values.\n\n# We will evaluate performance using the mean squared error and the\n# root of the mean squared error\n\npred = lin_model.predict(X_train)\nprint('linear train mse: {}'.format(mean_squared_error(np.exp(y_train), np.exp(pred))))\nprint('linear train rmse: {}'.format(sqrt(mean_squared_error(np.exp(y_train), np.exp(pred)))))\nprint()\npred = lin_model.predict(X_test)\nprint('linear test mse: {}'.format(mean_squared_error(np.exp(y_test), np.exp(pred))))\nprint('linear test rmse: {}'.format(sqrt(mean_squared_error(np.exp(y_test), np.exp(pred)))))\nprint()\nprint('Average house price: ', np.exp(y_train).median())",
"linear train mse: 1087435415.4414546\nlinear train rmse: 32976.285652593666\n\nlinear test mse: 1405259552.2596054\nlinear test rmse: 37486.791704006966\n\nAverage house price: 163000.00000000012\n"
],
[
"# let's evaluate our predictions respect to the original price\nplt.scatter(y_test, lin_model.predict(X_test))\nplt.xlabel('True House Price')\nplt.ylabel('Predicted House Price')\nplt.title('Evaluation of Lasso Predictions')",
"_____no_output_____"
]
],
[
[
"We can see that our model is doing a pretty good job at estimating house prices.",
"_____no_output_____"
]
],
[
[
"# let's evaluate the distribution of the errors: \n# they should be fairly normally distributed\n\nerrors = y_test - lin_model.predict(X_test)\nerrors.hist(bins=15)",
"_____no_output_____"
]
],
[
[
"The distribution of the errors follows quite closely a gaussian distribution. That suggests that our model is doing a good job as well.",
"_____no_output_____"
],
[
"### Feature importance",
"_____no_output_____"
]
],
[
[
"# Finally, just for fun, let's look at the feature importance\n\nimportance = pd.Series(np.abs(lin_model.coef_.ravel()))\nimportance.index = features\nimportance.sort_values(inplace=True, ascending=False)\nimportance.plot.bar(figsize=(18,6))\nplt.ylabel('Lasso Coefficients')\nplt.title('Feature Importance')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7de422c9374fb305ea13c0d74d6358efd1f457d | 336,955 | ipynb | Jupyter Notebook | notes/CESM_energy_budget.ipynb | brian-rose/env-415-site | a0cdba60e530103b801c8eb8bb4e8fda0abb9519 | [
"MIT"
] | 2 | 2020-02-28T16:02:16.000Z | 2020-11-19T04:15:49.000Z | notes/CESM_energy_budget.ipynb | brian-rose/env-415-site | a0cdba60e530103b801c8eb8bb4e8fda0abb9519 | [
"MIT"
] | null | null | null | notes/CESM_energy_budget.ipynb | brian-rose/env-415-site | a0cdba60e530103b801c8eb8bb4e8fda0abb9519 | [
"MIT"
] | null | null | null | 323.995192 | 71,792 | 0.924898 | [
[
[
"# ENV / ATM 415: Climate Laboratory\n\n# The planetary energy budget in CESM simulations\n\n### Tuesday April 19 and Thursday April 21, 2016\n\n_____________________________________",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4 as nc",
"/Users/Brian/anaconda/lib/python2.7/site-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.\n warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')\n"
]
],
[
[
"Open the output from our control simulation with the slab ocean version of the CESM:",
"_____no_output_____"
]
],
[
[
"## To read data over the internet\ncontrol_filename = 'som_1850_f19.cam.h0.clim.nc'\ndatapath = 'http://ramadda.atmos.albany.edu:8080/repository/opendap/latest/Top/Users/Brian+Rose/CESM+runs/'\nendstr = '/entry.das'\ncontrol = nc.Dataset( datapath + 'som_1850_f19/' + control_filename + endstr )\n\n## To read from a local copy of the file \n## (just a small subset of the total list of variables, to save disk space)\n#ontrol_filename = 'som_1850_f19.cam.h0.clim_subset.nc'\n#control = nc.Dataset( control_filename )",
"_____no_output_____"
]
],
[
[
"The full file from the online server contains many many variables, describing all aspects of the model climatology.\n\nWhether we see a long list or a short list in the following code block depends on whether we are reading the full output file or the much smaller subset:",
"_____no_output_____"
]
],
[
[
"for v in control.variables: print v",
"lev\nhyam\nhybm\nilev\nhyai\nhybi\nP0\ntime\ndate\ndatesec\nlat\nlon\nslat\nslon\nw_stag\ntime_bnds\ndate_written\ntime_written\nntrm\nntrn\nntrk\nndbase\nnsbase\nnbdate\nnbsec\nmdt\nnlon\nwnummax\ngw\nndcur\nnscur\nco2vmr\nch4vmr\nn2ovmr\nf11vmr\nf12vmr\nsol_tsi\nnsteph\nAEROD_v\nCLDHGH\nCLDICE\nCLDLIQ\nCLDLOW\nCLDMED\nCLDTOT\nCLOUD\nCONCLD\nDCQ\nDTCOND\nDTV\nEMIS\nFICE\nFLDS\nFLDSC\nFLNS\nFLNSC\nFLNT\nFLNTC\nFLUT\nFLUTC\nFSDS\nFSDSC\nFSDTOA\nFSNS\nFSNSC\nFSNT\nFSNTC\nFSNTOA\nFSNTOAC\nFSUTOA\nICEFRAC\nICIMR\nICWMR\nLANDFRAC\nLHFLX\nLWCF\nMSKtem\nOCNFRAC\nOMEGA\nOMEGAT\nPBLH\nPHIS\nPRECC\nPRECL\nPRECSC\nPRECSL\nPS\nPSL\nQ\nQFLX\nQREFHT\nQRL\nQRS\nRELHUM\nSFCLDICE\nSFCLDLIQ\nSHFLX\nSNOWHICE\nSNOWHLND\nSOLIN\nSWCF\nT\nTAUX\nTAUY\nTGCLDCWP\nTGCLDIWP\nTGCLDLWP\nTH\nTH2d\nTMQ\nTREFHT\nTS\nTSMN\nTSMX\nU\nU10\nU2d\nUTGWORO\nUU\nUV2d\nUV3d\nUW2d\nUW3d\nV\nV2d\nVD01\nVQ\nVT\nVTH2d\nVTH3d\nVU\nVV\nW2d\nWTH3d\nZ3\n"
]
],
[
[
"Today we need just a few of these variables:\n\n- `TS`: the surface temperature\n- `FLNT`: the longwave radiation at the top of the atmosphere (i.e. what we call the OLR)\n- `FSNT`: the net shortwave radiation at the top of the atmosphere (i.e. what we call the ASR)\n- `FLNTC`: the clear-sky OLR\n- `FSNTC`: the clear-sky ASR",
"_____no_output_____"
],
[
"Take a look at some of the meta-data for these fields:",
"_____no_output_____"
]
],
[
[
"for field in ['TS', 'FLNT', 'FSNT', 'FLNTC', 'FSNTC']:\n print control.variables[field]",
"<type 'netCDF4._netCDF4.Variable'>\nfloat32 TS(time, lat, lon)\n units: K\n long_name: Surface temperature (radiative)\n cell_methods: time: mean time: mean\nunlimited dimensions: time\ncurrent shape = (12, 96, 144)\nfilling off\n\n<type 'netCDF4._netCDF4.Variable'>\nfloat32 FLNT(time, lat, lon)\n Sampling_Sequence: rad_lwsw\n units: W/m2\n long_name: Net longwave flux at top of model\n cell_methods: time: mean time: mean\nunlimited dimensions: time\ncurrent shape = (12, 96, 144)\nfilling off\n\n<type 'netCDF4._netCDF4.Variable'>\nfloat32 FSNT(time, lat, lon)\n Sampling_Sequence: rad_lwsw\n units: W/m2\n long_name: Net solar flux at top of model\n cell_methods: time: mean time: mean\nunlimited dimensions: time\ncurrent shape = (12, 96, 144)\nfilling off\n\n<type 'netCDF4._netCDF4.Variable'>\nfloat32 FLNTC(time, lat, lon)\n Sampling_Sequence: rad_lwsw\n units: W/m2\n long_name: Clearsky net longwave flux at top of model\n cell_methods: time: mean time: mean\nunlimited dimensions: time\ncurrent shape = (12, 96, 144)\nfilling off\n\n<type 'netCDF4._netCDF4.Variable'>\nfloat32 FSNTC(time, lat, lon)\n Sampling_Sequence: rad_lwsw\n units: W/m2\n long_name: Clearsky net solar flux at top of model\n cell_methods: time: mean time: mean\nunlimited dimensions: time\ncurrent shape = (12, 96, 144)\nfilling off\n\n"
]
],
[
[
"Each one of these variables has dimensions `(12, 96, 144)`, which corresponds to time (12 months), latitude and longitude.\n\nTake a look at one of the coordinate variables:",
"_____no_output_____"
]
],
[
[
"print control.variables['lat']",
"<type 'netCDF4._netCDF4.Variable'>\nfloat64 lat(lat)\n long_name: latitude\n units: degrees_north\nunlimited dimensions: \ncurrent shape = (96,)\nfilling off\n\n"
]
],
[
[
"Now let's load in the coordinate data, to use later for plotting:",
"_____no_output_____"
]
],
[
[
"lat = control.variables['lat'][:]\nlon = control.variables['lon'][:]\nprint lat",
"[-90. -88.10526316 -86.21052632 -84.31578947 -82.42105263\n -80.52631579 -78.63157895 -76.73684211 -74.84210526 -72.94736842\n -71.05263158 -69.15789474 -67.26315789 -65.36842105 -63.47368421\n -61.57894737 -59.68421053 -57.78947368 -55.89473684 -54. -52.10526316\n -50.21052632 -48.31578947 -46.42105263 -44.52631579 -42.63157895\n -40.73684211 -38.84210526 -36.94736842 -35.05263158 -33.15789474\n -31.26315789 -29.36842105 -27.47368421 -25.57894737 -23.68421053\n -21.78947368 -19.89473684 -18. -16.10526316 -14.21052632\n -12.31578947 -10.42105263 -8.52631579 -6.63157895 -4.73684211\n -2.84210526 -0.94736842 0.94736842 2.84210526 4.73684211\n 6.63157895 8.52631579 10.42105263 12.31578947 14.21052632\n 16.10526316 18. 19.89473684 21.78947368 23.68421053\n 25.57894737 27.47368421 29.36842105 31.26315789 33.15789474\n 35.05263158 36.94736842 38.84210526 40.73684211 42.63157895\n 44.52631579 46.42105263 48.31578947 50.21052632 52.10526316 54.\n 55.89473684 57.78947368 59.68421053 61.57894737 63.47368421\n 65.36842105 67.26315789 69.15789474 71.05263158 72.94736842\n 74.84210526 76.73684211 78.63157895 80.52631579 82.42105263\n 84.31578947 86.21052632 88.10526316 90. ]\n"
]
],
[
[
"## Surface temperature in the control simulation",
"_____no_output_____"
]
],
[
[
"# A re-usable function to make a map of a 2d field on a latitude / longitude grid\ndef make_map(field_2d):\n # Make a filled contour plot\n fig = plt.figure(figsize=(10,5))\n cax = plt.contourf(lon, lat, field_2d)\n # draw a single contour to outline the continents\n plt.contour( lon, lat, control.variables['LANDFRAC'][0,:,:], [0.5], colors='k')\n plt.xlabel('Longitude (degrees east)')\n plt.ylabel('Latitude (degrees north)')\n plt.colorbar(cax)",
"_____no_output_____"
],
[
"# Here is a convenient function that takes the name of a variable in our CESM output\n# and make a map of its annual average\ndef map_this(fieldname, dataset=control):\n field = dataset.variables[fieldname][:]\n field_annual = np.mean(field, axis=0)\n make_map(field_annual)",
"_____no_output_____"
],
[
"# Use this function to make a quick map of the annual average surface temperature:\nmap_this('TS')",
"_____no_output_____"
]
],
[
[
"### Computing a global average",
"_____no_output_____"
]
],
[
[
"# The lat/lon dimensions after taking the time average:\nTS_annual = np.mean(control.variables['TS'][:], axis=0)\nTS_annual.shape",
"_____no_output_____"
]
],
[
[
"Define a little re-usable function to take the global average of any of these fields:",
"_____no_output_____"
]
],
[
[
"def global_mean(field_2d):\n '''This function takes a 2D array on a regular latitude-longitude grid \n and returns the global area-weighted average'''\n zonal_mean = np.mean(field_2d, axis=1)\n return np.average(zonal_mean, weights=np.cos(np.deg2rad(lat)))",
"_____no_output_____"
],
[
"# Again, a convenience function that takes just the name of the model output field\n# and returns its time and global average\ndef global_mean_this(fieldname, dataset=control):\n field = dataset.variables[fieldname][:]\n field_annual = np.mean(field, axis=0)\n return global_mean(field_annual)",
"_____no_output_____"
]
],
[
[
"Now compute the global average surface temperature in the simulation:",
"_____no_output_____"
]
],
[
[
"global_mean_this('TS')",
"_____no_output_____"
]
],
[
[
"## Cloud cover in the control simulation",
"_____no_output_____"
],
[
"The model simulates cloud amount in every grid box. The cloud field is thus 4-dimensional:",
"_____no_output_____"
]
],
[
[
"# This field is not included in the small subset file\n# so this will only work if you are reading the full file from the online server\ncontrol.variables['CLOUD']",
"_____no_output_____"
]
],
[
[
"To simplify things we can just look at the **total cloud cover**, integrated from the surface to the top of the atmosphere:",
"_____no_output_____"
]
],
[
[
"control.variables['CLDTOT']",
"_____no_output_____"
],
[
"map_this('CLDTOT')",
"_____no_output_____"
]
],
[
[
"Which parts of Earth are cloudy and which are not? (at least in this simulation)",
"_____no_output_____"
],
[
"## Exercise 1: Make three maps: ASR, OLR, and the net radiation ASR-OLR (all annual averages)\n\nWhat interesting features do you see on these maps?",
"_____no_output_____"
]
],
[
[
"# To get you started, here is the ASR\nmap_this('FSNT')",
"_____no_output_____"
],
[
"map_this('FLNT')",
"_____no_output_____"
],
[
"net_radiation = np.mean(control.variables['FSNT'][:] - control.variables['FLNT'][:], axis=0)\nmake_map(net_radiation)",
"_____no_output_____"
]
],
[
[
"## Exercise 2: Calculate the global average net radiation. \n\nIs it close to zero? What does that mean?",
"_____no_output_____"
]
],
[
[
"global_mean(net_radiation)",
"_____no_output_____"
]
],
[
[
"## Exercise 3: Make maps of the clear-sky ASR and clear-sky OLR\n\nThese diagnostics have been calculated by the GCM. Basically at every timestep, the GCM calculates the radiation twice: once with the clouds and once without the clouds.",
"_____no_output_____"
],
[
"## Exercise 4: Make a map of the Cloud Radiative Effect\n\nRecall that we define $CRE$ as\n\n$$ CRE = \\left( ASR - ASR_{clear} \\right) - \\left( OLR - OLR_{clear} \\right) $$\n\nThis quantity is **positive** where the clouds have a **net warming effect** on the climate.",
"_____no_output_____"
],
[
"## Exercise 5: in the global average, are the clouds warming or cooling the climate in the CESM control simulation?",
"_____no_output_____"
],
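        [
            [
                "The three exercises above can be tackled with the helper functions already defined in this notebook (`map_this`, `make_map`, `global_mean`). A minimal sketch, not necessarily the intended solution:\n\n```python\n# Exercise 3: clear-sky fields, using the plotting helper defined earlier\nmap_this('FSNTC')   # clear-sky net shortwave (ASR_clear)\nmap_this('FLNTC')   # clear-sky net longwave (OLR_clear)\n\n# Exercise 4: cloud radiative effect, CRE = (ASR - ASR_clear) - (OLR - OLR_clear)\nASR       = np.mean(control.variables['FSNT'][:], axis=0)\nASR_clear = np.mean(control.variables['FSNTC'][:], axis=0)\nOLR       = np.mean(control.variables['FLNT'][:], axis=0)\nOLR_clear = np.mean(control.variables['FLNTC'][:], axis=0)\nCRE = (ASR - ASR_clear) - (OLR - OLR_clear)\nmake_map(CRE)   # positive where clouds have a net warming effect\n\n# Exercise 5: global average of the cloud radiative effect\nprint(global_mean(CRE))\n```",
                "_____no_output_____"
            ],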
[
"# Climate sensitivity in the CESM: the effects of doubling CO2",
"_____no_output_____"
],
[
"How much CO2 was in the atmosphere for the control simulation?\n\nThis information is available in the full output file (this won't work with the local subset file):",
"_____no_output_____"
]
],
[
[
"# The meta-data:\ncontrol.variables['co2vmr']",
"_____no_output_____"
],
[
"# The data themselves, expressed in ppm:\ncontrol.variables['co2vmr'][:] * 1E6",
"_____no_output_____"
]
],
[
[
"Answer: the CO2 concentration is 284.7 ppm in the control simulation.",
"_____no_output_____"
],
[
"Now we want to see how the climate changes in the CESM when we double CO2 and run it out to equilibrium.\n\nI have done this. Because we are using a slab ocean model, it reaches equilibrium after just a few decades.\n\nLet's now open up the output file from the 2xCO2 scenario:",
"_____no_output_____"
]
],
[
[
"## To read data over the internet\n# doubleCO2_filename = 'som_1850_2xCO2.cam.h0.clim.nc'\n# doubleCO2 = nc.Dataset( datapath + 'som_1850_f19/' + doubleCO2_filename + endstr )\n\n## To read from a local copy of the file \n## (just a small subset of the total list of variables, to save disk space)\ndoubleCO2_filename = 'som_1850_2xCO2.cam.h0.clim_subset.nc'\ndoubleCO2 = nc.Dataset( doubleCO2_filename )",
"_____no_output_____"
]
],
[
[
"This file has all the same fields as `control`, but they reflect the new equilibrium climate after doubling CO2.\n\nLet's verify the CO2 amount:",
"_____no_output_____"
]
],
[
[
"doubleCO2.variables['co2vmr'][:] * 1E6",
"_____no_output_____"
]
],
[
[
"So the CO2 concentration is now 569.4 ppm.",
"_____no_output_____"
],
[
"## Exercise 6: Make a map of the change in surface temperature due to the doubling of CO2\n\nWhat interesting features do you see on this map? Does it warm more in some locations than others?",
"_____no_output_____"
],
[
"## Exercise 7: Calculate the Equilibrium Climate Sensitivity for the CESM\n\nRemember, this is just the global average of the temperature differences you plotted in Exercise 6",
"_____no_output_____"
],
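        [
            [
                "A hedged sketch for Exercises 6 and 7, reusing `make_map` and `global_mean` and assuming the `TS` field is available in both output files:\n\n```python\n# Exercise 6: map of the surface warming due to doubling CO2\nTS_control = np.mean(control.variables['TS'][:], axis=0)\nTS_2xCO2   = np.mean(doubleCO2.variables['TS'][:], axis=0)\nmake_map(TS_2xCO2 - TS_control)\n\n# Exercise 7: equilibrium climate sensitivity = global average of that change\nprint(global_mean(TS_2xCO2 - TS_control))\n```",
                "_____no_output_____"
            ],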
[
"## Exercise 8: Compute the global average CRE in the 2xCO2 simulation\n\nAre the clouds warming or cooling the climate? \n\nHow has CRE changed compared to the control climate? (i.e. is the net effect larger or smaller than it was before)\n\nCan you infer whether the **cloud feedback** is positive or negative in the CESM?",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7de46a98f9b3e35a31b7eb7ecf9dfbfc328db4e | 300,112 | ipynb | Jupyter Notebook | examples/n-pendulum-control.ipynb | isaacyeaton/snaketurn | b7894735f487dfd317bf037b081cdd1ffe0d9524 | [
"MIT"
] | null | null | null | examples/n-pendulum-control.ipynb | isaacyeaton/snaketurn | b7894735f487dfd317bf037b081cdd1ffe0d9524 | [
"MIT"
] | null | null | null | examples/n-pendulum-control.ipynb | isaacyeaton/snaketurn | b7894735f487dfd317bf037b081cdd1ffe0d9524 | [
"MIT"
] | null | null | null | 265.351017 | 96,245 | 0.898185 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7de4ebab79960dfb638b300bdb9ac43476ecc07 | 6,111 | ipynb | Jupyter Notebook | samples/notebooks/powershell/Docs/Interactive-Host-Experience.ipynb | flcdrg/dotnet-interactive | 6a45d0c52394e08387d7041346c7ae08d4899b3f | [
"MIT"
] | null | null | null | samples/notebooks/powershell/Docs/Interactive-Host-Experience.ipynb | flcdrg/dotnet-interactive | 6a45d0c52394e08387d7041346c7ae08d4899b3f | [
"MIT"
] | null | null | null | samples/notebooks/powershell/Docs/Interactive-Host-Experience.ipynb | flcdrg/dotnet-interactive | 6a45d0c52394e08387d7041346c7ae08d4899b3f | [
"MIT"
] | null | null | null | 21.442105 | 186 | 0.535264 | [
[
[
"[this doc on github](https://github.com/dotnet/interactive/tree/main/samples/notebooks/powershell)\n\n# Interactive Host Experience in PowerShell notebook",
"_____no_output_____"
],
[
"The PowerShell notebook provides a rich interactive experience through its host.\nThe following are some examples.",
"_____no_output_____"
],
[
"1. _You can set the foreground and background colors for the output. The code below sets the foreground color to `Blue`, and you can see the output is rendered in blue afterwards:_",
"_____no_output_____"
]
],
[
[
"$host.UI.RawUI.ForegroundColor = [System.ConsoleColor]::Blue\n$PSVersionTable",
"_____no_output_____"
]
],
[
[
"2. _You can write to the host with specified foreground and background colors_",
"_____no_output_____"
]
],
[
[
"Write-Host \"Something to think about ...\" -ForegroundColor Blue -BackgroundColor Gray",
"_____no_output_____"
]
],
[
[
"3. _Warning, Verbose, and Debug streams are rendered with the expected color:_",
"_____no_output_____"
]
],
[
[
"Write-Warning \"Warning\"\nWrite-Verbose \"Verbose\" -Verbose\nWrite-Debug \"Debug\" -Debug",
"_____no_output_____"
]
],
[
[
"4. _You can use `Write-Host -NoNewline` as expected:_",
"_____no_output_____"
]
],
[
[
"Write-Host \"Hello \" -NoNewline -ForegroundColor Red\nWrite-Host \"World!\" -ForegroundColor Blue",
"_____no_output_____"
]
],
[
[
"5. _You can read from user for credential:_",
"_____no_output_____"
]
],
[
[
"$cred = Get-Credential\n\"$($cred.UserName), password received!\"",
"_____no_output_____"
]
],
[
[
"6. _You can read from user for regular input:_",
"_____no_output_____"
]
],
[
[
"Write-Verbose \"Ask for name\" -Verbose\n\n$name = Read-Host -Prompt \"What's your name? \"\nWrite-Host \"Greetings, $name!\" -ForegroundColor DarkBlue",
"_____no_output_____"
]
],
[
[
"7. _You can read from user for password:_",
"_____no_output_____"
]
],
[
[
"Read-Host -Prompt \"token? \" -AsSecureString",
"_____no_output_____"
]
],
[
[
"8. _You can use the multi-selection when running commands:_",
"_____no_output_____"
]
],
[
[
"Get-Command nonExist -ErrorAction Inquire",
"_____no_output_____"
]
],
[
[
"9. _You can user the mandatory parameter prompts:_",
"_____no_output_____"
]
],
[
[
"Write-Output | ForEach-Object { \"I received '$_'\" }",
"_____no_output_____"
]
],
[
[
"10. _Of course, pipeline streaming works:_",
"_____no_output_____"
]
],
[
[
"Get-Process | select -First 5 | % { start-sleep -Milliseconds 300; $_ }",
"_____no_output_____"
]
],
[
[
"11. _Progress bar rendering works as expected:_",
"_____no_output_____"
]
],
[
[
"## Demo the progress bar\nFor ($i=0; $i -le 100; $i++) {\n Write-Progress -Id 1 -Activity \"Parent work progress\" -Status \"Current Count: $i\" -PercentComplete $i -CurrentOperation \"Counting ...\"\n \n For ($j=0; $j -le 10; $j++) {\n Start-Sleep -Milliseconds 5\n Write-Progress -Parent 1 -Id 2 -Activity \"Child work progress\" -Status \"Current Count: $j\" -PercentComplete ($j*10) -CurrentOperation \"Working ...\"\n }\n \n if ($i -eq 50) {\n Write-Verbose \"working hard!!!\" -Verbose\n \"Something to output\"\n }\n}",
"_____no_output_____"
]
],
[
[
" ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7de733fd2b9b478868f3a9dc7e8d143c01e36b2 | 60,397 | ipynb | Jupyter Notebook | Missiles and Rockets.ipynb | AmoDinho/fastai-experiments | 5295dca19627eb564f60d2c929d273f1a96d2ff4 | [
"MIT"
] | null | null | null | Missiles and Rockets.ipynb | AmoDinho/fastai-experiments | 5295dca19627eb564f60d2c929d273f1a96d2ff4 | [
"MIT"
] | null | null | null | Missiles and Rockets.ipynb | AmoDinho/fastai-experiments | 5295dca19627eb564f60d2c929d273f1a96d2ff4 | [
"MIT"
] | null | null | null | 67.257238 | 849 | 0.656556 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7de76b67a9afefefdbdc5a2c9dba626bd454f28 | 237,407 | ipynb | Jupyter Notebook | visulization.ipynb | leizhipeng/DL_Mercari_Price_Suggestion | f3252f02f6f1ce845b74722f169f6651a6a41d9e | [
"MIT"
] | 1 | 2020-12-25T04:14:11.000Z | 2020-12-25T04:14:11.000Z | visulization.ipynb | leizhipeng/DL_Mercari_Price_Suggestion | f3252f02f6f1ce845b74722f169f6651a6a41d9e | [
"MIT"
] | null | null | null | visulization.ipynb | leizhipeng/DL_Mercari_Price_Suggestion | f3252f02f6f1ce845b74722f169f6651a6a41d9e | [
"MIT"
] | null | null | null | 1,018.914163 | 119,008 | 0.952407 | [
[
[
"import numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\n%matplotlib inline ",
"_____no_output_____"
],
[
"data = pd.read_table(\"train.tsv\")\ndisplay(data.head(n=2))\nprint(data.shape)",
"_____no_output_____"
],
[
"text = data.item_description.values\nprint(text[1])",
"This keyboard is in great condition and works like it came out of the box. All of the ports are tested and work perfectly. The lights are customizable via the Razer Synapse app on your PC.\n"
],
[
"text_all = ' '.join(text)",
"_____no_output_____"
],
[
"from wordcloud import WordCloud\n# Generate a word cloud image\nwordcloud = WordCloud().generate(text_all)\n\n",
"_____no_output_____"
],
[
"# Display the generated image:\n# the matplotlib way:\n#import matplotlib.pyplot as plt\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\n\n",
"_____no_output_____"
],
[
"# lower max_font_size\nwordcloud = WordCloud(max_font_size=40).generate(text_all)\nplt.figure()\nplt.imshow(wordcloud, interpolation=\"bilinear\")\nplt.axis(\"off\")\n#plt.show()\nplt.savefig('word cloud.png', dpi=300, bbox_inches='tight')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7de792b82f45e7a79eb3faae99e897d9076ad8e | 554,946 | ipynb | Jupyter Notebook | trabalho01_regressao/trabalho01_regressao_naval_dataset.ipynb | diascarolina/fundamentos-analise-dados | e46a9cf30690f3e569296b1d9ad9c90234d5236e | [
"MIT"
] | null | null | null | trabalho01_regressao/trabalho01_regressao_naval_dataset.ipynb | diascarolina/fundamentos-analise-dados | e46a9cf30690f3e569296b1d9ad9c90234d5236e | [
"MIT"
] | null | null | null | trabalho01_regressao/trabalho01_regressao_naval_dataset.ipynb | diascarolina/fundamentos-analise-dados | e46a9cf30690f3e569296b1d9ad9c90234d5236e | [
"MIT"
] | null | null | null | 473.503413 | 176,040 | 0.936342 | [
[
[
"# Fundamentos de Análise de Dados 2022.1\n\n# Trabalho 01 - Regressão: _Naval Propulsion Plants_\n\n**Nome:** Carolina Araújo Dias",
"_____no_output_____"
],
[
"# Dataset\n\n_Naval Propulsion Plants_: regressão múltipla (2 variáveis de saída), estimar cada variável de saída separadamente:\n- 11934 amostras;\n- 16 características reais;\n- 2 características reais para estimar, mas estimar somente _GT Compressor decay state coeficient_ (remover _GT Turbine decay state coeficient_).\n\n# 01. Fazer o _download_ do respectivo banco de dados\n\n**Link:** http://archive.ics.uci.edu/ml/datasets/condition+based+maintenance+of+naval+propulsion+plants\n\nApós feito o download, os dados foram salvos em _\"../data/naval_data.txt\"_.",
"_____no_output_____"
],
[
"# Bibliotecas",
"_____no_output_____"
]
],
[
[
"!python --version",
"Python 3.8.10\n"
],
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error",
"_____no_output_____"
]
],
[
[
"# Funções Auxiliares",
"_____no_output_____"
]
],
[
[
"def check_constant_columns(dataframe: pd.DataFrame) -> None:\n \"\"\"Checa se existem colunas constantes no dataframe\n e imprime o nome e os valores de tais colunas.\"\"\"\n for column in dataframe.columns:\n if len(dataframe[column].unique()) == 1:\n print(f'Coluna: \"{column}\", Valor constante: {dataframe[column].unique()}')\n \n \ndef add_ones_column(data_array: np.array) -> np.array:\n \"\"\"Adiciona uma coluna de 1's ao final de um array.\"\"\"\n length = data_array.shape[0]\n return np.c_[data_array, np.ones(length)]\n\n\ndef plot_data(x, y):\n plt.rcParams[\"figure.figsize\"] = (12, 8)\n plt.scatter(x=x,\n y=y,\n alpha=0.1)\n \n plt.axline((1, 1),\n slope=1,\n color='r')\n \n rmse = round(mean_squared_error(x, y, squared=False), 5)\n plt.title(f'Dados Reais vs. Dados Preditos - RMSE: {rmse}',\n loc='left', fontsize=18)\n plt.xlabel('Dados Reais', fontsize=12)\n plt.ylabel('Dados Preditos', fontsize=12)\n \n plt.show()",
"_____no_output_____"
]
],
[
[
"# 02. Fazer a leitura dos dados",
"_____no_output_____"
]
],
[
[
"column_names = [\n \"Lever position\",\n \"Ship speed\",\n \"Gas Turbine shaft torque\",\n \"GT rate of revolutions\",\n \"Gas Generator rate of revolutions\",\n \"Starboard Propeller Torque\",\n \"Port Propeller Torque\",\n \"Hight Pressure Turbine exit temperature\",\n \"GT Compressor inlet air temperature\",\n \"GT Compressor outlet air temperature\",\n \"HP Turbine exit pressure\",\n \"GT Compressor inlet air pressure\",\n \"GT Compressor outlet air pressure\",\n \"GT exhaust gas pressure\",\n \"Turbine Injecton Control\",\n \"Fuel flow\",\n \"GT Compressor decay state coefficient\",\n \"GT Turbine decay state coefficient\"\n]\n\n# to readthe data using read_csv\n# raw_data = pd.read_csv(\"data/naval_data.txt\", sep=\" \", header=None, engine='python')\n\n# to read the data using read_fwf\nraw_data = pd.read_fwf(\"../data/naval_data.txt\", header=None)\nraw_data.columns = column_names",
"_____no_output_____"
],
[
"# conferir os dados\nraw_data.head()",
"_____no_output_____"
]
],
[
[
"Só por olharmos para os dados conseguimos enxergar alguns problemas com as colunas. Por exemplo, aparentemente as colunas `Starboard Propeller Torque` e `Port Propeller Torque` são iguais. Além disso, as colunas `GT Compressor inlet air temperature` e `GT Compressor inlet air pressure` parecem ter apenas um valor constante. Vamos checar se isso é verdade.",
"_____no_output_____"
]
],
[
[
"if raw_data['Starboard Propeller Torque'].equals(raw_data['Port Propeller Torque']):\n print(f'As colunas \"Starboard Propeller Torque\" e \"Port Propeller Torque\" são iguais.')\nelse:\n print(f'As colunas não são iguais.')",
"As colunas \"Starboard Propeller Torque\" e \"Port Propeller Torque\" são iguais.\n"
],
[
"check_constant_columns(raw_data)",
"Coluna: \"GT Compressor inlet air temperature\", Valor constante: [288.]\nColuna: \"GT Compressor inlet air pressure\", Valor constante: [0.998]\n"
]
],
[
[
"Como identificamos essas colunas problemáticas, iremos removê-las a seguir.",
"_____no_output_____"
]
],
[
[
"data = raw_data.copy()\ndata.drop(['GT Compressor inlet air temperature',\n 'GT Compressor inlet air pressure',\n 'Port Propeller Torque'],\n axis=1,\n inplace=True)",
"_____no_output_____"
]
],
[
[
"# 03. Se necessário, dividir os dados em conjunto de treinamento (70%) e teste (30%), utilizando a função apropriada do scikit-learn. Quatro NumPy arrays devem ser criados: X_train, y_train, X_test e y_test.",
"_____no_output_____"
]
],
[
[
"data.drop([\"GT Turbine decay state coefficient\"],\n axis=1,\n inplace=True)\n\nprint(f'Formato dos dados completos: {data.shape}')\n\nX = data.drop([\"GT Compressor decay state coefficient\"],\n axis=1)\n\ny = data[[\"GT Compressor decay state coefficient\"]]\n\nprint(f'Formato de X: {X.shape}')\nprint(f'Formato de y: {y.shape}')",
"Formato dos dados completos: (11934, 14)\nFormato de X: (11934, 13)\nFormato de y: (11934, 1)\n"
],
[
"X_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=12)",
"_____no_output_____"
],
[
"X_train = X_train.to_numpy()\nX_test = X_test.to_numpy()\ny_train = y_train.to_numpy()\ny_test = y_test.to_numpy()",
"_____no_output_____"
]
],
[
[
"# 04. Acrescentar uma coluna de 1s ([1 1 . . . 1]^T) como última coluna da matriz de treinamento X_train. Repita o procedimento para a matriz de teste, chamando-a de X_test_2.",
"_____no_output_____"
],
[
"[StackOverflow: How to add an extra column to a NumPy array](https://stackoverflow.com/questions/8486294/how-to-add-an-extra-column-to-a-numpy-array)",
"_____no_output_____"
]
],
[
[
"add_ones_column(X_train).shape",
"_____no_output_____"
]
],
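    [
        [
            "For clarity, the augmented matrices named in item 04 can also be stored explicitly; a small illustrative sketch (the remainder of this notebook keeps calling `add_ones_column(...)` inline instead):\n\n```python\nX_train_2 = add_ones_column(X_train)\nX_test_2 = add_ones_column(X_test)\nX_train_2.shape, X_test_2.shape\n```",
            "_____no_output_____"
        ]
    ],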
[
[
"# 05. Calcular o posto das matrizes X_train_2 e X_test_2. Se necessário, ajustar as matrizes X_train_2 e X_test_2.",
"_____no_output_____"
],
[
"Antes de remover as 3 colunas problemáticas:",
"_____no_output_____"
]
],
[
[
"raw_data.shape",
"_____no_output_____"
],
[
"X_raw = raw_data.drop([\"GT Compressor decay state coefficient\",\n \"GT Turbine decay state coefficient\"],\n axis=1)\n\nadd_ones_column(X_raw).shape",
"_____no_output_____"
],
[
"np.linalg.matrix_rank(add_ones_column(X_raw))",
"_____no_output_____"
]
],
[
[
"Após remover as 3 colunas problemáticas:",
"_____no_output_____"
]
],
[
[
"np.linalg.matrix_rank(add_ones_column(X_train))",
"_____no_output_____"
],
[
"np.linalg.matrix_rank(add_ones_column(X_test))",
"_____no_output_____"
],
[
"add_ones_column(X_train).shape",
"_____no_output_____"
]
],
[
[
"# 06. Calcular a decomposição QR da matriz de treinamento: X_train_2 = QR, usando a função do NumPy apropriada.",
"_____no_output_____"
]
],
[
[
"Q, R = np.linalg.qr(add_ones_column(X_train))",
"_____no_output_____"
]
],
[
[
"## Questão 04\n\nVerificar numericamente que $Q^TQ = I$, para o respectivo banco de dados.\n\n*R.* Multiplicamos $Q^T$ por Q e salvamos em uma matriz M e comparamos essa matriz com uma matriz identidade de mesma dimensão. A função `np.allclose()` compara os valores levando em consideração as aproximações dos número.",
"_____no_output_____"
]
],
[
[
"M = np.matmul(Q.T, Q)\n\nnp.allclose(M, np.eye(M.shape[0]))",
"_____no_output_____"
]
],
[
[
"# 07. Calcular o vetor de coeficientes $\\mathbf{\\tilde{x}}$ da Equação (1), utilizando a função do NumPy `linalg.solve()`.",
"_____no_output_____"
]
],
[
[
"coefs_lineares = np.linalg.solve(R, np.dot(Q.T, y_train))",
"_____no_output_____"
]
],
[
[
"# 08. Calcular as estimativas do modelo para os valores de treinamento e teste, utilizando o vetor de coeficientes $\\mathbf{\\tilde{x}}$, calculado no item anterior.",
"_____no_output_____"
],
[
"## Treino",
"_____no_output_____"
]
],
[
[
"y_train_preds = []\n\nfor i in range(len(X_train)):\n y_train_preds.append(np.dot(np.squeeze(coefs_lineares), add_ones_column(X_train)[i]))",
"_____no_output_____"
]
],
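    [
        [
            "As a side note, the per-row loop above is equivalent to a single matrix-vector product; a minimal vectorized sketch (the name `y_train_preds_vec` is just illustrative):\n\n```python\n# same training predictions, computed in one shot\ny_train_preds_vec = add_ones_column(X_train) @ np.squeeze(coefs_lineares)\n```",
            "_____no_output_____"
        ]
    ],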
[
[
"## Teste",
"_____no_output_____"
]
],
[
[
"y_test_preds = []\n\nfor i in range(len(X_test)):\n y_test_preds.append(np.dot(np.squeeze(coefs_lineares), add_ones_column(X_test)[i]))",
"_____no_output_____"
]
],
[
[
"# 09. Gerar um gráfico com os valores reais de treinamento no eixo das abscissas e valores estimados de treinamento no eixo das ordenadas. Acrescentar ao gráfico uma reta pontilhada a +45◦ do eixo das abscissas.",
"_____no_output_____"
],
[
"## Treino",
"_____no_output_____"
]
],
[
[
"plot_data(x=y_train, y=y_train_preds)",
"_____no_output_____"
]
],
[
[
"## Teste",
"_____no_output_____"
]
],
[
[
"plot_data(x=y_test, y=y_test_preds)",
"_____no_output_____"
]
],
[
[
"# 10. Calcular a **raiz quadrada do erro médio quadrático** (RMSE) dos dados de treinamento e de teste.",
"_____no_output_____"
],
[
"## Treino",
"_____no_output_____"
]
],
[
[
"mean_squared_error(y_train, y_train_preds, squared=False)",
"_____no_output_____"
]
],
[
[
"## Teste",
"_____no_output_____"
]
],
[
[
"mean_squared_error(y_test, y_test_preds, squared=False)",
"_____no_output_____"
]
],
[
[
"# Repetir todo o processo acima para o outro target `GT Turbine decay state coefficient`",
"_____no_output_____"
]
],
[
[
"data = raw_data.copy()\ndata.drop(['GT Compressor inlet air temperature',\n 'GT Compressor inlet air pressure',\n 'Port Propeller Torque'],\n axis=1,\n inplace=True)\n\ndata.drop([\"GT Compressor decay state coefficient\"],\n axis=1,\n inplace=True)\n\n\nX = data.drop([\"GT Turbine decay state coefficient\"],\n axis=1)\n\ny = data[[\"GT Turbine decay state coefficient\"]]\n\nX_train, X_test, y_train, y_test = train_test_split(X,\n y,\n test_size=0.3,\n random_state=12)\n\nX_train = X_train.to_numpy()\nX_test = X_test.to_numpy()\ny_train = y_train.to_numpy()\ny_test = y_test.to_numpy()\n\nQ, R = np.linalg.qr(add_ones_column(X_train))\n\ncoefs_lineares = np.linalg.solve(R, np.dot(Q.T, y_train))\n\n\ny_train_preds = []\n\nfor i in range(len(X_train)):\n y_train_preds.append(np.dot(np.squeeze(coefs_lineares), add_ones_column(X_train)[i]))\n \n \ny_test_preds = []\n\nfor i in range(len(X_test)):\n y_test_preds.append(np.dot(np.squeeze(coefs_lineares), add_ones_column(X_test)[i]))",
"_____no_output_____"
]
],
[
[
"## Treino",
"_____no_output_____"
]
],
[
[
"mean_squared_error(y_train, y_train_preds, squared=False)",
"_____no_output_____"
],
[
"plot_data(x=y_train, y=y_train_preds)",
"_____no_output_____"
]
],
[
[
"## Teste",
"_____no_output_____"
]
],
[
[
"mean_squared_error(y_test, y_test_preds, squared=False)",
"_____no_output_____"
],
[
"plot_data(x=y_test, y=y_test_preds)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7de818411af81158035ba01636db51cb6cd9e84 | 614,489 | ipynb | Jupyter Notebook | tutorials/tutorial_howToAdaptiveOptics.ipynb | joao-aveiro/OOPAO | 213a447cd6a154683a7339f35d80b3cd36d9063a | [
"MIT"
] | null | null | null | tutorials/tutorial_howToAdaptiveOptics.ipynb | joao-aveiro/OOPAO | 213a447cd6a154683a7339f35d80b3cd36d9063a | [
"MIT"
] | null | null | null | tutorials/tutorial_howToAdaptiveOptics.ipynb | joao-aveiro/OOPAO | 213a447cd6a154683a7339f35d80b3cd36d9063a | [
"MIT"
] | null | null | null | 332.515693 | 149,760 | 0.92472 | [
[
[
"# Tutorial HowToAdaptiveOptics\n\nThis report provides a tutorial to use the code develloped to compute the PSIM for the ELT SCAO systems. \nThe code is object-oriented and its architecture is quite inspired from the ([OOMAO simulator](https://github.com/cmcorreia/LAM-Public/tree/master/_libOomao)).\n\n",
"_____no_output_____"
],
[
"## Modules required\nThe code is written in Python 3 and requires the following modules\n\n* **numba** => required in aotools\n* **joblib** => paralleling computing\n* **scikit-image** => 2D interpolations\n* **numexpr** => memory optimized simple operations\n* **astropy** => handling of fits files\n\nTo be able to use the code you need to install the listed modules using the following command lines in a terminal:\n\n*pip install aotools*\n\n*pip install numba*\n\n*pip install joblib*\n\n*pip install scikit-image*\n\n*pip install numexpr*\n\n*pip install astropy*\n",
"_____no_output_____"
],
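        [
            "For convenience, the same set of modules can also be installed with a single, equivalent command in a terminal:\n\n*pip install aotools numba joblib scikit-image numexpr astropy*",
            "_____no_output_____"
        ],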
[
"### Import Modules",
"_____no_output_____"
]
],
[
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 21 10:51:32 2020\n\n@author: cheritie\n\"\"\"\n# commom modules\nimport matplotlib.pyplot as plt\nimport numpy as np \nimport time\n\n# adding AO_Module to the path\nimport __load__psim\n__load__psim.load_psim()\n\n# loading AO modules\nfrom AO_modules.Atmosphere import Atmosphere\nfrom AO_modules.Pyramid import Pyramid\nfrom AO_modules.DeformableMirror import DeformableMirror\nfrom AO_modules.MisRegistration import MisRegistration\nfrom AO_modules.Telescope import Telescope\nfrom AO_modules.Source import Source\n\n# calibration modules \nfrom AO_modules.calibration.compute_KL_modal_basis import compute_M2C\nfrom AO_modules.calibration.ao_calibration import ao_calibration\n\n# display modules\nfrom AO_modules.tools.displayTools import displayMap\n",
"Looking for AO_Modules...\n['../AO_modules']\nAO_Modules found! Loading the main modules:\n"
]
],
[
[
"### Read Parameter File",
"_____no_output_____"
]
],
[
[
"#import parameter file (dictionnary)\n\nfrom parameterFile_VLT_I_Band import initializeParameterFile\n\nparam = initializeParameterFile()\n# the list of the keys contained in the dictionnary can be printed using the following lines\n# for key, value in param.items() :\n# print (key, value)",
"Reading/Writting calibration data from /Disk3/cheritier/psim/data_calibration/\nWritting output data in /diskb/cheritier/psim/data_cl\nCreation of the directory /diskb/cheritier/psim/data_cl failed:\nDirectory already exists!\n"
]
],
[
[
"## Telescope Object",
"_____no_output_____"
]
],
[
[
"# create the Telescope object\ntel = Telescope(resolution = param['resolution'],\\\n diameter = param['diameter'],\\\n samplingTime = param['samplingTime'],\\\n centralObstruction = param['centralObstruction'])",
"NGS flux updated!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% SOURCE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nWavelength \t0.55 \t [microns]\nOptical Band \tV\nMagnitude \t-0.0\nFlux \t\t8967391304.0\t [photons/m2/s]\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% SOURCE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nWavelength \t0.55 \t [microns]\nOptical Band \tV\nMagnitude \t-0.0\nFlux \t\t8967391304.0\t [photons/m2/s]\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% TELESCOPE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nDiameter \t\t\t8 \t [m]\nResolution \t\t\t80 \t [pix]\nPixel Size \t\t\t0.1\t [m]\nSurface \t\t\t50.0\t [m2]\nCentral Obstruction \t\t0\t [% of diameter]\nNumber of pixel in the pupil \t5024 \t [pix]\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
]
],
[
[
"The mai informations contained in the telescope objects are the following: \n* tel. pupil : the pupil of the telescope as a 2D mask\n\n* tel.src : the source object attached to the telescope that contains the informations related to the wavelength, flux and phase. The default wavelength is the V band with a magnitude 0.\n\n* tel.OPD : the telescope OPD corresponding to the tel.src.phase \n\nAll the properties of an object can be displayed using the .show() method: ",
"_____no_output_____"
]
],
[
[
"tel.show()",
"telescope:\n D: 8\n OPD: (80, 80)\n centralObstruction: 0\n fov: 0\n index_pixel_petals: None\n isPaired: False\n isPetalFree: False\n pixelArea: 5024\n pixelSize: 0.1\n pupil: (80, 80)\n pupilLogical: (1, 5024)\n pupilReflectivity: (80, 80)\n resolution: 80\n samplingTime: 0.001\n src: source object\n tag: telescope\n"
]
],
[
[
"We can show the 2D map corresponding to the pupil or to the OPD:",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.subplot(1,2,1)\nplt.imshow(tel.pupil.T)\nplt.title('Telescope Pupil: %.0f px in the pupil' %tel.pixelArea)\nplt.subplot(1,2,2)\nplt.imshow(tel.OPD.T)\nplt.title('Telescope OPD [m]')\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"And we can display the property of the child class tel.src that correspond to the default source object attached to the telescope:",
"_____no_output_____"
]
],
[
[
"tel.src.show()",
"source:\n bandwidth: 9e-08\n magnitude: -0.0\n nPhoton: 8967391304.347826\n optBand: V\n phase: (80, 80)\n tag: source\n wavelength: 5.5e-07\n zeroPoint: 8967391304.347826\n"
]
],
[
[
"## Source Object\nThe Source object allows to access the properties related to the flux and wavelength of the object. We consider only on-axis objects as a start. ",
"_____no_output_____"
]
],
[
[
"ngs=Source(optBand = param['opticalBand'],\\\n magnitude = param['magnitude'])\nprint('NGS Object built!')",
"NGS flux updated!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% SOURCE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nWavelength \t0.79 \t [microns]\nOptical Band \tI\nMagnitude \t8.0\nFlux \t\t4629307.0\t [photons/m2/s]\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% SOURCE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nWavelength \t0.79 \t [microns]\nOptical Band \tI\nMagnitude \t8.0\nFlux \t\t4629307.0\t [photons/m2/s]\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nNGS Object built!\n"
]
],
[
[
"The NGS object has to be attached to a telescope object using the ***** operator. \n\nThis operation sets the telescope property tel.src to the ngs object considered. ",
"_____no_output_____"
]
],
[
[
"ngs*tel",
"_____no_output_____"
]
],
[
[
"The ngs object is now attached to the telescope. This means that the tel.src object now has a **phase** and a **fluxMap** property.\n\nIf we display the properties of ngs and tel.src, they are the same: ",
"_____no_output_____"
]
],
[
[
"# properties of ngs\nngs.show()\n# properties of tel.src\ntel.src.show()",
"source:\n bandwidth: 1.5e-07\n fluxMap: (80, 80)\n magnitude: 8.0\n nPhoton: 4629306.603523155\n optBand: I\n phase: (80, 80)\n tag: source\n var: 8.673617379884035e-19\n wavelength: 7.9e-07\n zeroPoint: 7336956521.73913\nsource:\n bandwidth: 1.5e-07\n fluxMap: (80, 80)\n magnitude: 8.0\n nPhoton: 4629306.603523155\n optBand: I\n phase: (80, 80)\n tag: source\n var: 8.673617379884035e-19\n wavelength: 7.9e-07\n zeroPoint: 7336956521.73913\n"
]
],
[
[
"We can compute and display the PSF corresponding to the telescope OPD and Source object attached to the telescope. ",
"_____no_output_____"
]
],
[
[
"\nzeroPaddingFactor = 8\n\ntel.computePSF(zeroPaddingFactor = zeroPaddingFactor)\n\nPSF_normalized = tel.PSF/tel.PSF.max()\n\nnPix = zeroPaddingFactor*tel.resolution//3\n\nplt.figure()\nplt.imshow(np.log(np.abs(PSF_normalized[nPix:-nPix,nPix:-nPix])))\nplt.clim([-13,0])\nplt.colorbar()\n\n",
"_____no_output_____"
]
],
[
[
"## Atmosphere Object\nThe atmosphere object is created mainly from the telescope properties (diameter, pupil, samplingTime)and the *r0* and *L0* parameters. It is possible to generate multi-layers, each one is a child-class of the atmosphere object with its own set of parameters (windSpeed, Cn^2,windDirection, altitude). \n",
"_____no_output_____"
]
],
[
[
"atm=Atmosphere(telescope = tel,\\\n r0 = param['r0'],\\\n L0 = param['L0'],\\\n windSpeed = param['windSpeed'],\\\n fractionalR0 = param['fractionnalR0'],\\\n windDirection = param['windDirection'],\\\n altitude = param['altitude'])\n\nprint('Atmosphere Object built!')",
"Atmosphere Object built!\n"
]
],
[
[
"The atmosphere object has to be initialized using:",
"_____no_output_____"
]
],
[
[
"# initialize atmosphere\natm.initializeAtmosphere(tel)\nprint('Done!')",
"Creation of layer1/5 ...\n-> Computing the initial phase screen...\ninitial phase screen : 0.023934602737426758 s\nZZt.. : 0.7004520893096924 s\nZXt.. : 0.3715839385986328 s\nXXt.. : 0.2279503345489502 s\nDone!\nCreation of layer2/5 ...\n-> Computing the initial phase screen...\ninitial phase screen : 0.036902666091918945 s\nZZt.. : 1.2596936225891113 s\nZXt.. : 0.6154317855834961 s\nXXt.. : 0.26628828048706055 s\nDone!\nCreation of layer3/5 ...\n-> Computing the initial phase screen...\ninitial phase screen : 0.031950950622558594 s\nZZt.. : 0.9521989822387695 s\nZXt.. : 0.4208860397338867 s\nXXt.. : 0.20944452285766602 s\nDone!\nCreation of layer4/5 ...\n-> Computing the initial phase screen...\ninitial phase screen : 0.026932239532470703 s\nZZt.. : 0.998450517654419 s\nZXt.. : 0.5060114860534668 s\nXXt.. : 0.22093653678894043 s\nDone!\nCreation of layer5/5 ...\n-> Computing the initial phase screen...\ninitial phase screen : 0.028923749923706055 s\nZZt.. : 0.9069092273712158 s\nZXt.. : 0.4238872528076172 s\nXXt.. : 0.19648003578186035 s\nDone!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ATMOSPHERE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nr0 \t\t0.13 \t [m]\nL0 \t\t30 \t [m]\nSeeing(V) \t0.79\t [\"]\n------------------------------------------------------------------------\nLayer \t Direction \t Speed \t\t Altitude\n1 \t 0 [deg] \t 10 [m/s] \t100 [m]\n------------------------------------------------------------------------\n2 \t 72 [deg] \t 10 [m/s] \t100 [m]\n------------------------------------------------------------------------\n3 \t 144 [deg] \t 10 [m/s] \t100 [m]\n------------------------------------------------------------------------\n4 \t 216 [deg] \t 10 [m/s] \t100 [m]\n------------------------------------------------------------------------\n5 \t 288 [deg] \t 10 [m/s] \t100 [m]\n------------------------------------------------------------------------\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nDone!\n"
]
],
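[
[
"# Added sanity check (not part of the original tutorial): the seeing value printed above\n# can be recovered from r0 with the usual relation seeing ~ 0.98*lambda/r0.\n# The 500 nm (V band) wavelength below is an assumption made for this illustration.\nseeing_arcsec = 0.98 * 500e-9 / param['r0'] * 206265  # rad -> arcsec\nprint('Expected V-band seeing: %.2f arcsec' % seeing_arcsec)",
"_____no_output_____"
]
],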
[
[
"Similarly to the Source object, the atmosphere object can be paired to the telescope **+**. \n\nIn that case, if the atmosphere OPD is updated, the telescope OPD is automatically updated.",
"_____no_output_____"
]
],
[
[
"tel+atm\nprint(tel.isPaired)",
"Telescope and Atmosphere combined!\nTrue\n"
]
],
[
[
"We can display the properties of the telescope object:",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.imshow(tel.OPD.T)\nplt.title('Telescope OPD [m]')\nplt.colorbar()\n\nplt.figure()\nplt.imshow(tel.src.phase.T)\nplt.colorbar()\nplt.title('NGS Phase [rad]')",
"_____no_output_____"
]
],
[
[
"The atmosphere and the telescope can be separated using the **-** operator. This brings back the system to a diffraction limited case with a flat OPD.",
"_____no_output_____"
]
],
[
[
"tel-atm\nprint(tel.isPaired)",
"Telescope and Atmosphere separated!\nFalse\n"
]
],
[
[
"## Deformable Mirror Object\nThe deformable mirror is mainly characterized with its influence functions. They can be user-defined and loaded in the model but the default case is a cartesian DM with gaussian influence functions and normalized to 1. \nThe DM is always defined in the pupil plane. \n",
"_____no_output_____"
]
],
[
[
"dm=DeformableMirror(telescope = tel,\\\n nSubap = param['nSubaperture'],\\\n mechCoupling = param['mechanicalCoupling'])\nprint('Done!')",
"No coordinates loaded.. taking the cartesian geometry as a default\nGenerating a Deformable Mirror: \nComputing the 2D zonal modes...\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DEFORMABLE MIRROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nControlled Actuators \t 357\nM4 influence functions \t No\nPixel Size \t\t0.1 \t [m]\nPitch \t\t\t 0.4 \t [m]\nMechanical Coupling \t 0.45 \t [m]\nRotation: 0 deg -- shift X: 0 m -- shift Y: 0 m -- Anamorphosis Angle: 0 deg -- Radial Scaling: 0 -- Tangential Scaling: 0\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nDone!\n"
]
],
[
[
"We can display the cube of the influence functions to display the position of the actuators.\n",
"_____no_output_____"
]
],
[
[
"cube_IF = np.reshape(np.sum(dm.modes**3, axis =1),[tel.resolution,tel.resolution])\nplt.figure()\nplt.imshow(cube_IF.T)",
"_____no_output_____"
]
],
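[
[
"# Added example (sketch): poke a single actuator to visualize one influence function.\n# It only uses attributes already introduced in this tutorial (dm.nValidAct, dm.coefs,\n# the * propagation operator and dm.OPD); the 100 nm amplitude is arbitrary.\npoke = np.zeros(dm.nValidAct)\npoke[dm.nValidAct // 2] = 100e-9\ndm.coefs = poke\ntel*dm\nplt.figure()\nplt.imshow(dm.OPD)\nplt.colorbar()\nplt.title('Single influence function [m]')",
"_____no_output_____"
]
],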
[
[
"### Light propagation\nThe light can be propagate through the DM using the ***** operator. \n\nTo update the DM surface, the property **dm.coefs** must be updated to set the new values of the DM coefficients.\n\nTypically, using a random command vector, we can propagate the light through the DM (light is reflected hence the sign change and the factor 2 in OPD):",
"_____no_output_____"
]
],
[
[
"tel-atm\n\ndm.coefs = (np.random.rand(dm.nValidAct)-0.5)*100e-9\ntel*dm\nplt.figure()\nplt.subplot(121)\nplt.imshow(dm.OPD)\nplt.title('DM OPD [m]')\nplt.colorbar()\nplt.subplot(122)\nplt.imshow(tel.OPD)\nplt.colorbar()\nplt.title('Telescope OPD [m]')\nplt.figure()\nplt.imshow(atm.OPD_no_pupil)\nplt.colorbar()\nplt.title('Atmosphere OPD [m]')\n",
"Telescope and Atmosphere separated!\n"
]
],
[
[
"### Mis-registrations\nThe DM/WFS mis-registrations are applied directly in the DM space, applying the transformations on the DM influence functions. \n\nFirst we create a **MisRegistration Object** that is initialized to 0. \n\nWe can then update the values of the mis-registrations and input it to the DM model:",
"_____no_output_____"
]
],
[
[
"misReg = MisRegistration()\nmisReg.rotationAngle = 3\nmisReg.shiftX = 0.3*param['diameter']/param['nSubaperture']\nmisReg.shiftY = 0.25*param['diameter']/param['nSubaperture']\n\ndm_misReg = DeformableMirror(telescope = tel,\\\n nSubap = param['nSubaperture'],\\\n mechCoupling = param['mechanicalCoupling'],\\\n misReg = misReg)\nprint('Done!')\n\nplt.figure()\nplt.plot(dm.coordinates[:,0],dm.coordinates[:,1],'.')\nplt.plot(dm_misReg.coordinates[:,0],dm_misReg.coordinates[:,1],'.')\nplt.axis('square')\nplt.legend(['initial DM','mis-registered DM'])\n",
"No coordinates loaded.. taking the cartesian geometry as a default\nGenerating a Deformable Mirror: \nComputing the 2D zonal modes...\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% DEFORMABLE MIRROR %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nControlled Actuators \t 357\nM4 influence functions \t No\nPixel Size \t\t0.1 \t [m]\nPitch \t\t\t 0.4 \t [m]\nMechanical Coupling \t 0.45 \t [m]\nRotation: 3 deg -- shift X: 0.12 m -- shift Y: 0.1 m -- Anamorphosis Angle: 0 deg -- Radial Scaling: 0 -- Tangential Scaling: 0\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nDone!\n"
]
],
[
[
"## Pyramid Object\nThe pyramid object consists mainly in defining the PWFS mask to apply the filtering of the electro-magnetic field. \nMany parameters can allow to tune the pyramid model:\n* Centering of the mask and of the FFT on 1 or 4 pixels\n* Modulation radius in λ/D. By default the number of modulation points ensures to have one point every λ/D on the circular trajectory but this sampling can be modified by the user. The number of modulation points is a multiple of 4 to ensure that each quadrant has the same number of modulation points.\n* The modulation value for the calibration and selection of the valid pixels\n* PWFS pupils separation, either for a perfect pyramid with a single value or for an imperfect pyramid with 8 values (shift X and Y for each PWFS pupil) . \n* The type of post-processing of the PWFS signals (slopes-maps, full-frame,etc). To be independent from this choice, the pyramid signals are named “wfs.pyramidSignal_2D” for either the Slopes-Maps or the camera frame and “wfs.pyramidSignal” for the signal reduced to the valid pixels considered.\n* The intensity threshold to select the valid pixel \n\nSome optional features can be user-defined:\n* Zero-padding value\n* Number of pixel on the edge of the Pyramid pupils\n* The units of the WFS signals can be calibrated using a ramp of Tip/Tilt\n\n\nIn addition, the Pyramid object has a Detector object as a child-class that provides the pyramid signals. It can be access through **wfs.cam**",
"_____no_output_____"
]
],
[
[
"# make sure tel and atm are separated to initialize the PWFS\ntel-atm\n\n# create the Pyramid Object\nwfs = Pyramid(nSubap = param['nSubaperture'],\\\n telescope = tel,\\\n modulation = param['modulation'],\\\n lightRatio = param['lightThreshold'],\\\n pupilSeparationRatio = param['pupilSeparationRatio'],\\\n calibModulation = param['calibrationModulation'],\\\n psfCentering = param['psfCentering'],\\\n edgePixel = param['edgePixel'],\\\n unitCalibration = param['unitCalibration'],\\\n extraModulationFactor = param['extraModulationFactor'],\\\n postProcessing = param['postProcessing'])\n",
"Telescope and Atmosphere separated!\nPyramid Mask initialization...\nDone!\nSelection of the valid pixels...\nThe valid pixel are selected on flux considerations\nDone!\nAcquisition of the reference slopes and units calibration...\nWFS calibrated!\nDone!\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% PYRAMID WFS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\nPupils Diameter \t20 \t [pixels]\nPupils Separation \t3.999999999999999 \t [pixels]\nPixel Size \t\t0.4\t [m]\nTT Modulation \t\t3 \t [lamda/D]\nPSF Core Sampling \t1 \t [pixel(s)]\nSignal Post-Processing \tslopesMaps\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n"
]
],
[
[
"The light can be propagated to the WFS through the different objects with using the ***** operator:",
"_____no_output_____"
]
],
[
[
"tel*wfs\n\nplt.figure()\nplt.imshow(wfs.cam.frame)\nplt.colorbar()\n\n",
"_____no_output_____"
]
],
[
[
"We can display the PWFS signals that corresponds to a random actuation of the DM:",
"_____no_output_____"
]
],
[
[
"dm.coefs = (np.random.rand(dm.nValidAct)-0.5)*100e-9\ntel*dm*wfs\n\nplt.figure()\nplt.imshow(wfs.pyramidSignal_2D)\nplt.colorbar()",
"_____no_output_____"
]
],
[
[
"## Modal Basis\nIn this tutorial, we compute the mode-to-commands matrix (M2C) using the codes provided by C.Verinaud. It corresponds to a KL modal basis orthogonolized in the DM space.",
"_____no_output_____"
]
],
[
[
"# compute the modal basis\nfoldername_M2C = None # name of the folder to save the M2C matrix, if None a default name is used \nfilename_M2C = None # name of the filename, if None a default name is used \n\n\nM2C_KL = compute_M2C(telescope = tel,\\\n atmosphere = atm,\\\n deformableMirror = dm,\\\n param = param,\\\n nameFolder = None,\\\n nameFile = None,\\\n remove_piston = True,\\\n HHtName = None,\\\n baseName = None ,\\\n mem_available = 8.1e9,\\\n minimF = False,\\\n nmo = 300,\\\n ortho_spm = True,\\\n SZ = np.int(2*tel.OPD.shape[0]),\\\n nZer = 3,\\\n NDIVL = 1)\n",
"Creation of the directory /Disk3/cheritier/psim/data_calibration/ failed:\nDirectory already exists!\nCOMPUTING TEL*DM...\n \nPREPARING IF_2D...\n \nComputing Specific Modes ...\n \nCOMPUTING VON KARMAN 2D PSD...\n \nCOMPUTING COV MAT HHt...\n \nTIME ELAPSED: 3 sec. COMPLETED: 100 %\nSERIALIZING IFs...\n \nSERIALIZING Specific Modes... \n \nCOMPUTING IFs CROSS PRODUCT...\n \nNMAX = 300\nRMS opd error = [[1.16127888e-08 1.75899251e-08 1.75899251e-08]]\nRMS Positions = [[7.26577110e-08 3.29310827e-07 3.29310827e-07]]\nMAX Positions = [[4.52339280e-07 8.84512596e-07 8.84512596e-07]]\nCHECKING ORTHONORMALITY OF SPECIFIC MODES...\n \nOrthonormality error for SpM = 3.3306690738754696e-16\nBUILDING SEED BASIS ...\n \nOrthonormality error for 304 modes of the Seed Basis = 2.2426505097428162e-14\nKL WITH DOUBLE DIAGONALISATION: COVARIANCE ERROR = 5.993844598436698e-14\nOrthonormality error for 300 modes of the KL Basis = 2.020605904817785e-14\nPiston removed from the modal basis!\n"
]
],
[
[
"## Interaction Matrix\nThe interaction matrix can be computed using the M2C matrix and the function interactionMatrix.\nThe output is stored as a class that contains all the informations about the inversion (SVD) such as eigenValues, reconstructor, etc. \n\nIt is possible to add a **phaseOffset** to the interactionMatrix measurement. ",
"_____no_output_____"
]
],
[
[
"#%% to manually measure the interaction matrix\n#\n## amplitude of the modes in m\n#stroke=1e-9\n## Modal Interaction Matrix \n#M2C = M2C[:,:param['nModes']]\n#from AO_modules.calibration.InteractionMatrix import interactionMatrix\n#\n#calib = interactionMatrix(ngs = ngs,\\\n# atm = atm,\\\n# tel = tel,\\\n# dm = dm,\\\n# wfs = wfs,\\\n# M2C = M2C,\\\n# stroke = stroke,\\\n# phaseOffset = 0,\\\n# nMeasurements = 100,\\\n# noise = False)\n#\n#plt.figure()\n#plt.plot(np.std(calib.D,axis=0))\n#plt.xlabel('Mode Number')\n#plt.ylabel('WFS slopes STD')\n#plt.ylabel('Optical Gain')\n\n",
"_____no_output_____"
],
[
"param['nModes'] =300\n\nao_calib = ao_calibration(param = param,\\\n ngs = ngs,\\\n tel = tel,\\\n atm = atm,\\\n dm = dm,\\\n wfs = wfs,\\\n nameFolderIntMat = None,\\\n nameIntMat = None,\\\n nameFolderBasis = None,\\\n nameBasis = None,\\\n nMeasurements = 100)\n",
"Creation of the directory /Disk3/cheritier/psim/data_calibration/ failed:\nDirectory already exists!\nLoading the KL Modal Basis from: /Disk3/cheritier/psim/data_calibration/M2C_80_res\nComputing the pseudo-inverse of the modal basis...\nDiagonality criteria: 1.7785772854495008e-13 -- using the fast computation\nCreation of the directory /Disk3/cheritier/psim/data_calibration/VLT_I_band_20x20/ failed:\nDirectory already exists!\nLoading Interaction matrix zonal_interaction_matrix_80_res_3_mod_slopesMaps_psfCentering_False...\nDone!\nNo Modal Gains found. All gains set to 1\n"
]
],
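[
[
"# Illustration only (not part of the calibration pipeline above): a modal reconstructor is\n# typically the (truncated) pseudo-inverse of the interaction matrix. The synthetic D_example\n# below stands in for the measured matrix that ao_calib.calib appears to store as D / M.\nD_example = np.random.randn(wfs.nSignal, param['nModes'])\nU, S, Vt = np.linalg.svd(D_example, full_matrices=False)\nn_kept = 250  # arbitrary truncation for the illustration\nS_inv = np.zeros_like(S)\nS_inv[:n_kept] = 1.0 / S[:n_kept]\nM_example = (Vt.T * S_inv) @ U.T  # modal commands = M_example @ WFS signals\nprint('Reconstructor shape:', M_example.shape)",
"_____no_output_____"
]
],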
[
[
"## Display Modal Basis\n",
"_____no_output_____"
]
],
[
[
"\n# project the mode on the DM\ndm.coefs = ao_calib.M2C[:,:100]\n\ntel*dm\n#\n# show the modes projected on the dm, cropped by the pupil and normalized by their maximum value\ndisplayMap(tel.OPD,norma=True)\nplt.title('Basis projected on the DM')\n\nKL_dm = np.reshape(tel.OPD,[tel.resolution**2,tel.OPD.shape[2]])\n\ncovMat = (KL_dm.T @ KL_dm) / tel.resolution**2\n\nplt.figure()\nplt.imshow(covMat)\nplt.title('Orthogonality')\nplt.show()\n\nplt.figure()\nplt.plot(np.round(np.std(np.squeeze(KL_dm[tel.pupilLogical,:]),axis = 0),5))\nplt.title('KL mode normalization projected on the DM')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Closed Loop\nHere is a code to do a closed-loop simulation using the PSIM code:",
"_____no_output_____"
]
],
[
[
"\n# These are the calibration data used to close the loop\ncalib_CL = ao_calib.calib\nM2C_CL = ao_calib.M2C\n\nparam['nLoop'] =100\nplt.close('all')\n\n# combine telescope with atmosphere\ntel+atm\n\n# initialize DM commands\ndm.coefs=0\nngs*tel*dm*wfs\n\nplt.ion()\n# setup the display\nfig = plt.figure(79)\nax1 = plt.subplot(2,3,1)\nim_atm = ax1.imshow(tel.src.phase)\nplt.colorbar(im_atm)\nplt.title('Turbulence phase [rad]')\n\nax2 = plt.subplot(2,3,2)\nim_dm = ax2.imshow(dm.OPD*tel.pupil)\nplt.colorbar(im_dm)\nplt.title('DM phase [rad]')\ntel.computePSF(zeroPaddingFactor=6)\n\nax4 = plt.subplot(2,3,3)\nim_PSF_OL = ax4.imshow(tel.PSF_trunc)\nplt.colorbar(im_PSF_OL)\nplt.title('OL PSF')\n\n\nax3 = plt.subplot(2,3,5)\nim_residual = ax3.imshow(tel.src.phase)\nplt.colorbar(im_residual)\nplt.title('Residual phase [rad]')\n\nax5 = plt.subplot(2,3,4)\nim_wfs_CL = ax5.imshow(wfs.cam.frame)\nplt.colorbar(im_wfs_CL)\nplt.title('Pyramid Frame CL')\n\nax6 = plt.subplot(2,3,6)\nim_PSF = ax6.imshow(tel.PSF_trunc)\nplt.colorbar(im_PSF)\nplt.title('CL PSF')\n\nplt.show()\n\n# allocate memory to save data\nSR = np.zeros(param['nLoop'])\ntotal = np.zeros(param['nLoop'])\nresidual = np.zeros(param['nLoop'])\nwfsSignal = np.arange(0,wfs.nSignal)*0\n\n# loop parameters\ngainCL = 0.6\nwfs.cam.photonNoise = True\ndisplay = False\n\nfor i in range(param['nLoop']):\n a=time.time()\n # update phase screens => overwrite tel.OPD and consequently tel.src.phase\n atm.update()\n # save phase variance\n total[i]=np.std(tel.OPD[np.where(tel.pupil>0)])*1e9\n # save turbulent phase\n turbPhase = tel.src.phase\n if display == True:\n # compute the OL PSF and update the display\n tel.computePSF(zeroPaddingFactor=6)\n im_PSF_OL.set_data(np.log(tel.PSF_trunc/tel.PSF_trunc.max()))\n im_PSF_OL.set_clim(vmin=-3,vmax=0)\n \n # propagate to the WFS with the CL commands applied\n tel*dm*wfs\n \n # save the DM OPD shape\n dmOPD=tel.pupil*dm.OPD*2*np.pi/ngs.wavelength\n \n dm.coefs=dm.coefs-gainCL*M2C_CL@calib_CL.M@wfsSignal\n # store the slopes after computing the commands => 2 frames delay\n wfsSignal=wfs.pyramidSignal\n b= time.time()\n print('Elapsed time: ' + str(b-a) +' s')\n # update displays if required\n if display==True:\n \n # Turbulence\n im_atm.set_data(turbPhase)\n im_atm.set_clim(vmin=turbPhase.min(),vmax=turbPhase.max())\n # WFS frame\n C=wfs.cam.frame\n im_wfs_CL.set_data(C)\n im_wfs_CL.set_clim(vmin=C.min(),vmax=C.max())\n # DM OPD\n im_dm.set_data(dmOPD)\n im_dm.set_clim(vmin=dmOPD.min(),vmax=dmOPD.max())\n \n # residual phase\n D=tel.src.phase\n D=D-np.mean(D[tel.pupil])\n im_residual.set_data(D)\n im_residual.set_clim(vmin=D.min(),vmax=D.max()) \n \n tel.computePSF(zeroPaddingFactor=6)\n im_PSF.set_data(np.log(tel.PSF_trunc/tel.PSF_trunc.max()))\n im_PSF.set_clim(vmin=-4,vmax=0)\n plt.draw()\n plt.show()\n plt.pause(0.001)\n \n \n SR[i]=np.exp(-np.var(tel.src.phase[np.where(tel.pupil==1)]))\n residual[i]=np.std(tel.OPD[np.where(tel.pupil>0)])*1e9\n OPD=tel.OPD[np.where(tel.pupil>0)]\n\n print('Loop'+str(i)+'/'+str(param['nLoop'])+' Turbulence: '+str(total[i])+' -- Residual:' +str(residual[i])+ '\\n')\n\n#%%\nplt.figure()\nplt.plot(total)\nplt.plot(residual)\nplt.xlabel('Time')\nplt.ylabel('WFE [nm]')\n\nplt.pause(10)\n",
"Telescope and Atmosphere combined!\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7de81bd13ef3ad3830880f7de1e1f02a0fe12bd | 39,104 | ipynb | Jupyter Notebook | Santa 2020 - The Candy Cane Contest/src/agent-scrape-everything-step-2.ipynb | DavideStenner/Kaggle | c3e6eae84413611a0859358767319f9604a07d4d | [
"MIT"
] | null | null | null | Santa 2020 - The Candy Cane Contest/src/agent-scrape-everything-step-2.ipynb | DavideStenner/Kaggle | c3e6eae84413611a0859358767319f9604a07d4d | [
"MIT"
] | null | null | null | Santa 2020 - The Candy Cane Contest/src/agent-scrape-everything-step-2.ipynb | DavideStenner/Kaggle | c3e6eae84413611a0859358767319f9604a07d4d | [
"MIT"
] | null | null | null | 31.766044 | 162 | 0.565901 | [
[
[
"import os\nimport sys\nfrom time import sleep\nimport json\nimport pandas as pd\nimport os\nimport gc\nimport numpy as np\nfrom tqdm.notebook import tqdm\nimport pickle\n\nfrom kaggle_environments import (\n evaluate, make, utils,\n get_episode_replay,\n)\n\nroot = \"../input/meta-kaggle\"\nsanta_id = 24539\n\nepisode_agents = pd.read_csv(os.path.join(root, \"EpisodeAgents.csv\"))\nagents = pd.read_csv(os.path.join(root, \"Episodes.csv\"))",
"Loading environment football failed: No module named 'gfootball'\n"
],
[
"#get agent - competition mapping\nsanta_agents = agents.loc[(agents['CompetitionId'] == santa_id)].reset_index(drop = True)\ngc.collect()\n\n#get episode of santa competition\nepisode_agents = episode_agents.merge(santa_agents, how = 'inner', left_on = 'EpisodeId', right_on = \"Id\")\ngc.collect()",
"_____no_output_____"
],
[
"keep_col = [\n \"EpisodeId\", \"Index\", \"Reward\", \"State\", \"SubmissionId\", \"InitialConfidence\",\n \"InitialScore\", \"UpdatedConfidence\", \"UpdatedScore\", \"Type\", \"CreateTime\", \"EndTime\"\n]\n\nepisode_agents = episode_agents[keep_col]\n\n#discard validation episode\nepisode_agents = episode_agents[episode_agents['Type'] == 1].reset_index(drop = True)\n\n#convert time column to dt\nepisode_agents['CreateTime'] = pd.to_datetime(episode_agents['CreateTime'].transform(lambda x: x[:16]))\nepisode_agents['EndTime'] = pd.to_datetime(episode_agents['EndTime'].transform(lambda x: x[:16]))\n\ngc.collect()",
"_____no_output_____"
],
[
"del agents, santa_agents\ngc.collect()",
"_____no_output_____"
],
[
"#get last episode for each submission (last score on leaderboard) and sort by score\nlast_submission_update = episode_agents.sort_values(['SubmissionId', 'EndTime'], ascending = False).groupby('SubmissionId').head(1)\nlast_submission_update = last_submission_update.sort_values('UpdatedScore', ascending = False)",
"_____no_output_____"
],
[
"gc.collect()\n\n#treshold for 7 position on leaderboard\ntop_7_tresh = 1233\nnot_so_well_tresh = 1100\n\n#get every agent with UpdatedScore >= top_7_tresh\ntop_agents = last_submission_update[last_submission_update['UpdatedScore'] >= top_7_tresh].SubmissionId.tolist()",
"_____no_output_____"
],
[
"#get pulled_episode a list with every pulled episode\n\n#insert here path of input folder\npath_folder = '../input/agent-scrape-everything-step-1'\n\nif path_folder != '':\n \n with open(os.path.join(path_folder, 'winner_dic.pkl'), 'rb') as file:\n winner_dic = pickle.load(file)\n \n pulled_episodes = [int(x.replace('.json', '')) for x in winner_dic.keys()]\n \n\nelse:\n pulled_episodes = []\n winner_dic = {}",
"_____no_output_____"
],
[
"episodes_to_pull = []\nfor sub_id in tqdm(top_agents):\n \n epid_list_submission = episode_agents.loc[episode_agents['SubmissionId'] == sub_id, 'EpisodeId'].tolist()\n\n #get list of episode which hasn't been loaded yet\n list_episodes = [epid for epid in epid_list_submission if epid not in pulled_episodes]\n episodes_to_pull += list_episodes\n \nprint('Number of remaining Episodes', len(episodes_to_pull))\n \ndel episodes_to_pull\ngc.collect()",
"_____no_output_____"
],
[
"#max number of call\ngc.collect()\nMAXAPICALLS = 3600\nDIR = './'\ntime_w8 = 20\nnum_api_calls_today = 0\ncounter_per_minute = 0\n\nfor sub_id in tqdm(top_agents):\n print(f'Starting scraping: {sub_id}')\n \n if num_api_calls_today < MAXAPICALLS:\n \n epid_list_submission = episode_agents.loc[episode_agents['SubmissionId'] == sub_id, 'EpisodeId'].tolist()\n \n #get list of episode which hasn't been loaded yet\n list_episodes = [epid for epid in epid_list_submission if epid not in pulled_episodes]\n \n print(f'{len(list_episodes)} different episodes')\n \n for epid in list_episodes:\n \n #get reward for each agent of the episode\n reward = episode_agents[episode_agents['EpisodeId'] == epid]['Reward'].tolist()\n \n #get_sub_id\n sub_episode_list = episode_agents[episode_agents['EpisodeId'] == epid]['SubmissionId'].tolist()\n \n #get last score per sub\n score_per_sub = [last_submission_update.loc[last_submission_update['SubmissionId'] == x, 'UpdatedScore'].values[0] for x in sub_episode_list]\n \n #this episode was broken and one agent got no reward\n if any([x is None for x in reward]): \n pulled_episodes += [epid]\n \n #next episode\n continue \n \n #get index for each agent\n agent = episode_agents[episode_agents['EpisodeId'] == epid]['Index'].tolist()\n \n winner_score = max(reward)\n winner = []\n for i in range(2):\n if (score_per_sub[i] >= top_7_tresh):\n winner += [i]\n \n if (score_per_sub[i] >= not_so_well_tresh) & (reward[i] == winner_score):\n winner += [i]\n \n #save top agent and winner perspective (if it has >= not_so_well_tresh)\n winner = list(set(winner))\n \n try:\n #get the episode\n replay = get_episode_replay(epid)['result']['replay']\n pulled_episodes += [epid]\n \n except:\n sleep(time_w8)\n\n # save replay\n with open(DIR + '/{}.json'.format(epid), 'w') as f:\n f.write(replay)\n \n #save the winner\n winner_dic[f\"{epid}.json\"] = winner \n \n num_api_calls_today += 1\n counter_per_minute += 1\n \n if counter_per_minute >= 60:\n sleep(time_w8)\n counter_per_minute = 0\n \nwith open('winner_dic.pkl', 'wb') as file:\n pickle.dump(winner_dic, file, protocol=pickle.HIGHEST_PROTOCOL)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7de8f991a3eac692fd360abcb20385bf941e3ad | 855,072 | ipynb | Jupyter Notebook | week05_explore/week5.ipynb | kianya/Practical_RL | 6e4ee47a0b22622b12e9c3f4a3ffd18ae0c6337c | [
"Unlicense"
] | 3 | 2020-12-14T11:03:38.000Z | 2021-03-03T21:38:40.000Z | week05_explore/week5.ipynb | kianya/Practical_RL | 6e4ee47a0b22622b12e9c3f4a3ffd18ae0c6337c | [
"Unlicense"
] | null | null | null | week05_explore/week5.ipynb | kianya/Practical_RL | 6e4ee47a0b22622b12e9c3f4a3ffd18ae0c6337c | [
"Unlicense"
] | 1 | 2021-12-16T14:42:21.000Z | 2021-12-16T14:42:21.000Z | 600.050526 | 281,712 | 0.944043 | [
[
[
"# Run this if in COLAB\n!pip install --upgrade https://github.com/Theano/Theano/archive/master.zip\n!pip install --upgrade https://github.com/Lasagne/Lasagne/archive/master.zip\n \n!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week5_explore/bayes.py\n!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week5_explore/action_rewards.npy\n!wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/master/week5_explore/all_states.npy",
"_____no_output_____"
],
[
"from abc import ABCMeta, abstractmethod, abstractproperty\nimport enum\n\nimport numpy as np\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\n\nimport pandas\n\nfrom matplotlib import pyplot as plt\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Contents\n* [1. Bernoulli Bandit](#Part-1.-Bernoulli-Bandit)\n * [Bonus 1.1. Gittins index (5 points)](#Bonus-1.1.-Gittins-index-%285-points%29.)\n * [HW 1.1. Nonstationary Bernoulli bandit](#HW-1.1.-Nonstationary-Bernoulli-bandit)\n* [2. Contextual bandit](#Part-2.-Contextual-bandit)\n * [2.1 Bulding a BNN agent](#2.1-Bulding-a-BNN-agent)\n * [2.2 Training the agent](#2.2-Training-the-agent)\n * [HW 2.1 Better exploration](#HW-2.1-Better-exploration)\n* [3. Exploration in MDP](#Part-3.-Exploration-in-MDP)\n * [Bonus 3.1 Posterior sampling RL (3 points)](#Bonus-3.1-Posterior-sampling-RL-%283-points%29)\n * [Bonus 3.2 Bootstrapped DQN (10 points)](#Bonus-3.2-Bootstrapped-DQN-%2810-points%29)\n",
"_____no_output_____"
],
[
"## Part 1. Bernoulli Bandit\n\nWe are going to implement several exploration strategies for simplest problem - bernoulli bandit.\n\nThe bandit has $K$ actions. Action produce 1.0 reward $r$ with probability $0 \\le \\theta_k \\le 1$ which is unknown to agent, but fixed over time. Agent's objective is to minimize regret over fixed number $T$ of action selections:\n\n$$\\rho = T\\theta^* - \\sum_{t=1}^T r_t$$\n\nWhere $\\theta^* = \\max_k\\{\\theta_k\\}$\n\n**Real-world analogy:**\n\nClinical trials - we have $K$ pills and $T$ ill patient. After taking pill, patient is cured with probability $\\theta_k$. Task is to find most efficient pill.\n\nA research on clinical trials - https://arxiv.org/pdf/1507.08025.pdf",
"_____no_output_____"
]
],
[
[
"class BernoulliBandit:\n def __init__(self, n_actions=5):\n self._probs = np.random.random(n_actions)\n\n @property\n def action_count(self):\n return len(self._probs)\n\n def pull(self, action):\n if np.any(np.random.random() > self._probs[action]):\n return 0.0\n return 1.0\n\n def optimal_reward(self):\n \"\"\" Used for regret calculation\n \"\"\"\n return np.max(self._probs)\n\n def step(self):\n \"\"\" Used in nonstationary version\n \"\"\"\n pass\n\n def reset(self):\n \"\"\" Used in nonstationary version\n \"\"\"",
"_____no_output_____"
],
[
"class AbstractAgent(metaclass=ABCMeta):\n def init_actions(self, n_actions):\n self._successes = np.zeros(n_actions)\n self._failures = np.zeros(n_actions)\n self._total_pulls = 0\n\n @abstractmethod\n def get_action(self):\n \"\"\"\n Get current best action\n :rtype: int\n \"\"\"\n pass\n\n def update(self, action, reward):\n \"\"\"\n Observe reward from action and update agent's internal parameters\n :type action: int\n :type reward: int\n \"\"\"\n self._total_pulls += 1\n if reward == 1:\n self._successes[action] += 1\n else:\n self._failures[action] += 1\n\n @property\n def name(self):\n return self.__class__.__name__\n\n\nclass RandomAgent(AbstractAgent):\n def get_action(self):\n return np.random.randint(0, len(self._successes))",
"_____no_output_____"
]
],
[
[
"### Epsilon-greedy agent\n\n**for** $t = 1,2,...$ **do**\n\n **for** $k = 1,...,K$ **do**\n\n $\\hat\\theta_k \\leftarrow \\alpha_k / (\\alpha_k + \\beta_k)$\n\n **end for** \n\n $x_t \\leftarrow argmax_{k}\\hat\\theta$ with probability $1 - \\epsilon$ or random action with probability $\\epsilon$\n\n Apply $x_t$ and observe $r_t$\n\n $(\\alpha_{x_t}, \\beta_{x_t}) \\leftarrow (\\alpha_{x_t}, \\beta_{x_t}) + (r_t, 1-r_t)$\n\n**end for**\n\nImplement the algorithm above in the cell below:",
"_____no_output_____"
]
],
[
[
"class EpsilonGreedyAgent(AbstractAgent):\n def __init__(self, epsilon=0.01):\n self._epsilon = epsilon\n\n def get_action(self):\n # YOUR CODE HERE\n\n @property\n def name(self):\n return self.__class__.__name__ + \"(epsilon={})\".format(self._epsilon)",
"_____no_output_____"
]
],
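[
[
"# For reference, one possible way to fill in the epsilon-greedy skeleton above\n# (the class name below is ours; try the exercise yourself first). The empirical means\n# are computed from the success/failure counters maintained by AbstractAgent.\nclass EpsilonGreedyAgentSolved(EpsilonGreedyAgent):\n    def get_action(self):\n        n = self._successes + self._failures\n        p_hat = self._successes / np.maximum(n, 1)\n        if np.random.random() < self._epsilon:\n            return np.random.randint(len(p_hat))\n        return int(np.argmax(p_hat))",
"_____no_output_____"
]
],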
[
[
"### UCB Agent\nEpsilon-greedy strategy heve no preference for actions. It would be better to select among actions that are uncertain or have potential to be optimal. One can come up with idea of index for each action that represents otimality and uncertainty at the same time. One efficient way to do it is to use UCB1 algorithm:\n\n**for** $t = 1,2,...$ **do**\n\n **for** $k = 1,...,K$ **do**\n\n $w_k \\leftarrow \\alpha_k / (\\alpha_k + \\beta_k) + \\sqrt{2log\\ t \\ / \\ (\\alpha_k + \\beta_k)}$\n\n **end for** \n\n **end for** \n $x_t \\leftarrow argmax_{k}w$\n\n Apply $x_t$ and observe $r_t$\n\n $(\\alpha_{x_t}, \\beta_{x_t}) \\leftarrow (\\alpha_{x_t}, \\beta_{x_t}) + (r_t, 1-r_t)$\n\n**end for**\n\n__Note:__ in practice, one can multiply $\\sqrt{2log\\ t \\ / \\ (\\alpha_k + \\beta_k)}$ by some tunable parameter to regulate agent's optimism and wilingness to abandon non-promising actions.\n\nMore versions and optimality analysis - https://homes.di.unimi.it/~cesabian/Pubblicazioni/ml-02.pdf",
"_____no_output_____"
]
],
[
[
"class UCBAgent(AbstractAgent):\n def get_action(self):\n # YOUR CODE HERE",
"_____no_output_____"
]
],
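[
[
"# Reference sketch for the UCB1 rule above (the class name is ours). np.maximum(n, 1)\n# guards against division by zero before an arm has been pulled at least once.\nclass UCBAgentSolved(UCBAgent):\n    def get_action(self):\n        n = self._successes + self._failures\n        exploit = self._successes / np.maximum(n, 1)\n        explore = np.sqrt(2 * np.log(max(self._total_pulls, 1)) / np.maximum(n, 1))\n        return int(np.argmax(exploit + explore))",
"_____no_output_____"
]
],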
[
[
"### Thompson sampling\n\nUCB1 algorithm does not take into account actual distribution of rewards. If we know the distribution - we can do much better by using Thompson sampling:\n\n**for** $t = 1,2,...$ **do**\n\n **for** $k = 1,...,K$ **do**\n\n Sample $\\hat\\theta_k \\sim beta(\\alpha_k, \\beta_k)$\n\n **end for** \n\n $x_t \\leftarrow argmax_{k}\\hat\\theta$\n\n Apply $x_t$ and observe $r_t$\n\n $(\\alpha_{x_t}, \\beta_{x_t}) \\leftarrow (\\alpha_{x_t}, \\beta_{x_t}) + (r_t, 1-r_t)$\n\n**end for**\n \n\nMore on Thompson Sampling:\nhttps://web.stanford.edu/~bvr/pubs/TS_Tutorial.pdf",
"_____no_output_____"
]
],
[
[
"class ThompsonSamplingAgent(AbstractAgent):\n def get_action(self):\n # YOUR CODE HERE\n",
"_____no_output_____"
],
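[
"# Reference sketch for Thompson sampling (the class name is ours): draw one sample per arm\n# from a Beta posterior with a uniform Beta(1, 1) prior and act greedily on that draw.\nclass ThompsonSamplingAgentSolved(ThompsonSamplingAgent):\n    def get_action(self):\n        samples = np.random.beta(self._successes + 1, self._failures + 1)\n        return int(np.argmax(samples))",
"_____no_output_____"
],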
[
"def plot_regret(env, agents, n_steps=5000, n_trials=50):\n scores = {\n agent.name: [0.0 for step in range(n_steps)] for agent in agents\n }\n\n for trial in range(n_trials):\n env.reset()\n\n for a in agents:\n a.init_actions(env.action_count)\n\n for i in range(n_steps):\n optimal_reward = env.optimal_reward()\n\n for agent in agents:\n action = agent.get_action()\n reward = env.pull(action)\n agent.update(action, reward)\n scores[agent.name][i] += optimal_reward - reward\n\n env.step() # change bandit's state if it is unstationary\n\n plt.figure(figsize=(17, 8))\n for agent in agents:\n plt.plot(np.cumsum(scores[agent.name]) / n_trials)\n\n plt.legend([agent.name for agent in agents])\n\n plt.ylabel(\"regret\")\n plt.xlabel(\"steps\")\n\n plt.show()",
"_____no_output_____"
],
[
"# Uncomment agents\nagents = [\n # EpsilonGreedyAgent(),\n # UCBAgent(),\n # ThompsonSamplingAgent()\n]\n\nplot_regret(BernoulliBandit(), agents, n_steps=10000, n_trials=10)",
"_____no_output_____"
]
],
[
[
"# Bonus 1.1. Gittins index (5 points).\n\nBernoulli bandit problem has an optimal solution - Gittins index algorithm. Implement finite horizon version of the algorithm and demonstrate it's performance with experiments. some articles:\n- Wikipedia article - https://en.wikipedia.org/wiki/Gittins_index\n- Different algorithms for index computation - http://www.ece.mcgill.ca/~amahaj1/projects/bandits/book/2013-bandit-computations.pdf (see \"Bernoulli\" section)\n ",
"_____no_output_____"
],
[
"# HW 1.1. Nonstationary Bernoulli bandit\n\nWhat if success probabilities change over time? Here is an example of such bandit:",
"_____no_output_____"
]
],
[
[
"class DriftingBandit(BernoulliBandit):\n def __init__(self, n_actions=5, gamma=0.01):\n \"\"\"\n Idea from https://github.com/iosband/ts_tutorial\n \"\"\"\n super().__init__(n_actions)\n\n self._gamma = gamma\n\n self._successes = None\n self._failures = None\n self._steps = 0\n\n self.reset()\n\n def reset(self):\n self._successes = np.zeros(self.action_count) + 1.0\n self._failures = np.zeros(self.action_count) + 1.0\n self._steps = 0\n\n def step(self):\n action = np.random.randint(self.action_count)\n reward = self.pull(action)\n self._step(action, reward)\n\n def _step(self, action, reward):\n self._successes = self._successes * (1 - self._gamma) + self._gamma\n self._failures = self._failures * (1 - self._gamma) + self._gamma\n self._steps += 1\n\n self._successes[action] += reward\n self._failures[action] += 1.0 - reward\n\n self._probs = np.random.beta(self._successes, self._failures)",
"_____no_output_____"
]
],
[
[
"And a picture how it's reward probabilities change over time",
"_____no_output_____"
]
],
[
[
"drifting_env = DriftingBandit(n_actions=5)\n\ndrifting_probs = []\nfor i in range(20000):\n drifting_env.step()\n drifting_probs.append(drifting_env._probs)\n\nplt.figure(figsize=(17, 8))\nplt.plot(pandas.DataFrame(drifting_probs).rolling(window=20).mean())\n\nplt.xlabel(\"steps\")\nplt.ylabel(\"Success probability\")\nplt.title(\"Reward probabilities over time\")\nplt.legend([\"Action {}\".format(i) for i in range(drifting_env.action_count)])\nplt.show()",
"_____no_output_____"
]
],
[
[
"Your task is to invent an agent that will have better regret than stationary agents from above.",
"_____no_output_____"
]
],
[
[
"# YOUR AGENT HERE SECTION",
"_____no_output_____"
],
[
"drifting_agents = [\n ThompsonSamplingAgent(),\n EpsilonGreedyAgent(),\n UCBAgent(),\n YourAgent()\n]\n\nplot_regret(DriftingBandit(), drifting_agents, n_steps=20000, n_trials=10)",
"_____no_output_____"
]
],
[
[
"## Part 2. Contextual bandit\n\nNow we will solve much more complex problem - reward will depend on bandit's state.\n\n**Real-word analogy:**\n\n> Contextual advertising. We have a lot of banners and a lot of different users. Users can have different features: age, gender, search requests. We want to show banner with highest click probability.\n\nIf we want use strategies from above, we need some how store reward distributions conditioned both on actions and bandit's state. \nOne way to do this - use bayesian neural networks. Instead of giving pointwise estimates of target, they maintain probability distributions\n\n<img src=\"bnn.png\">\nPicture from https://arxiv.org/pdf/1505.05424.pdf\n\n\nMore material:\n * A post on the matter - [url](http://twiecki.github.io/blog/2016/07/05/bayesian-deep-learning/)\n * Theano+PyMC3 for more serious stuff - [url](http://pymc-devs.github.io/pymc3/notebooks/bayesian_neural_network_advi.html)\n * Same stuff in tensorflow - [url](http://edwardlib.org/tutorials/bayesian-neural-network)\n \nLet's load our dataset:",
"_____no_output_____"
]
],
[
[
"all_states = np.load(\"all_states.npy\")\naction_rewards = np.load(\"action_rewards.npy\")\n\nstate_size = all_states.shape[1]\nn_actions = action_rewards.shape[1]\n\nprint(\"State size: %i, actions: %i\" % (state_size, n_actions))",
"State size: 60, actions: 10\n"
],
[
"import theano\nimport theano.tensor as T\nimport lasagne\nfrom lasagne import init\nfrom lasagne.layers import *\nimport bayes\n\nas_bayesian = bayes.bbpwrap(bayes.NormalApproximation(std=0.1))\nBayesDenseLayer = as_bayesian(DenseLayer)",
"_____no_output_____"
]
],
[
[
"## 2.1 Bulding a BNN agent\n\nLet's implement epsilon-greedy BNN agent",
"_____no_output_____"
]
],
[
[
"class BNNAgent:\n \"\"\"a bandit with bayesian neural net\"\"\"\n\n def __init__(self, state_size, n_actions):\n input_states = T.matrix(\"states\")\n target_actions = T.ivector(\"actions taken\")\n target_rewards = T.vector(\"rewards\")\n\n self.total_samples_seen = theano.shared(\n np.int32(0), \"number of training samples seen so far\")\n batch_size = target_actions.shape[0] # por que?\n\n # Network\n inp = InputLayer((None, state_size), name='input')\n # YOUR NETWORK HERE\n out = <Your network >\n\n # Prediction\n prediction_all_actions = get_output(out, inputs=input_states)\n self.predict_sample_rewards = theano.function(\n [input_states], prediction_all_actions)\n\n # Training\n\n # select prediction for target action\n prediction_target_actions = prediction_all_actions[T.arange(\n batch_size), target_actions]\n\n # loss = negative log-likelihood (mse) + KL\n negative_llh = T.sum((prediction_target_actions - target_rewards)**2)\n\n kl = bayes.get_var_cost(out) / (self.total_samples_seen+batch_size)\n\n loss = (negative_llh + kl)/batch_size\n\n self.weights = get_all_params(out, trainable=True)\n self.out = out\n\n # gradient descent\n updates = lasagne.updates.adam(loss, self.weights)\n # update counts\n updates[self.total_samples_seen] = self.total_samples_seen + \\\n batch_size.astype('int32')\n\n self.train_step = theano.function([input_states, target_actions, target_rewards],\n [negative_llh, kl],\n updates=updates,\n allow_input_downcast=True)\n\n def sample_prediction(self, states, n_samples=1):\n \"\"\"Samples n_samples predictions for rewards,\n\n :returns: tensor [n_samples, state_i, action_i]\n \"\"\"\n assert states.ndim == 2, \"states must be 2-dimensional\"\n\n return np.stack([self.predict_sample_rewards(states) for _ in range(n_samples)])\n\n epsilon = 0.25\n\n def get_action(self, states):\n \"\"\"\n Picks action by \n - with p=1-epsilon, taking argmax of average rewards\n - with p=epsilon, taking random action\n This is exactly e-greedy policy.\n \"\"\"\n\n reward_samples = self.sample_prediction(states, n_samples=100)\n # ^-- samples for rewards, shape = [n_samples,n_states,n_actions]\n\n best_actions = reward_samples.mean(axis=0).argmax(axis=-1)\n # ^-- we take mean over samples to compute expectation, then pick best action with argmax\n\n # YOUR CODE HERE\n chosen_actions = <-- implement epsilon-greedy strategy - ->\n\n return chosen_actions\n\n def train(self, states, actions, rewards, n_iters=10):\n \"\"\"\n trains to predict rewards for chosen actions in given states\n \"\"\"\n loss_sum = kl_sum = 0\n for _ in range(n_iters):\n loss, kl = self.train_step(states, actions, rewards)\n loss_sum += loss\n kl_sum += kl\n\n return loss_sum / n_iters, kl_sum / n_iters\n\n @property\n def name(self):\n return self.__class__.__name__",
"_____no_output_____"
]
],
[
[
"## 2.2 Training the agent",
"_____no_output_____"
]
],
[
[
"N_ITERS = 100",
"_____no_output_____"
],
[
"def get_new_samples(states, action_rewards, batch_size=10):\n \"\"\"samples random minibatch, emulating new users\"\"\"\n batch_ix = np.random.randint(0, len(states), batch_size)\n return states[batch_ix], action_rewards[batch_ix]",
"_____no_output_____"
],
[
"from IPython.display import clear_output\n\nfrom pandas import DataFrame\nmoving_average = lambda x, **kw: DataFrame(\n {'x': np.asarray(x)}).x.ewm(**kw).mean().values\n\ndef train_contextual_agent(agent, batch_size=10, n_iters=100):\n rewards_history = []\n\n for i in range(n_iters):\n b_states, b_action_rewards = get_new_samples(\n all_states, action_rewards, batch_size)\n b_actions = agent.get_action(b_states)\n b_rewards = b_action_rewards[\n np.arange(batch_size), b_actions\n ]\n\n mse, kl = agent.train(b_states, b_actions, b_rewards, n_iters=100)\n\n rewards_history.append(b_rewards.mean())\n\n if i % 10 == 0:\n clear_output(True)\n print(\"iteration #%i\\tmean reward=%.3f\\tmse=%.3f\\tkl=%.3f\" %\n (i, np.mean(rewards_history[-10:]), mse, kl))\n plt.plot(rewards_history)\n plt.plot(moving_average(np.array(rewards_history), alpha=0.1))\n plt.title(\"Reward per epesode\")\n plt.xlabel(\"Episode\")\n plt.ylabel(\"Reward\")\n plt.show()\n\n samples = agent.sample_prediction(\n b_states[:1], n_samples=100).T[:, 0, :]\n for i in range(len(samples)):\n plt.hist(samples[i], alpha=0.25, label=str(i))\n plt.legend(loc='best')\n print('Q(s,a) std:', ';'.join(\n list(map('{:.3f}'.format, np.std(samples, axis=1)))))\n print('correct', b_action_rewards[0].argmax())\n plt.title(\"p(Q(s, a))\")\n plt.show()\n\n return moving_average(np.array(rewards_history), alpha=0.1)",
"_____no_output_____"
],
[
"bnn_agent = BNNAgent(state_size=state_size, n_actions=n_actions)\ngreedy_agent_rewards = train_contextual_agent(\n bnn_agent, batch_size=10, n_iters=N_ITERS)",
"iteration #90\tmean reward=0.560\tmse=0.457\tkl=0.044\n"
]
],
[
[
"## HW 2.1 Better exploration\n\nUse strategies from first part to gain more reward in contextual setting",
"_____no_output_____"
]
],
[
[
"class ThompsonBNNAgent(BNNAgent):\n def get_action(self, states):\n \"\"\"\n picks action based by taking _one_ sample from BNN and taking action with highest sampled reward (yes, that simple)\n This is exactly thompson sampling.\n \"\"\"\n\n # YOUR CODE HERE",
"_____no_output_____"
],
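[
"# Reference sketch (our class name): Thompson sampling takes a single posterior sample of\n# the predicted rewards and acts greedily with respect to that one sample.\nclass ThompsonBNNAgentSolved(ThompsonBNNAgent):\n    def get_action(self, states):\n        reward_sample = self.sample_prediction(states, n_samples=1)[0]  # [n_states, n_actions]\n        return reward_sample.argmax(axis=-1)",
"_____no_output_____"
],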
[
"thompson_agent_rewards = train_contextual_agent(ThompsonBNNAgent(state_size=state_size, n_actions=n_actions),\n batch_size=10, n_iters=N_ITERS)",
"iteration #90\tmean reward=0.360\tmse=0.590\tkl=0.038\n"
],
[
"class BayesUCBBNNAgent(BNNAgent):\n q = 90\n\n def get_action(self, states):\n \"\"\"\n Compute q-th percentile of rewards P(r|s,a) for all actions\n Take actions that have highest percentiles.\n\n This implements bayesian UCB strategy\n \"\"\"\n\n # YOUR CODE HERE",
"_____no_output_____"
],
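[
"# Reference sketch (our class name): Bayes-UCB scores each action by the q-th percentile of\n# its sampled reward distribution and picks the most optimistic action per state.\nclass BayesUCBBNNAgentSolved(BayesUCBBNNAgent):\n    def get_action(self, states):\n        reward_samples = self.sample_prediction(states, n_samples=100)\n        upper = np.percentile(reward_samples, self.q, axis=0)  # [n_states, n_actions]\n        return upper.argmax(axis=-1)",
"_____no_output_____"
],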
[
"ucb_agent_rewards = train_contextual_agent(BayesUCBBNNAgent(state_size=state_size, n_actions=n_actions),\n batch_size=10, n_iters=N_ITERS)",
"iteration #90\tmean reward=0.630\tmse=0.354\tkl=0.047\n"
],
[
"plt.figure(figsize=(17, 8))\n\nplt.plot(greedy_agent_rewards)\nplt.plot(thompson_agent_rewards)\nplt.plot(ucb_agent_rewards)\n\nplt.legend([\n \"Greedy BNN\",\n \"Thompson sampling BNN\",\n \"UCB BNN\"\n])\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Part 3. Exploration in MDP\n\nThe following problem, called \"river swim\", illustrates importance of exploration in context of mdp's.",
"_____no_output_____"
],
[
"<img src=\"river_swim.png\">\n\nPicture from https://arxiv.org/abs/1306.0940",
"_____no_output_____"
],
[
"Rewards and transition probabilities are unknown to an agent. Optimal policy is to swim against current, while easiest way to gain reward is to go left.",
"_____no_output_____"
]
],
[
[
"class RiverSwimEnv:\n LEFT_REWARD = 5.0 / 1000\n RIGHT_REWARD = 1.0\n\n def __init__(self, intermediate_states_count=4, max_steps=16):\n self._max_steps = max_steps\n self._current_state = None\n self._steps = None\n self._interm_states = intermediate_states_count\n self.reset()\n\n def reset(self):\n self._steps = 0\n self._current_state = 1\n return self._current_state, 0.0, False\n\n @property\n def n_actions(self):\n return 2\n\n @property\n def n_states(self):\n return 2 + self._interm_states\n\n def _get_transition_probs(self, action):\n if action == 0:\n if self._current_state == 0:\n return [0, 1.0, 0]\n else:\n return [1.0, 0, 0]\n\n elif action == 1:\n if self._current_state == 0:\n return [0, .4, .6]\n if self._current_state == self.n_states - 1:\n return [.4, .6, 0]\n else:\n return [.05, .6, .35]\n else:\n raise RuntumeError(\n \"Unknown action {}. Max action is {}\".format(action, self.n_actions))\n\n def step(self, action):\n \"\"\"\n :param action:\n :type action: int\n :return: observation, reward, is_done\n :rtype: (int, float, bool)\n \"\"\"\n reward = 0.0\n\n if self._steps >= self._max_steps:\n return self._current_state, reward, True\n\n transition = np.random.choice(\n range(3), p=self._get_transition_probs(action))\n if transition == 0:\n self._current_state -= 1\n elif transition == 1:\n pass\n else:\n self._current_state += 1\n\n if self._current_state == 0:\n reward = self.LEFT_REWARD\n elif self._current_state == self.n_states - 1:\n reward = self.RIGHT_REWARD\n\n self._steps += 1\n return self._current_state, reward, False",
"_____no_output_____"
]
],
[
[
"Let's implement q-learning agent with epsilon-greedy exploration strategy and see how it performs.",
"_____no_output_____"
]
],
[
[
"class QLearningAgent:\n def __init__(self, n_states, n_actions, lr=0.2, gamma=0.95, epsilon=0.1):\n self._gamma = gamma\n self._epsilon = epsilon\n self._q_matrix = np.zeros((n_states, n_actions))\n self._lr = lr\n\n def get_action(self, state):\n if np.random.random() < self._epsilon:\n return np.random.randint(0, self._q_matrix.shape[1])\n else:\n return np.argmax(self._q_matrix[state])\n\n def get_q_matrix(self):\n \"\"\" Used for policy visualization\n \"\"\"\n\n return self._q_matrix\n\n def start_episode(self):\n \"\"\" Used in PSRL agent\n \"\"\"\n pass\n\n def update(self, state, action, reward, next_state):\n # YOUR CODE HERE\n # Finish implementation of q-learnig agent",
"_____no_output_____"
],
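[
"# Reference sketch of the missing update (the class name is ours and it is not wired into\n# the training cell below): the standard tabular Q-learning rule\n# Q(s,a) <- Q(s,a) + lr * (r + gamma * max_a' Q(s',a') - Q(s,a)).\nclass QLearningAgentSolved(QLearningAgent):\n    def update(self, state, action, reward, next_state):\n        target = reward + self._gamma * np.max(self._q_matrix[next_state])\n        td_error = target - self._q_matrix[state, action]\n        self._q_matrix[state, action] += self._lr * td_error",
"_____no_output_____"
],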
[
"def train_mdp_agent(agent, env, n_episodes):\n episode_rewards = []\n\n for ep in range(n_episodes):\n state, ep_reward, is_done = env.reset()\n agent.start_episode()\n while not is_done:\n action = agent.get_action(state)\n\n next_state, reward, is_done = env.step(action)\n agent.update(state, action, reward, next_state)\n\n state = next_state\n ep_reward += reward\n\n episode_rewards.append(ep_reward)\n return episode_rewards",
"_____no_output_____"
],
[
"env = RiverSwimEnv()\nagent = QLearningAgent(env.n_states, env.n_actions)\nrews = train_mdp_agent(agent, env, 1000)\nplt.figure(figsize=(15, 8))\n\nplt.plot(moving_average(np.array(rews), alpha=.1))\nplt.xlabel(\"Episode count\")\nplt.ylabel(\"Reward\")\nplt.show()",
"/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:6: FutureWarning: pd.ewm_mean is deprecated for ndarrays and will be removed in a future version\n \n"
]
],
[
[
"Let's visualize our policy:",
"_____no_output_____"
]
],
[
[
"def plot_policy(agent):\n fig = plt.figure(figsize=(15, 8))\n ax = fig.add_subplot(111)\n ax.matshow(agent.get_q_matrix().T)\n ax.set_yticklabels(['', 'left', 'right'])\n plt.xlabel(\"State\")\n plt.ylabel(\"Action\")\n plt.title(\"Values of state-action pairs\")\n plt.show()",
"_____no_output_____"
],
[
"plot_policy(agent)",
"_____no_output_____"
]
],
[
[
"As your see, agent uses suboptimal policy of going left and does not explore the right state.",
"_____no_output_____"
],
[
"## Bonus 3.1 Posterior sampling RL (3 points)",
"_____no_output_____"
],
[
"Now we will implement Thompson Sampling for MDP!\n\nGeneral algorithm:\n\n>**for** episode $k = 1,2,...$ **do**\n>> sample $M_k \\sim f(\\bullet\\ |\\ H_k)$\n\n>> compute policy $\\mu_k$ for $M_k$\n\n>> **for** time $t = 1, 2,...$ **do**\n\n>>> take action $a_t$ from $\\mu_k$ \n\n>>> observe $r_t$ and $s_{t+1}$\n>>> update $H_k$\n\n>> **end for**\n\n>**end for**\n\nIn our case we will model $M_k$ with two matricies: transition and reward. Transition matrix is sampled from dirichlet distribution. Reward matrix is sampled from normal-gamma distribution.\n\nDistributions are updated with bayes rule - see continious distribution section at https://en.wikipedia.org/wiki/Conjugate_prior\n\nArticle on PSRL - https://arxiv.org/abs/1306.0940",
"_____no_output_____"
]
],
[
[
"def sample_normal_gamma(mu, lmbd, alpha, beta):\n \"\"\" https://en.wikipedia.org/wiki/Normal-gamma_distribution\n \"\"\"\n tau = np.random.gamma(alpha, beta)\n mu = np.random.normal(mu, 1.0 / np.sqrt(lmbd * tau))\n return mu, tau\n\n\nclass PsrlAgent:\n def __init__(self, n_states, n_actions, horizon=10):\n self._n_states = n_states\n self._n_actions = n_actions\n self._horizon = horizon\n\n # params for transition sampling - Dirichlet distribution\n self._transition_counts = np.zeros(\n (n_states, n_states, n_actions)) + 1.0\n\n # params for reward sampling - Normal-gamma distribution\n self._mu_matrix = np.zeros((n_states, n_actions)) + 1.0\n self._state_action_counts = np.zeros(\n (n_states, n_actions)) + 1.0 # lambda\n\n self._alpha_matrix = np.zeros((n_states, n_actions)) + 1.0\n self._beta_matrix = np.zeros((n_states, n_actions)) + 1.0\n\n def _value_iteration(self, transitions, rewards):\n # YOU CODE HERE\n state_values = < Find action values with value iteration >\n return state_values\n\n def start_episode(self):\n # sample new mdp\n self._sampled_transitions = np.apply_along_axis(\n np.random.dirichlet, 1, self._transition_counts)\n\n sampled_reward_mus, sampled_reward_stds = sample_normal_gamma(\n self._mu_matrix,\n self._state_action_counts,\n self._alpha_matrix,\n self._beta_matrix\n )\n\n self._sampled_rewards = sampled_reward_mus\n self._current_value_function = self._value_iteration(\n self._sampled_transitions, self._sampled_rewards)\n\n def get_action(self, state):\n return np.argmax(self._sampled_rewards[state] +\n self._current_value_function.dot(self._sampled_transitions[state]))\n\n def update(self, state, action, reward, next_state):\n # YOUR CODE HERE\n # update rules - https://en.wikipedia.org/wiki/Conjugate_prior\n\n def get_q_matrix(self):\n return self._sampled_rewards + self._current_value_function.dot(self._sampled_transitions)",
"_____no_output_____"
],
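[
"# Hedged sketch of a finite-horizon value iteration that could back the _value_iteration\n# placeholder above; transitions has shape [s, s_next, a] and rewards has shape [s, a].\ndef finite_horizon_value_iteration(transitions, rewards, horizon):\n    values = np.zeros(rewards.shape[0])\n    for _ in range(horizon):\n        q = rewards + np.einsum('spa,p->sa', transitions, values)  # Q[s, a]\n        values = q.max(axis=1)\n    return values",
"_____no_output_____"
],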
[
"from pandas import DataFrame\nmoving_average = lambda x, **kw: DataFrame(\n {'x': np.asarray(x)}).x.ewm(**kw).mean().values\n\nhorizon = 20\nenv = RiverSwimEnv(max_steps=horizon)\nagent = PsrlAgent(env.n_states, env.n_actions, horizon=horizon)\nrews = train_mdp_agent(agent, env, 1000)\n\nplt.figure(figsize=(15, 8))\nplt.plot(moving_average(np.array(rews), alpha=0.1))\n\nplt.xlabel(\"Episode count\")\nplt.ylabel(\"Reward\")\nplt.show()",
"/usr/local/lib/python3.5/dist-packages/ipykernel_launcher.py:7: FutureWarning: pd.ewm_mean is deprecated for ndarrays and will be removed in a future version\n import sys\n"
],
[
"plot_policy(agent)",
"_____no_output_____"
]
],
[
[
"## Bonus 3.2 Bootstrapped DQN (10 points)\n\nImplement Bootstrapped DQN algorithm and compare it's performance with ordinary DQN on BeamRider Atari game. Links:\n- https://arxiv.org/abs/1602.04621",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7de9d83eefdb278b7ea21cb615a6d302371bf31 | 35,387 | ipynb | Jupyter Notebook | 02-lesson/06-apply.ipynb | chilperic/scipy-2017-tutorial-pandas | 6b52344ad58d6dfa4aaaad6327bb5054fb2a5b93 | [
"MIT"
] | 164 | 2017-06-27T19:20:26.000Z | 2022-01-09T03:31:02.000Z | 02-lesson/06-apply.ipynb | chilperic/scipy-2017-tutorial-pandas | 6b52344ad58d6dfa4aaaad6327bb5054fb2a5b93 | [
"MIT"
] | 2 | 2017-11-28T17:03:33.000Z | 2018-03-13T15:23:04.000Z | 02-lesson/06-apply.ipynb | chilperic/scipy-2017-tutorial-pandas | 6b52344ad58d6dfa4aaaad6327bb5054fb2a5b93 | [
"MIT"
] | 167 | 2017-06-29T21:16:19.000Z | 2021-10-01T22:53:29.000Z | 32.494949 | 1,365 | 0.469749 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"def my_function():\n # indent 4 spaces\n # function code\n pass",
"_____no_output_____"
],
[
"def my_sq(x):\n \"\"\"squares a give value\n \"\"\"\n return x ** 2",
"_____no_output_____"
],
[
"my_sq(2)",
"_____no_output_____"
],
[
"my_sq(4)",
"_____no_output_____"
],
[
"def avg_2(x, y):\n \"\"\"calculates avgerage between 2 numbers\n \"\"\"\n return (x + y) / 2.0",
"_____no_output_____"
],
[
"avg_2(10, 20)",
"_____no_output_____"
],
[
"df = pd.DataFrame({\n 'a': [10, 20 , 30],\n 'b': [20 , 30, 40]\n})\ndf",
"_____no_output_____"
],
[
"df['a'] ** 2",
"_____no_output_____"
],
[
"df['a'].apply(my_sq)",
"_____no_output_____"
],
[
"def print_me(x):\n print(x)\ndf",
"_____no_output_____"
],
[
"df.apply(print_me)",
"0 10\n1 20\n2 30\nName: a, dtype: int64\n0 20\n1 30\n2 40\nName: b, dtype: int64\n"
],
[
"def avg_3(x, y, z):\n \"\"\"avg of 3 numbers\n \"\"\"\n return (x + y + x) / 3",
"_____no_output_____"
],
[
"df.apply(avg_3)",
"_____no_output_____"
],
[
"def avg_3(col):\n \"\"\"avg of 3 numbers\n \"\"\"\n x = col[0]\n y = col[1]\n z = col[2]\n return (x + y + z) / 3",
"_____no_output_____"
],
[
"df.apply(avg_3)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df.apply(avg_3, axis=1)",
"_____no_output_____"
],
[
"def avg_2(row):\n \"\"\"avg of 3 numbers\n \"\"\"\n x = row[0]\n y = row[1]\n return (x + y) / 2\ndf.apply(avg_2, axis=1)",
"_____no_output_____"
],
[
"import seaborn as sns\n\ntitanic = sns.load_dataset('titanic')",
"_____no_output_____"
],
[
"titanic.head()",
"_____no_output_____"
],
[
"titanic.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 15 columns):\nsurvived 891 non-null int64\npclass 891 non-null int64\nsex 891 non-null object\nage 714 non-null float64\nsibsp 891 non-null int64\nparch 891 non-null int64\nfare 891 non-null float64\nembarked 889 non-null object\nclass 891 non-null category\nwho 891 non-null object\nadult_male 891 non-null bool\ndeck 203 non-null category\nembark_town 889 non-null object\nalive 891 non-null object\nalone 891 non-null bool\ndtypes: bool(2), category(2), float64(2), int64(4), object(5)\nmemory usage: 80.6+ KB\n"
],
[
"import numpy as np",
"_____no_output_____"
],
[
"def count_missing(vec):\n null_vec = pd.isnull(vec)\n null_count = np.sum(null_vec)\n return(null_count)",
"_____no_output_____"
],
[
"def prop_missing(vec):\n num = count_missing(vec)\n dem = vec.size\n return num / dem",
"_____no_output_____"
],
[
"def prop_complete(vec):\n return 1 - prop_missing(vec)",
"_____no_output_____"
],
[
"titanic.apply(count_missing)",
"_____no_output_____"
],
[
"titanic.loc[pd.isnull(titanic['embark_town']), :]",
"_____no_output_____"
],
[
"titanic.apply(count_missing, axis=1).value_counts()",
"_____no_output_____"
],
[
"titanic.apply(prop_complete, axis=1).value_counts()",
"_____no_output_____"
],
[
"def clean_1_colum(single_column):\n return single_column[1:]",
"_____no_output_____"
],
[
"clean_1_colum('d42')",
"_____no_output_____"
],
[
"@np.vectorize\ndef avg_2(x, y):\n \"\"\"calculates avgerage between 2 numbers\n \"\"\"\n return (x + y) / 2.0",
"_____no_output_____"
],
[
"avg_2(df['a'], df['b'])",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7dea927b736f5f5b0505c0174f1f9b79bdec93e | 15,466 | ipynb | Jupyter Notebook | CI_Data_Science_Lesson_06_ANSWERS.ipynb | MaxDGU/datasciencenotebooks | 8f48f0049de23e20016260f43c0d9037109897d1 | [
"BSD-Source-Code"
] | null | null | null | CI_Data_Science_Lesson_06_ANSWERS.ipynb | MaxDGU/datasciencenotebooks | 8f48f0049de23e20016260f43c0d9037109897d1 | [
"BSD-Source-Code"
] | null | null | null | CI_Data_Science_Lesson_06_ANSWERS.ipynb | MaxDGU/datasciencenotebooks | 8f48f0049de23e20016260f43c0d9037109897d1 | [
"BSD-Source-Code"
] | null | null | null | 26.944251 | 212 | 0.447821 | [
[
[
"<a href=\"https://codeimmersives.com\"><img src = \"https://www.codeimmersives.com/wp-content/uploads/2019/09/CodeImmersives_Logo_RGB_NYC_BW.png\" width = 400> </a>\n\n\n<h1 align=center><font size = 5>Agenda</font></h1>",
"_____no_output_____"
],
[
"### \n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n\n1. [Review](#0)<br>\n2. [Numpy Intro and Installation](#2)<br>\n2. [Exercise](#10)<br> \n3. [Exercise](#12)<br> \n</div>\n<hr>",
"_____no_output_____"
],
[
"<h2>Review</h2>",
"_____no_output_____"
],
[
"<h2>Exercise</h2>\nThe following list represents the diameters of circles:<br>\n<code>\ncircle_diameters = [1,2,3,5,8,13,21]\n</code><br>\n1 - Calculate the area of each circle<br>\n2 - Calculate the circumference of each circle<br>\n<br>\nUse Numpy for the solution",
"_____no_output_____"
],
[
"<h4>Solution</h4>",
"_____no_output_____"
]
],
[
[
"import numpy as np\ncircle_diameters = [1,2,3,5,8,13,21]\narea = [round(np.pi*(x/2)**2,2) for x in circle_diameters]\ncirc = [round(np.pi*x) for x in circle_diameters]\nprint(circ)\nprint(area)",
"_____no_output_____"
]
],
[
[
"<h2>Exercise</h2>\nFor each tuple in the list print the area of each rectangle<br><br>\nEach element in the list represents the length and the width\n<code>\ndimensions = [(20,2),(2,3),(4,4),(6,6)]\n</code><br>\nSolve as a non numpy problem",
"_____no_output_____"
],
[
"<h4>Solution</h4>",
"_____no_output_____"
]
],
[
[
"dimensions = [(20,2),(2,3),(4,4),(6,6)]\nareas = [i*x for i,x in dimensions]\nprint(areas)",
"[40, 6, 16, 36]\n"
]
],
[
[
"<h2>Exercise</h2>\nThe following dictionary represents the dimensions of an apartment in <br>\nNew York City. <br><br>\n<code>\napartment = {'Bedroom 1':(12,12), 'Bedroom 2': (12,10), 'Bathroom 1': (6,8),\n 'Bathroom 2': (6,8), 'Kitchen': (10,8), 'Foyer': (14,4), 'Dining Room': (12,10),\n 'Living Room': (12,15)}\n</code><br>\n1 - Calculate the square footage of each room<br>\n2 - Calculate the total square footage of the apartment<br>\n3 - If the apartment is selling for $90 a square foot. How much is it selling for?<br><br>\nNOTE: Selling price is the Square footage * price per square foot as 12<br>\nDo not use numpy",
"_____no_output_____"
]
],
[
[
"apartment = {'Bedroom 1':(12,12), 'Bedroom 2': (12,10), 'Bathroom 1': (6,8), 'Bathroom 2': (6,8), 'Kitchen': (10,8), 'Foyer': (14,4), 'Dining Room': (12,10), 'Living Room': (12,15)}\nareas = [(v[0]*v[1]) for i,v in apartment.items()]\nprint(areas)\nprint(sum(areas))\nprint('$',sum(areas)*90)",
"[144, 120, 48, 48, 80, 56, 120, 180]\n796\n$ 71640\n"
]
],
[
[
"<h2>Exercise</h2>\n5 friends go out to dinner and their individual amounts were tallied on 1 bill<br>\n<code>\ndinner = [36, 42, 27, 32, 39]\n</code><br>\nUsing numpy calculate the following:<br>\n1 - The average meal price<br>\n2 - The median meal price<br>\n3 - The maximum cost for the meal<br>\n4 - The least expensive meal<br>\n5 - The difference each person varied from the mean<br>\n6 - Add a 20% tip to each persons meal then Add 8.875% tax to the pre tax meal amount<br>\n7 - What percentage of the total bill is each person contributing?<br>\n8 - It was the birthday of the person who had the most expensive meal. <br>\nDistribute the cost of their meal amoungst the other patrons.<br>",
"_____no_output_____"
],
[
"<h4>Solution</h4>",
"_____no_output_____"
]
],
[
[
"import numpy as np\ndinner = [36, 42, 27, 32, 39]\navg = np.mean(dinner)\nmedian = np.median(dinner)\nm = np.max(dinner)\nmi = np.min(dinner)\ndiff_mean = [round(x-avg,2) for x in dinner]\nprint('avg:', avg, 'median:', median, 'max:', m, 'min:', mi)\nprint(diff_mean)\n\nadjusted_dinner = [round((x*1.2)*1.0875,2) for x in dinner]\nnew_sum = sum(adjusted_dinner)\nprint(adjusted_dinner)\n\ncontributions = [round((x/new_sum)*100,2) for x in adjusted_dinner]\nprint(contributions, sum(contributions))\n\nadd = np.max(adjusted_dinner)/4 \nadjusted_dinner.remove(max(adjusted_dinner))\nprint(np.add(adjusted_dinner,add))\n",
"avg: 35.2 median: 36.0 max: 42 min: 27\n[0.8, 6.8, -8.2, -3.2, 3.8]\n[46.98, 54.81, 35.23, 41.76, 50.89]\n[20.46, 23.86, 15.34, 18.18, 22.16] 100.0\n[60.6825 48.9325 55.4625 64.5925]\n"
]
],
[
[
"<h2>Numpy continued</h2>",
"_____no_output_____"
],
[
"Strings can also be stored in a numpy array.<br>\n<code>\nimport numpy as np\npasta_shapes = ['Macaroni','Rigatoni','Angel Hair','Spaghetti','Linguini']\nshapes = np.array(pasta_shapes)\nshapes = np.sort(shapes)\nprint(shapes)\nprint(shapes.dtype)\n</code>",
"_____no_output_____"
]
],
[
[
"import numpy as np \npasta_shapes = ['Macaroni','Rigatoni','Angel Hair','Spaghetti','Linguini']\nval= 5\nval = np.array(val)\nname= 'str'\nname = np.array(name)\nboo = True\nboo = np.array(boo)\n\nshapes = np.array(pasta_shapes)\nprint(np.sort(shapes))\nprint(type(shapes))",
"['Angel Hair' 'Linguini' 'Macaroni' 'Rigatoni' 'Spaghetti']\n<U10\n<class 'numpy.ndarray'>\n"
]
],
[
[
"We can use short cut abreviations to assign the data types to numpy<br>\narrays:<br>\nFor i, u, f, S and U we can define size as well. <br>\nWe can combine each letter with a size as well like: 4,8<br>\n<br>\n<code>\nimport numpy as np\narr = np.array(list(range(1,11)), dtype='i8')\nprint(arr, arr.dtype)\n</code>",
"_____no_output_____"
]
],
[
[
"import numpy as np\nnp.array(list(range(1,11)), dtype='i8') ",
"_____no_output_____"
]
],
[
[
"<h2>Casting</h2>\nWe can convert python data type into numpy data type using 2 methods<br>\n<br>\nMethod 1<br>\nUse dtype parameter<br>\n<br>\n<code>\narr_string = np.array(list(range(1,11)), dtype='S')\nprint(arr_string)\n</code><br>\nMethod 2<br>\nUse astype()<br>\n<code>\nimport numpy as np\narr = np.array(list(range(1,11)), dtype='i8')\narr_2 = arr.astype('S')\nprint(arr_2)\n</code>",
"_____no_output_____"
]
],
[
[
"arr_string = np.array(list(range(1,11))) \nprint(arr_string)\nimport numpy as np\narr = np.array(list(range(1,11)), dtype='i8')\narr_2 = arr.astype('S')\nprint(arr_2)\n",
"[ 1 2 3 4 5 6 7 8 9 10]\n[b'1' b'2' b'3' b'4' b'5' b'6' b'7' b'8' b'9' b'10']\n"
]
],
[
[
"<h2>Exercise</h2>\nTake the following floating point number and cast them as integers<br>\nUse both methods.<br>\n<code>\nrainfall = [2.3,3.7,2.4,1.9]\n</code>",
"_____no_output_____"
],
[
"<h4>Solution</h4>",
"_____no_output_____"
]
],
[
[
"",
"_____no_output_____"
],
[
"import numpy as np\nrainfall = np.array([2.3,3.7,2.4,1.9])\nr2 = rainfall.astype(int)\nprint(r2)\n \n",
"[2 3 2 1]\n"
],
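[
"# Method 1 (sketch): pass dtype directly when creating the array.\n# Like astype(int), an integer dtype truncates the fractional part.\nimport numpy as np\nrainfall_1 = np.array([2.3,3.7,2.4,1.9], dtype=int)\nprint(rainfall_1)",
"_____no_output_____"
],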
[
"import numpy as np\n \n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
],
[
[
"This notebook is part of a course at www.codeimmersives.com called Data Science. If you accessed this notebook outside the course, you can get more information about this course online by clicking here.",
"_____no_output_____"
],
[
"<hr>\n\nCopyright © 2021 Code Immersives",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7dea930b11d59ee40ac1776a175725470f09eb7 | 141,144 | ipynb | Jupyter Notebook | notebooks/Repairer.ipynb | HGUISEL/debuggingbook | 6dc18c90dafd4cdff577fec8ba485eb9650ec4bf | [
"MIT"
] | 1 | 2021-08-15T11:26:13.000Z | 2021-08-15T11:26:13.000Z | notebooks/Repairer.ipynb | HGUISEL/debuggingbook | 6dc18c90dafd4cdff577fec8ba485eb9650ec4bf | [
"MIT"
] | 1 | 2021-08-15T10:47:37.000Z | 2021-08-15T10:47:37.000Z | notebooks/Repairer.ipynb | HGUISEL/debuggingbook | 6dc18c90dafd4cdff577fec8ba485eb9650ec4bf | [
"MIT"
] | 2 | 2021-08-15T07:43:41.000Z | 2021-11-10T04:41:07.000Z | 30.817467 | 563 | 0.554051 | [
[
[
"# Repairing Code Automatically\n\nSo far, we have discussed how to track failures and how to locate defects in code. Let us now discuss how to _repair_ defects – that is, to correct the code such that the failure no longer occurs. We will discuss how to _repair code automatically_ – by systematically searching through possible fixes and evolving the most promising candidates.",
"_____no_output_____"
]
],
[
[
"from bookutils import YouTubeVideo\nYouTubeVideo(\"UJTf7cW0idI\")",
"_____no_output_____"
]
],
[
[
"**Prerequisites**\n\n* Re-read the [introduction to debugging](Intro_Debugging.ipynb), notably on how to properly fix code.\n* We make use of automatic fault localization, as discussed in the [chapter on statistical debugging](StatisticalDebugger.ipynb).\n* We make extensive use of code transformations, as discussed in the [chapter on tracing executions](Tracer.ipynb).\n* We make use of [delta debugging](DeltaDebugger.ipynb).",
"_____no_output_____"
]
],
[
[
"import bookutils",
"_____no_output_____"
]
],
[
[
"## Synopsis\n<!-- Automatically generated. Do not edit. -->\n\nTo [use the code provided in this chapter](Importing.ipynb), write\n\n```python\n>>> from debuggingbook.Repairer import <identifier>\n```\n\nand then make use of the following features.\n\n\nThis chapter provides tools and techniques for automated repair of program code. The `Repairer()` class takes a `RankingDebugger` debugger as input (such as `OchiaiDebugger` from [the chapter on statistical debugging](StatisticalDebugger.ipynb)). A typical setup looks like this:\n\n```python\nfrom debuggingbook.StatisticalDebugger import OchiaiDebugger\n\ndebugger = OchiaiDebugger()\nfor inputs in TESTCASES:\n with debugger:\n test_foo(inputs)\n...\n\nrepairer = Repairer(debugger)\n```\nHere, `test_foo()` is a function that raises an exception if the tested function `foo()` fails. If `foo()` passes, `test_foo()` should not raise an exception.\n\nThe `repair()` method of a `Repairer` searches for a repair of the code covered in the debugger (except for methods starting or ending in `test`, such that `foo()`, not `test_foo()` is repaired). `repair()` returns the best fix candidate as a pair `(tree, fitness)` where `tree` is a [Python abstract syntax tree](http://docs.python.org/3/library/ast) (AST) of the fix candidate, and `fitness` is the fitness of the candidate (a value between 0 and 1). A `fitness` of 1.0 means that the candidate passed all tests. A typical usage looks like this:\n\n```python\nimport astor\n\ntree, fitness = repairer.repair()\nprint(astor.to_source(tree), fitness)\n```\n\nHere is a complete example for the `middle()` program. This is the original source code of `middle()`:\n\n```python\ndef middle(x, y, z): # type: ignore\n if y < z:\n if x < y:\n return y\n elif x < z:\n return y\n else:\n if x > y:\n return y\n elif x > z:\n return x\n return z\n```\nWe set up a function `middle_test()` that tests it. The `middle_debugger` collects testcases and outcomes:\n\n```python\n>>> middle_debugger = OchiaiDebugger()\n>>> for x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:\n>>> with middle_debugger:\n>>> middle_test(x, y, z)\n```\nThe repairer attempts to repair the invoked function (`middle()`). The returned AST `tree` can be output via `astor.to_source()`:\n\n```python\n>>> middle_repairer = Repairer(middle_debugger)\n>>> tree, fitness = middle_repairer.repair()\n>>> print(astor.to_source(tree), fitness)\ndef middle(x, y, z):\n if y < z:\n if x < z:\n if x < y:\n return y\n else:\n return x\n elif x > y:\n return y\n elif x > z:\n return x\n return z\n 1.0\n\n```\nHere are the classes defined in this chapter. A `Repairer` repairs a program, using a `StatementMutator` and a `CrossoverOperator` to evolve a population of candidates.\n\n\n\n",
"_____no_output_____"
],
[
"## Automatic Code Repairs\n\nSo far, we have discussed how to locate defects in code, how to track failures back to the defects that caused them, and how to systematically determine failure conditions. Let us now address the last step in debugging – namely, how to _automatically fix code_.\n\nAlready in the [introduction to debugging](Intro_Debugging.ipynb), we have discussed how to fix code manually. Notably, we have established that a _diagnosis_ (which induces a fix) should show _causality_ (i.e., how the defect causes the failure) and _incorrectness_ (how the defect is wrong). Is it possible to obtain such a diagnosis automatically?",
"_____no_output_____"
],
[
"In this chapter, we introduce a technique of _automatic code repair_ – that is, for a given failure, automatically determine a fix that makes the failure go away. To do so, we randomly (but systematically) _mutate_ the program code – that is, insert, change, and delete fragments – until we find a change that actually causes the failing test to pass.",
"_____no_output_____"
],
[
"If this sounds like an audacious idea, that is because it is. But not only is _automated program repair_ one of the hottest topics of software research in the last decade, it is also being increasingly deployed in industry. At Facebook, for instance, every failing test report comes with an automatically generated _repair suggestion_ – a suggestion that already has been validated to work. Programmers can apply the suggestion as is or use it as basis for their own fixes.",
"_____no_output_____"
],
[
"### The middle() Function",
"_____no_output_____"
],
[
"Let us introduce our ongoing example. In the [chapter on statistical debugging](StatisticalDebugger.ipynb), we have introduced the `middle()` function – a function that returns the \"middle\" of three numbers `x`, `y`, and `z`:",
"_____no_output_____"
]
],
[
[
"from StatisticalDebugger import middle",
"_____no_output_____"
],
[
"# ignore\nfrom bookutils import print_content",
"_____no_output_____"
],
[
"# ignore\nimport inspect",
"_____no_output_____"
],
[
"# ignore\n_, first_lineno = inspect.getsourcelines(middle)\nmiddle_source = inspect.getsource(middle)\nprint_content(middle_source, '.py', start_line_number=first_lineno)",
"_____no_output_____"
]
],
[
[
"In most cases, `middle()` just runs fine:",
"_____no_output_____"
]
],
[
[
"middle(4, 5, 6)",
"_____no_output_____"
]
],
[
[
"In some other cases, though, it does not work correctly:",
"_____no_output_____"
]
],
[
[
"middle(2, 1, 3)",
"_____no_output_____"
]
],
[
[
"### Validated Repairs",
"_____no_output_____"
],
[
"Now, if we only want a repair that fixes this one given failure, this would be very easy. All we have to do is to replace the entire body by a single statement:",
"_____no_output_____"
]
],
[
[
"def middle_sort_of_fixed(x, y, z): # type: ignore\n return x",
"_____no_output_____"
]
],
[
[
"You will concur that the failure no longer occurs:",
"_____no_output_____"
]
],
[
[
"middle_sort_of_fixed(2, 1, 3)",
"_____no_output_____"
]
],
[
[
"But this, of course, is not the aim of automatic fixes, nor of fixes in general: We want our fixes not only to make the given failure go away, but we also want the resulting code to be _correct_ (which, of course, is a lot harder).",
"_____no_output_____"
],
[
"Automatic repair techniques therefore assume the existence of a _test suite_ that can check whether an implementation satisfies its requirements. Better yet, one can use the test suite to gradually check _how close_ one is to perfection: A piece of code that satisfies 99% of all tests is better than one that satisfies ~33% of all tests, as `middle_sort_of_fixed()` would do (assuming the test suite evenly checks the input space).",
"_____no_output_____"
],
[
"### Genetic Optimization",
"_____no_output_____"
],
[
"The common approach for automatic repair follows the principle of _genetic optimization_. Roughly spoken, genetic optimization is a _metaheuristic_ inspired by the process of _natural selection_. The idea is to _evolve_ a selection of _candidate solutions_ towards a maximum _fitness_:\n\n1. Have a selection of _candidates_.\n2. Determine the _fitness_ of each candidate.\n3. Retain those candidates with the _highest fitness_.\n4. Create new candidates from the retained candidates, by applying genetic operations:\n * _Mutation_ mutates some aspect of a candidate.\n * _CrossoverOperator_ creates new candidates combining features of two candidates.\n5. Repeat until an optimal solution is found.",
"_____no_output_____"
],
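[
"The following is a minimal, hypothetical sketch of such a genetic loop (it is not the `Repairer` implementation developed later in this chapter); `fitness()`, `mutate()`, and `crossover()` are placeholder functions to be supplied for the problem at hand:\n\n```python\nimport random\n\ndef genetic_search(candidates, fitness, mutate, crossover, generations=50):\n    # Evolve `candidates` towards maximum `fitness` (sketch; all arguments are placeholders)\n    n = len(candidates)\n    for _ in range(generations):\n        # Mutation: create one mutated offspring per slot in the population\n        offspring = [mutate(random.choice(candidates)) for _ in range(n)]\n        # Crossover: combine two random parents\n        parent_1, parent_2 = random.sample(candidates, 2)\n        offspring += list(crossover(parent_1, parent_2))\n        # Selection: keep only the `n` fittest candidates\n        candidates = sorted(candidates + offspring, key=fitness, reverse=True)[:n]\n        if fitness(candidates[0]) >= 1.0:  # perfect candidate found\n            break\n    return candidates[0]\n```",
"_____no_output_____"
],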
[
"Applied for automated program repair, this means the following steps:\n\n1. Have a _test suite_ with both failing and passing tests that helps asserting correctness of possible solutions.\n2. With the test suite, use [fault localization](StatisticalDebugger.ipynb) to determine potential code locations to be fixed.\n3. Systematically _mutate_ the code (by adding, changing, or deleting code) and _cross_ code to create possible fix candidates.\n4. Identify the _fittest_ fix candidates – that is, those that satisfy the most tests.\n5. _Evolve_ the fittest candidates until a perfect fix is found, or until time resources are depleted.",
"_____no_output_____"
],
[
"Let us illustrate these steps in the following sections.",
"_____no_output_____"
],
[
"## A Test Suite",
"_____no_output_____"
],
[
"In automated repair, the larger and the more thorough the test suite, the higher the quality of the resulting fix (if any). Hence, if we want to repair `middle()` automatically, we need a good test suite – with good inputs, but also with good checks. Note that running the test suite commonly takes the most time of automated repair, so a large test suite also comes with extra cost.",
"_____no_output_____"
],
[
"Let us first focus on achieving high-quality repairs. Hence, we will use the extensive test suites introduced in the [chapter on statistical debugging](StatisticalDebugger.ipynb):",
"_____no_output_____"
]
],
[
[
"from StatisticalDebugger import MIDDLE_PASSING_TESTCASES, MIDDLE_FAILING_TESTCASES",
"_____no_output_____"
]
],
[
[
"The `middle_test()` function fails whenever `middle()` returns an incorrect result:",
"_____no_output_____"
]
],
[
[
"def middle_test(x: int, y: int, z: int) -> None:\n m = middle(x, y, z)\n assert m == sorted([x, y, z])[1]",
"_____no_output_____"
],
[
"from ExpectError import ExpectError",
"_____no_output_____"
],
[
"with ExpectError():\n middle_test(2, 1, 3)",
"_____no_output_____"
]
],
[
[
"## Locating the Defect",
"_____no_output_____"
],
[
"Our next step is to find potential defect locations – that is, those locations in the code our mutations should focus upon. Since we already do have two test suites, we can make use of [statistical debugging](StatisticalDebugger.ipynb) to identify likely faulty locations. Our `OchiaiDebugger` ranks individual code lines by how frequently they are executed in failing runs (and not in passing runs).",
"_____no_output_____"
]
],
[
[
"from StatisticalDebugger import OchiaiDebugger, RankingDebugger",
"_____no_output_____"
],
[
"middle_debugger = OchiaiDebugger()\n\nfor x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:\n with middle_debugger:\n middle_test(x, y, z)",
"_____no_output_____"
]
],
[
[
"We see that the upper half of the `middle()` code is definitely more suspicious:",
"_____no_output_____"
]
],
[
[
"middle_debugger",
"_____no_output_____"
]
],
[
[
"The most suspicious line is:",
"_____no_output_____"
]
],
[
[
"# ignore\nlocation = middle_debugger.rank()[0]\n(func_name, lineno) = location\nlines, first_lineno = inspect.getsourcelines(middle)\nprint(lineno, end=\"\")\nprint_content(lines[lineno - first_lineno], '.py')",
"_____no_output_____"
]
],
[
[
"with a suspiciousness of:",
"_____no_output_____"
]
],
[
[
"# ignore\nmiddle_debugger.suspiciousness(location)",
"_____no_output_____"
]
],
[
[
"## Random Code Mutations",
"_____no_output_____"
],
[
"Our third step in automatic code repair is to _randomly mutate the code_. Specifically, we want to randomly _delete_, _insert_, and _replace_ statements in the program to be repaired. However, simply synthesizing code _from scratch_ is unlikely to yield anything meaningful – the number of combinations is simply far too high. Already for a three-character identifier name, we have more than 200,000 combinations:",
"_____no_output_____"
]
],
[
[
"import string",
"_____no_output_____"
],
[
"string.ascii_letters",
"_____no_output_____"
],
[
"len(string.ascii_letters + '_') * \\\n len(string.ascii_letters + '_' + string.digits) * \\\n len(string.ascii_letters + '_' + string.digits)",
"_____no_output_____"
]
],
[
[
"Hence, we do _not_ synthesize code from scratch, but instead _reuse_ elements from the program to be fixed, hypothesizing that \"a program that contains an error in one area likely implements the correct behavior elsewhere\" \\cite{LeGoues2012}. This insight has been dubbed the *plastic surgery hypothesis*: content of new code can often be assembled out of fragments of code that already exist in the code base \\citeBarr2014}.",
"_____no_output_____"
],
[
"For our \"plastic surgery\", we do not operate on a _textual_ representation of the program, but rather on a _structural_ representation, which by construction allows us to avoid lexical and syntactical errors in the first place.\n\nThis structural representation is the _abstract syntax tree_ (AST), which we already have seen in various chapters, such as the [chapter on delta debugging](DeltaDebugger.ipynb), the [chapter on tracing](Tracer.ipynb), and excessively in the [chapter on slicing](Slicer.ipynb). The [official Python `ast` reference](http://docs.python.org/3/library/ast) is complete, but a bit brief; the documentation [\"Green Tree Snakes - the missing Python AST docs\"](https://greentreesnakes.readthedocs.io/en/latest/) provides an excellent introduction.\n\nRecapitulating, an AST is a tree representation of the program, showing a hierarchical structure of the program's elements. Here is the AST for our `middle()` function.",
"_____no_output_____"
]
],
[
[
"import ast\nimport astor\nimport inspect",
"_____no_output_____"
],
[
"from bookutils import print_content, show_ast",
"_____no_output_____"
],
[
"def middle_tree() -> ast.AST:\n return ast.parse(inspect.getsource(middle))",
"_____no_output_____"
],
[
"show_ast(middle_tree())",
"_____no_output_____"
]
],
[
[
" You see that it consists of one function definition (`FunctionDef`) with three `arguments` and two statements – one `If` and one `Return`. Each `If` subtree has three branches – one for the condition (`test`), one for the body to be executed if the condition is true (`body`), and one for the `else` case (`orelse`). The `body` and `orelse` branches again are lists of statements.",
"_____no_output_____"
],
[
"An AST can also be shown as text, which is more compact, yet reveals more information. `ast.dump()` gives not only the class names of elements, but also how they are constructed – actually, the whole expression can be used to construct an AST.",
"_____no_output_____"
]
],
[
[
"print(ast.dump(middle_tree()))",
"_____no_output_____"
]
],
[
[
"This is the path to the first `return` statement:",
"_____no_output_____"
]
],
[
[
"ast.dump(middle_tree().body[0].body[0].body[0].body[0]) # type: ignore",
"_____no_output_____"
]
],
[
[
"### Picking Statements",
"_____no_output_____"
],
[
"For our mutation operators, we want to use statements from the program itself. Hence, we need a means to find those very statements. The `StatementVisitor` class iterates through an AST, adding all statements it finds in function definitions to its `statements` list. To do so, it subclasses the Python `ast` `NodeVisitor` class, described in the [official Python `ast` reference](http://docs.python.org/3/library/ast).",
"_____no_output_____"
]
],
[
[
"from ast import NodeVisitor",
"_____no_output_____"
],
[
"# ignore\nfrom typing import Any, Callable, Optional, Type, Tuple\nfrom typing import Dict, Union, Set, List, cast",
"_____no_output_____"
],
[
"class StatementVisitor(NodeVisitor):\n \"\"\"Visit all statements within function defs in an AST\"\"\"\n\n def __init__(self) -> None:\n self.statements: List[Tuple[ast.AST, str]] = []\n self.func_name = \"\"\n self.statements_seen: Set[Tuple[ast.AST, str]] = set()\n super().__init__()\n\n def add_statements(self, node: ast.AST, attr: str) -> None:\n elems: List[ast.AST] = getattr(node, attr, [])\n if not isinstance(elems, list):\n elems = [elems] # type: ignore\n\n for elem in elems:\n stmt = (elem, self.func_name)\n if stmt in self.statements_seen:\n continue\n\n self.statements.append(stmt)\n self.statements_seen.add(stmt)\n\n def visit_node(self, node: ast.AST) -> None:\n # Any node other than the ones listed below\n self.add_statements(node, 'body')\n self.add_statements(node, 'orelse')\n\n def visit_Module(self, node: ast.Module) -> None:\n # Module children are defs, classes and globals - don't add\n super().generic_visit(node)\n\n def visit_ClassDef(self, node: ast.ClassDef) -> None:\n # Class children are defs and globals - don't add\n super().generic_visit(node)\n\n def generic_visit(self, node: ast.AST) -> None:\n self.visit_node(node)\n super().generic_visit(node)\n\n def visit_FunctionDef(self,\n node: Union[ast.FunctionDef, ast.AsyncFunctionDef]) -> None:\n if not self.func_name:\n self.func_name = node.name\n\n self.visit_node(node)\n super().generic_visit(node)\n self.func_name = \"\"\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:\n return self.visit_FunctionDef(node)",
"_____no_output_____"
]
],
[
[
"The function `all_statements()` returns all statements in the given AST `tree`. If an `ast` class `tp` is given, it only returns instances of that class.",
"_____no_output_____"
]
],
[
[
"def all_statements_and_functions(tree: ast.AST, \n tp: Optional[Type] = None) -> \\\n List[Tuple[ast.AST, str]]:\n \"\"\"\n Return a list of pairs (`statement`, `function`) for all statements in `tree`.\n If `tp` is given, return only statements of that class.\n \"\"\"\n\n visitor = StatementVisitor()\n visitor.visit(tree)\n statements = visitor.statements\n if tp is not None:\n statements = [s for s in statements if isinstance(s[0], tp)]\n\n return statements",
"_____no_output_____"
],
[
"def all_statements(tree: ast.AST, tp: Optional[Type] = None) -> List[ast.AST]:\n \"\"\"\n Return a list of all statements in `tree`.\n If `tp` is given, return only statements of that class.\n \"\"\"\n\n return [stmt for stmt, func_name in all_statements_and_functions(tree, tp)]",
"_____no_output_____"
]
],
[
[
"Here are all the `return` statements in `middle()`:",
"_____no_output_____"
]
],
[
[
"all_statements(middle_tree(), ast.Return)",
"_____no_output_____"
],
[
"all_statements_and_functions(middle_tree(), ast.If)",
"_____no_output_____"
]
],
[
[
"We can randomly pick an element:",
"_____no_output_____"
]
],
[
[
"import random",
"_____no_output_____"
],
[
"random_node = random.choice(all_statements(middle_tree()))\nastor.to_source(random_node)",
"_____no_output_____"
]
],
[
[
"### Mutating Statements\n\nThe main part in mutation, however, is to actually mutate the code of the program under test. To this end, we introduce a `StatementMutator` class – a subclass of `NodeTransformer`, described in the [official Python `ast` reference](http://docs.python.org/3/library/ast).",
"_____no_output_____"
],
[
"The constructor provides various keyword arguments to configure the mutator.",
"_____no_output_____"
]
],
[
[
"from ast import NodeTransformer",
"_____no_output_____"
],
[
"import copy",
"_____no_output_____"
],
[
"class StatementMutator(NodeTransformer):\n \"\"\"Mutate statements in an AST for automated repair.\"\"\"\n\n def __init__(self, \n suspiciousness_func: \n Optional[Callable[[Tuple[Callable, int]], float]] = None,\n source: Optional[List[ast.AST]] = None, \n log: bool = False) -> None:\n \"\"\"\n Constructor.\n `suspiciousness_func` is a function that takes a location\n (function, line_number) and returns a suspiciousness value\n between 0 and 1.0. If not given, all locations get the same \n suspiciousness of 1.0.\n `source` is a list of statements to choose from.\n \"\"\"\n\n super().__init__()\n self.log = log\n\n if suspiciousness_func is None:\n def suspiciousness_func(location: Tuple[Callable, int]) -> float:\n return 1.0\n assert suspiciousness_func is not None\n\n self.suspiciousness_func: Callable = suspiciousness_func\n\n if source is None:\n source = []\n self.source = source\n\n if self.log > 1:\n for i, node in enumerate(self.source):\n print(f\"Source for repairs #{i}:\")\n print_content(astor.to_source(node), '.py')\n print()\n print()\n\n self.mutations = 0",
"_____no_output_____"
]
],
[
[
"#### Choosing Suspicious Statements to Mutate\n\nWe start with deciding which AST nodes to mutate. The method `node_suspiciousness()` returns the suspiciousness for a given node, by invoking the suspiciousness function `suspiciousness_func` given during initialization.",
"_____no_output_____"
]
],
[
[
"import warnings",
"_____no_output_____"
],
[
"class StatementMutator(StatementMutator):\n def node_suspiciousness(self, stmt: ast.AST, func_name: str) -> float:\n if not hasattr(stmt, 'lineno'):\n warnings.warn(f\"{self.format_node(stmt)}: Expected line number\")\n return 0.0\n\n suspiciousness = self.suspiciousness_func((func_name, stmt.lineno))\n if suspiciousness is None: # not executed\n return 0.0\n\n return suspiciousness\n\n def format_node(self, node: ast.AST) -> str:\n ...",
"_____no_output_____"
]
],
[
[
"The method `node_to_be_mutated()` picks a node (statement) to be mutated. It determines the suspiciousness of all statements, and invokes `random.choices()`, using the suspiciousness as weight. Unsuspicious statements (with zero weight) will not be chosen.",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n def node_to_be_mutated(self, tree: ast.AST) -> ast.AST:\n statements = all_statements_and_functions(tree)\n assert len(statements) > 0, \"No statements\"\n\n weights = [self.node_suspiciousness(stmt, func_name) \n for stmt, func_name in statements]\n stmts = [stmt for stmt, func_name in statements]\n\n if self.log > 1:\n print(\"Weights:\")\n for i, stmt in enumerate(statements):\n node, func_name = stmt\n print(f\"{weights[i]:.2} {self.format_node(node)}\")\n\n if sum(weights) == 0.0:\n # No suspicious line\n return random.choice(stmts)\n else:\n return random.choices(stmts, weights=weights)[0]",
"_____no_output_____"
]
],
[
[
"#### Choosing a Mutation Method",
"_____no_output_____"
],
[
"The method `visit()` is invoked on all nodes. For nodes marked with a `mutate_me` attribute, it randomly chooses a mutation method (`choose_op()`) and then invokes it on the node.\n\nAccording to the rules of `NodeTransformer`, the mutation method can return\n\n* a new node or a list of nodes, replacing the current node;\n* `None`, deleting it; or\n* the node itself, keeping things as they are.",
"_____no_output_____"
]
],
[
[
"import re",
"_____no_output_____"
],
[
"RE_SPACE = re.compile(r'[ \\t\\n]+')",
"_____no_output_____"
],
[
"class StatementMutator(StatementMutator):\n def choose_op(self) -> Callable:\n return random.choice([self.insert, self.swap, self.delete])\n\n def visit(self, node: ast.AST) -> ast.AST:\n super().visit(node) # Visits (and transforms?) children\n\n if not node.mutate_me: # type: ignore\n return node\n\n op = self.choose_op()\n new_node = op(node)\n self.mutations += 1\n\n if self.log:\n print(f\"{node.lineno:4}:{op.__name__ + ':':7} \"\n f\"{self.format_node(node)} \"\n f\"becomes {self.format_node(new_node)}\")\n\n return new_node",
"_____no_output_____"
]
],
[
[
"#### Swapping Statements\n\nOur first mutator is `swap()`, which replaces the current node `NODE` by a random node found in `source` (using a newly defined `choose_statement()`).\n\nAs a rule of thumb, we try to avoid inserting entire subtrees with all attached statements; and try to respect only the first line of a node. If the new node has the form \n\n```python\nif P:\n BODY\n```\n\nwe thus only insert \n\n```python\nif P: \n pass\n```\n\nsince the statements in `BODY` have a later chance to get inserted. The same holds for all constructs that have a `BODY`, i.e. `while`, `for`, `try`, `with`, and more.",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n def choose_statement(self) -> ast.AST:\n return copy.deepcopy(random.choice(self.source))",
"_____no_output_____"
],
[
"class StatementMutator(StatementMutator):\n def swap(self, node: ast.AST) -> ast.AST:\n \"\"\"Replace `node` with a random node from `source`\"\"\"\n new_node = self.choose_statement()\n\n if isinstance(new_node, ast.stmt):\n # The source `if P: X` is added as `if P: pass`\n if hasattr(new_node, 'body'):\n new_node.body = [ast.Pass()] # type: ignore\n if hasattr(new_node, 'orelse'):\n new_node.orelse = [] # type: ignore\n if hasattr(new_node, 'finalbody'):\n new_node.finalbody = [] # type: ignore\n\n # ast.copy_location(new_node, node)\n return new_node",
"_____no_output_____"
]
],
[
[
"#### Inserting Statements\n\nOur next mutator is `insert()`, which randomly chooses some node from `source` and inserts it after the current node `NODE`. (If `NODE` is a `return` statement, then we insert the new node _before_ `NODE`.)\n\nIf the statement to be inserted has the form\n\n```python\nif P:\n BODY\n```\n\nwe only insert the \"header\" of the `if`, resulting in\n\n```python\nif P: \n NODE\n```\n\nAgain, this applies to all constructs that have a `BODY`, i.e., `while`, `for`, `try`, `with`, and more.",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n def insert(self, node: ast.AST) -> Union[ast.AST, List[ast.AST]]:\n \"\"\"Insert a random node from `source` after `node`\"\"\"\n new_node = self.choose_statement()\n\n if isinstance(new_node, ast.stmt) and hasattr(new_node, 'body'):\n # Inserting `if P: X` as `if P:`\n new_node.body = [node] # type: ignore\n if hasattr(new_node, 'orelse'):\n new_node.orelse = [] # type: ignore\n if hasattr(new_node, 'finalbody'):\n new_node.finalbody = [] # type: ignore\n # ast.copy_location(new_node, node)\n return new_node\n\n # Only insert before `return`, not after it\n if isinstance(node, ast.Return):\n if isinstance(new_node, ast.Return):\n return new_node\n else:\n return [new_node, node]\n\n return [node, new_node]",
"_____no_output_____"
]
],
[
[
"#### Deleting Statements\n\nOur last mutator is `delete()`, which deletes the current node `NODE`. The standard case is to replace `NODE` by a `pass` statement.\n\nIf the statement to be deleted has the form\n\n```python\nif P:\n BODY\n```\n\nwe only delete the \"header\" of the `if`, resulting in\n\n```python\nBODY\n```\n\nAgain, this applies to all constructs that have a `BODY`, i.e., `while`, `for`, `try`, `with`, and more. If the statement to be deleted has multiple branches, a random branch is chosen (e.g., the `else` branch of an `if` statement).",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n def delete(self, node: ast.AST) -> None:\n \"\"\"Delete `node`.\"\"\"\n\n branches = [attr for attr in ['body', 'orelse', 'finalbody']\n if hasattr(node, attr) and getattr(node, attr)]\n if branches:\n # Replace `if P: S` by `S`\n branch = random.choice(branches)\n new_node = getattr(node, branch)\n return new_node\n\n if isinstance(node, ast.stmt):\n # Avoid empty bodies; make this a `pass` statement\n new_node = ast.Pass()\n ast.copy_location(new_node, node)\n return new_node\n\n return None # Just delete",
"_____no_output_____"
],
[
"from bookutils import quiz",
"_____no_output_____"
],
[
"quiz(\"Why are statements replaced by `pass` rather than deleted?\",\n [\n \"Because `if P: pass` is valid Python, while `if P:` is not\",\n \"Because in Python, bodies for `if`, `while`, etc. cannot be empty\",\n \"Because a `pass` node makes a target for future mutations\",\n \"Because it causes the tests to pass\"\n ], '[3 ^ n for n in range(3)]')",
"_____no_output_____"
]
],
[
[
"Indeed, Python's `compile()` will fail if any of the bodies is an empty list. Also, it leaves us a statement that can be evolved further.",
"_____no_output_____"
],
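[
"To see why, here is a small, hypothetical illustration (independent of the repair code): CPython rejects an AST in which the body of an `if` has been emptied, while a `pass` body compiles fine:\n\n```python\nimport ast\n\ntree = ast.parse('if True: pass')   # minimal `if` with a `pass` body\ncompile(tree, '<ok>', 'exec')       # compiles fine\n\ntree.body[0].body = []              # now empty the body of the `if`\ntry:\n    compile(tree, '<empty-body>', 'exec')\nexcept ValueError as err:           # CPython reports the empty body\n    print(err)                      # e.g. 'empty body on If'\n```",
"_____no_output_____"
],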
[
"#### Helpers\n\nFor logging purposes, we introduce a helper function `format_node()` that returns a short string representation of the node.",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n NODE_MAX_LENGTH = 20\n\n def format_node(self, node: ast.AST) -> str:\n \"\"\"Return a string representation for `node`.\"\"\"\n if node is None:\n return \"None\"\n\n if isinstance(node, list):\n return \"; \".join(self.format_node(elem) for elem in node)\n\n s = RE_SPACE.sub(' ', astor.to_source(node)).strip()\n if len(s) > self.NODE_MAX_LENGTH - len(\"...\"):\n s = s[:self.NODE_MAX_LENGTH] + \"...\"\n return repr(s)",
"_____no_output_____"
]
],
[
[
"#### All Together\n\nLet us now create the main entry point, which is `mutate()`. It picks the node to be mutated and marks it with a `mutate_me` attribute. By calling `visit()`, it then sets off the `NodeTransformer` transformation.",
"_____no_output_____"
]
],
[
[
"class StatementMutator(StatementMutator):\n def mutate(self, tree: ast.AST) -> ast.AST:\n \"\"\"Mutate the given AST `tree` in place. Return mutated tree.\"\"\"\n\n assert isinstance(tree, ast.AST)\n\n tree = copy.deepcopy(tree)\n\n if not self.source:\n self.source = all_statements(tree)\n\n for node in ast.walk(tree):\n node.mutate_me = False # type: ignore\n\n node = self.node_to_be_mutated(tree)\n node.mutate_me = True # type: ignore\n\n self.mutations = 0\n\n tree = self.visit(tree)\n\n if self.mutations == 0:\n warnings.warn(\"No mutations found\")\n\n ast.fix_missing_locations(tree)\n return tree",
"_____no_output_____"
]
],
[
[
"Here are a number of transformations applied by `StatementMutator`:",
"_____no_output_____"
]
],
[
[
"mutator = StatementMutator(log=True)\nfor i in range(10):\n new_tree = mutator.mutate(middle_tree())",
"_____no_output_____"
]
],
[
[
"This is the effect of the last mutator applied on `middle`:",
"_____no_output_____"
]
],
[
[
"print_content(astor.to_source(new_tree), '.py')",
"_____no_output_____"
]
],
[
[
"## Fitness\n\nNow that we can apply random mutations to code, let us find out how good these mutations are. Given our test suites for `middle`, we can check for a given code candidate how many of the previously passing test cases it passes, and how many of the failing test cases it passes. The more tests pass, the higher the _fitness_ of the candidate.",
"_____no_output_____"
],
[
"Not all passing tests have the same value, though. We want to prevent _regressions_ – that is, having a fix that breaks a previously passing test. The values of `WEIGHT_PASSING` and `WEIGHT_FAILING` set the relative weight (or importance) of passing vs. failing tests; we see that keeping passing tests passing is far more important then fixing failing tests.",
"_____no_output_____"
]
],
[
[
"WEIGHT_PASSING = 0.99\nWEIGHT_FAILING = 0.01",
"_____no_output_____"
],
[
"def middle_fitness(tree: ast.AST) -> float:\n \"\"\"Compute fitness of a `middle()` candidate given in `tree`\"\"\"\n original_middle = middle\n\n try:\n code = compile(tree, '<fitness>', 'exec')\n except ValueError:\n return 0 # Compilation error\n\n exec(code, globals())\n\n passing_passed = 0\n failing_passed = 0\n\n # Test how many of the passing runs pass\n for x, y, z in MIDDLE_PASSING_TESTCASES:\n try:\n middle_test(x, y, z)\n passing_passed += 1\n except AssertionError:\n pass\n\n passing_ratio = passing_passed / len(MIDDLE_PASSING_TESTCASES)\n\n # Test how many of the failing runs pass\n for x, y, z in MIDDLE_FAILING_TESTCASES:\n try:\n middle_test(x, y, z)\n failing_passed += 1\n except AssertionError:\n pass\n\n failing_ratio = failing_passed / len(MIDDLE_FAILING_TESTCASES)\n\n fitness = (WEIGHT_PASSING * passing_ratio +\n WEIGHT_FAILING * failing_ratio)\n\n globals()['middle'] = original_middle\n return fitness",
"_____no_output_____"
]
],
[
[
"Our faulty `middle()` program has a fitness of `WEIGHT_PASSING` (99%), because it passes all the passing tests (but none of the failing ones).",
"_____no_output_____"
]
],
[
[
"middle_fitness(middle_tree())",
"_____no_output_____"
]
],
[
[
"Our \"sort of fixed\" version of `middle()` gets a much lower fitness:",
"_____no_output_____"
]
],
[
[
"middle_fitness(ast.parse(\"def middle(x, y, z): return x\"))",
"_____no_output_____"
]
],
[
[
"In the [chapter on statistical debugging](StatisticalDebugger), we also defined a fixed version of `middle()`. This gets a fitness of 1.0, passing all tests. (We won't use this fixed version for automated repairs.)",
"_____no_output_____"
]
],
[
[
"from StatisticalDebugger import middle_fixed",
"_____no_output_____"
],
[
"middle_fixed_source = \\\n inspect.getsource(middle_fixed).replace('middle_fixed', 'middle').strip()",
"_____no_output_____"
],
[
"middle_fitness(ast.parse(middle_fixed_source))",
"_____no_output_____"
]
],
[
[
"## Population\n\nWe now set up a _population_ of fix candidates to evolve over time. A higher population size will yield more candidates to check, but also need more time to test; a lower population size will yield fewer candidates, but allow for more evolution steps. We choose a population size of 40 (from \\cite{LeGoues2012}).",
"_____no_output_____"
]
],
[
[
"POPULATION_SIZE = 40\nmiddle_mutator = StatementMutator()",
"_____no_output_____"
],
[
"MIDDLE_POPULATION = [middle_tree()] + \\\n [middle_mutator.mutate(middle_tree()) for i in range(POPULATION_SIZE - 1)]",
"_____no_output_____"
]
],
[
[
"We sort the fix candidates according to their fitness. This actually runs all tests on all candidates.",
"_____no_output_____"
]
],
[
[
"MIDDLE_POPULATION.sort(key=middle_fitness, reverse=True)",
"_____no_output_____"
]
],
[
[
"The candidate with the highest fitness is still our original (faulty) `middle()` code:",
"_____no_output_____"
]
],
[
[
"print(astor.to_source(MIDDLE_POPULATION[0]),\n middle_fitness(MIDDLE_POPULATION[0]))",
"_____no_output_____"
]
],
[
[
"At the other end of the spectrum, the candidate with the lowest fitness has some vital functionality removed:",
"_____no_output_____"
]
],
[
[
"print(astor.to_source(MIDDLE_POPULATION[-1]),\n middle_fitness(MIDDLE_POPULATION[-1]))",
"_____no_output_____"
]
],
[
[
"## Evolution\n\nTo evolve our population of candidates, we fill up the population with mutations created from the population, using a `StatementMutator` as described above to create these mutations. Then we reduce the population to its original size, keeping the fittest candidates.\n<!-- TODO: shouldn't there be some kind of randomness to also keep sometimes candidates with lesser fitness? -->",
"_____no_output_____"
]
],
[
[
"def evolve_middle() -> None:\n global MIDDLE_POPULATION\n\n source = all_statements(middle_tree())\n mutator = StatementMutator(source=source)\n\n n = len(MIDDLE_POPULATION)\n\n offspring: List[ast.AST] = []\n while len(offspring) < n:\n parent = random.choice(MIDDLE_POPULATION)\n offspring.append(mutator.mutate(parent))\n\n MIDDLE_POPULATION += offspring\n MIDDLE_POPULATION.sort(key=middle_fitness, reverse=True)\n MIDDLE_POPULATION = MIDDLE_POPULATION[:n]",
"_____no_output_____"
]
],
[
[
"This is what happens when evolving our population for the first time; the original source is still our best candidate.",
"_____no_output_____"
]
],
[
[
"evolve_middle()",
"_____no_output_____"
],
[
"tree = MIDDLE_POPULATION[0]\nprint(astor.to_source(tree), middle_fitness(tree))",
"_____no_output_____"
],
[
"# docassert\nassert middle_fitness(tree) < 1.0",
"_____no_output_____"
]
],
[
[
"However, nothing keeps us from evolving for a few generations more...",
"_____no_output_____"
]
],
[
[
"for i in range(50):\n evolve_middle()\n best_middle_tree = MIDDLE_POPULATION[0]\n fitness = middle_fitness(best_middle_tree)\n print(f\"\\rIteration {i:2}: fitness = {fitness} \", end=\"\")\n if fitness >= 1.0:\n break",
"_____no_output_____"
],
[
"# docassert\nassert middle_fitness(best_middle_tree) >= 1.0",
"_____no_output_____"
]
],
[
[
"Success! We find a candidate that actually passes all tests, including the failing ones. Here is the candidate:",
"_____no_output_____"
]
],
[
[
"print_content(astor.to_source(best_middle_tree), '.py', start_line_number=1)",
"_____no_output_____"
]
],
[
[
"... and yes, it passes all tests:",
"_____no_output_____"
]
],
[
[
"original_middle = middle\ncode = compile(best_middle_tree, '<string>', 'exec')\nexec(code, globals())\n\nfor x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:\n middle_test(x, y, z)\n\nmiddle = original_middle",
"_____no_output_____"
]
],
[
[
"As the code is already validated by hundreds of test cases, it is very valuable for the programmer. Even if the programmer decides not to use the code as is, the location gives very strong hints on which code to examine and where to apply a fix.",
"_____no_output_____"
],
[
"However, a closer look at our fix candidate shows that there is some amount of redundancy – that is, superfluous statements.",
"_____no_output_____"
]
],
[
[
"quiz(\"Some of the lines in our fix candidate are redundant. \"\n \"Which are these?\",\n [\n \"Line 3: `if x < y:`\",\n \"Line 4: `if x < z:`\",\n \"Line 5: `return y`\",\n \"Line 13: `return z`\"\n ], '[eval(chr(100 - x)) for x in [48, 50]]')",
"_____no_output_____"
]
],
[
[
"## Simplifying",
"_____no_output_____"
],
[
"As demonstrated in the chapter on [reducing failure-inducing inputs](DeltaDebugger.ipynb), we can use delta debugging on code to get rid of these superfluous statements.",
"_____no_output_____"
],
[
"The trick for simplification is to have the test function (`test_middle_lines()`) declare a fitness of 1.0 as a \"failure\". Delta debugging will then simplify the input as long as the \"failure\" (and hence the maximum fitness obtained) persists.",
"_____no_output_____"
]
],
[
[
"from DeltaDebugger import DeltaDebugger",
"_____no_output_____"
],
[
"middle_lines = astor.to_source(best_middle_tree).strip().split('\\n')",
"_____no_output_____"
],
[
"def test_middle_lines(lines: List[str]) -> None:\n source = \"\\n\".join(lines)\n tree = ast.parse(source)\n assert middle_fitness(tree) < 1.0 # \"Fail\" only while fitness is 1.0",
"_____no_output_____"
],
[
"with DeltaDebugger() as dd:\n test_middle_lines(middle_lines)",
"_____no_output_____"
],
[
"reduced_lines = dd.min_args()['lines']",
"_____no_output_____"
],
[
"reduced_source = \"\\n\".join(reduced_lines)",
"_____no_output_____"
],
[
"repaired_source = astor.to_source(ast.parse(reduced_source)) # normalize\nprint_content(repaired_source, '.py')",
"_____no_output_____"
],
[
"# docassert\nassert len(reduced_lines) < len(middle_lines)",
"_____no_output_____"
]
],
[
[
"Success! Delta Debugging has eliminated the superfluous statements. We can present the difference to the original as a patch:",
"_____no_output_____"
]
],
[
[
"original_source = astor.to_source(ast.parse(middle_source)) # normalize",
"_____no_output_____"
],
[
"from ChangeDebugger import diff, print_patch # minor dependency",
"_____no_output_____"
],
[
"for patch in diff(original_source, repaired_source):\n print_patch(patch)",
"_____no_output_____"
]
],
[
[
"We can present this patch to the programmer, who will then immediately know what to fix in the `middle()` code.",
"_____no_output_____"
],
[
"## Crossover\n\nSo far, we have only applied one kind of genetic operators – mutation. There is a second one, though, also inspired by natural selection. \n\nThe *crossover* operation mutates two strands of genes, as illustrated in the following picture. We have two parents (red and blue), each as a sequence of genes. To create \"crossed\" children, we pick a _crossover point_ and exchange the strands at this very point:\n\n",
"_____no_output_____"
],
[
"We implement a `CrossoverOperator` class that implements such an operation on two randomly chosen statement lists of two programs. It is used as\n\n```python\ncrossover = CrossoverOperator()\ncrossover.crossover(tree_p1, tree_p2)\n```\n\nwhere `tree_p1` and `tree_p2` are two ASTs that are changed in place.",
"_____no_output_____"
],
[
"### Excursion: Implementing Crossover",
"_____no_output_____"
],
[
"#### Crossing Statement Lists",
"_____no_output_____"
],
[
"Applied on programs, a crossover mutation takes two parents and \"crosses\" a list of statements. As an example, if our \"parents\" `p1()` and `p2()` are defined as follows:",
"_____no_output_____"
]
],
[
[
"def p1(): # type: ignore\n a = 1\n b = 2\n c = 3",
"_____no_output_____"
],
[
"def p2(): # type: ignore\n x = 1\n y = 2\n z = 3",
"_____no_output_____"
]
],
[
[
"Then a crossover operation would produce one child with a body\n\n```python\na = 1\ny = 2\nz = 3\n```\n\nand another child with a body\n\n```python\nx = 1\nb = 2\nc = 3\n```",
"_____no_output_____"
],
[
"We can easily implement this in a `CrossoverOperator` class in a method `cross_bodies()`.",
"_____no_output_____"
]
],
[
[
"class CrossoverOperator:\n \"\"\"A class for performing statement crossover of Python programs\"\"\"\n\n def __init__(self, log: bool = False):\n \"\"\"Constructor. If `log` is set, turn on logging.\"\"\"\n self.log = log\n\n def cross_bodies(self, body_1: List[ast.AST], body_2: List[ast.AST]) -> \\\n Tuple[List[ast.AST], List[ast.AST]]:\n \"\"\"Crossover the statement lists `body_1` x `body_2`. Return new lists.\"\"\"\n\n assert isinstance(body_1, list)\n assert isinstance(body_2, list)\n\n crossover_point_1 = len(body_1) // 2\n crossover_point_2 = len(body_2) // 2\n return (body_1[:crossover_point_1] + body_2[crossover_point_2:],\n body_2[:crossover_point_2] + body_1[crossover_point_1:])",
"_____no_output_____"
]
],
[
[
"Here's the `CrossoverOperatorMutator` applied on `p1` and `p2`:",
"_____no_output_____"
]
],
[
[
"tree_p1: ast.Module = ast.parse(inspect.getsource(p1))\ntree_p2: ast.Module = ast.parse(inspect.getsource(p2))",
"_____no_output_____"
],
[
"body_p1 = tree_p1.body[0].body # type: ignore\nbody_p2 = tree_p2.body[0].body # type: ignore\nbody_p1",
"_____no_output_____"
],
[
"crosser = CrossoverOperator()\ntree_p1.body[0].body, tree_p2.body[0].body = crosser.cross_bodies(body_p1, body_p2) # type: ignore",
"_____no_output_____"
],
[
"print_content(astor.to_source(tree_p1), '.py')",
"_____no_output_____"
],
[
"print_content(astor.to_source(tree_p2), '.py')",
"_____no_output_____"
]
],
[
[
"#### Applying Crossover on Programs\n\nApplying the crossover operation on arbitrary programs is a bit more complex, though. We first have to _find_ lists of statements that we actually can cross over. The `can_cross()` method returns True if we have a list of statements that we can cross. Python modules and classes are excluded, because changing the ordering of definitions will not have much impact on the program functionality, other than introducing errors due to dependencies.",
"_____no_output_____"
]
],
[
[
"class CrossoverOperator(CrossoverOperator):\n # In modules and class defs, the ordering of elements does not matter (much)\n SKIP_LIST = {ast.Module, ast.ClassDef}\n\n def can_cross(self, tree: ast.AST, body_attr: str = 'body') -> bool:\n if any(isinstance(tree, cls) for cls in self.SKIP_LIST):\n return False\n\n body = getattr(tree, body_attr, [])\n return body and len(body) >= 2",
"_____no_output_____"
]
],
[
[
"Here comes our method `crossover_attr()` which searches for crossover possibilities. It takes two ASTs `t1` and `t2` and an attribute (typically `'body'`) and retrieves the attribute lists $l_1$ (from `t1.<attr>`) and $l_2$ (from `t2.<attr>`).\n\nIf $l_1$ and $l_2$ can be crossed, it crosses them, and is done. Otherwise\n\n* If there is a pair of elements $e_1 \\in l_1$ and $e_2 \\in l_2$ that has the same name – say, functions of the same name –, it applies itself to $e_1$ and $e_2$.\n* Otherwise, it creates random pairs of elements $e_1 \\in l_1$ and $e_2 \\in l_2$ and applies itself on these very pairs.\n\n`crossover_attr()` changes `t1` and `t2` in place and returns True if a crossover was found; it returns False otherwise.",
"_____no_output_____"
]
],
[
[
"class CrossoverOperator(CrossoverOperator):\n def crossover_attr(self, t1: ast.AST, t2: ast.AST, body_attr: str) -> bool:\n \"\"\"\n Crossover the bodies `body_attr` of two trees `t1` and `t2`.\n Return True if successful.\n \"\"\"\n assert isinstance(t1, ast.AST)\n assert isinstance(t2, ast.AST)\n assert isinstance(body_attr, str)\n\n if not getattr(t1, body_attr, None) or not getattr(t2, body_attr, None):\n return False\n\n if self.crossover_branches(t1, t2):\n return True\n\n if self.log > 1:\n print(f\"Checking {t1}.{body_attr} x {t2}.{body_attr}\")\n\n body_1 = getattr(t1, body_attr)\n body_2 = getattr(t2, body_attr)\n\n # If both trees have the attribute, we can cross their bodies\n if self.can_cross(t1, body_attr) and self.can_cross(t2, body_attr):\n if self.log:\n print(f\"Crossing {t1}.{body_attr} x {t2}.{body_attr}\")\n\n new_body_1, new_body_2 = self.cross_bodies(body_1, body_2)\n setattr(t1, body_attr, new_body_1)\n setattr(t2, body_attr, new_body_2)\n return True\n\n # Strategy 1: Find matches in class/function of same name\n for child_1 in body_1:\n if hasattr(child_1, 'name'):\n for child_2 in body_2:\n if (hasattr(child_2, 'name') and\n child_1.name == child_2.name):\n if self.crossover_attr(child_1, child_2, body_attr):\n return True\n\n # Strategy 2: Find matches anywhere\n for child_1 in random.sample(body_1, len(body_1)):\n for child_2 in random.sample(body_2, len(body_2)):\n if self.crossover_attr(child_1, child_2, body_attr):\n return True\n\n return False",
"_____no_output_____"
]
],
[
[
"We have a special case for `if` nodes, where we can cross their body and `else` branches. (In Python, `for` and `while` also have `else` branches, but swapping these with loop bodies is likely to create havoc.)",
"_____no_output_____"
]
],
[
[
"class CrossoverOperator(CrossoverOperator):\n def crossover_branches(self, t1: ast.AST, t2: ast.AST) -> bool:\n \"\"\"Special case:\n `t1` = `if P: S1 else: S2` x `t2` = `if P': S1' else: S2'`\n becomes\n `t1` = `if P: S2' else: S1'` and `t2` = `if P': S2 else: S1`\n Returns True if successful.\n \"\"\"\n assert isinstance(t1, ast.AST)\n assert isinstance(t2, ast.AST)\n\n if (hasattr(t1, 'body') and hasattr(t1, 'orelse') and\n hasattr(t2, 'body') and hasattr(t2, 'orelse')):\n\n t1 = cast(ast.If, t1) # keep mypy happy\n t2 = cast(ast.If, t2)\n\n if self.log:\n print(f\"Crossing branches {t1} x {t2}\")\n\n t1.body, t1.orelse, t2.body, t2.orelse = \\\n t2.orelse, t2.body, t1.orelse, t1.body\n return True\n\n return False",
"_____no_output_____"
]
],
[
[
"The method `crossover()` is the main entry point. It checks for the special `if` case as described above; if not, it searches for possible crossover points. It raises `CrossoverError` if not successful.",
"_____no_output_____"
]
],
[
[
"class CrossoverOperator(CrossoverOperator):\n def crossover(self, t1: ast.AST, t2: ast.AST) -> Tuple[ast.AST, ast.AST]:\n \"\"\"Do a crossover of ASTs `t1` and `t2`.\n Raises `CrossoverError` if no crossover is found.\"\"\"\n assert isinstance(t1, ast.AST)\n assert isinstance(t2, ast.AST)\n\n for body_attr in ['body', 'orelse', 'finalbody']:\n if self.crossover_attr(t1, t2, body_attr):\n return t1, t2\n\n raise CrossoverError(\"No crossover found\")",
"_____no_output_____"
],
[
"class CrossoverError(ValueError):\n pass",
"_____no_output_____"
]
],
[
[
"### End of Excursion",
"_____no_output_____"
],
[
"### Crossover in Action",
"_____no_output_____"
],
[
"Let us put our `CrossoverOperator` in action. Here is a test case for crossover, involving more deeply nested structures:",
"_____no_output_____"
]
],
[
[
"def p1(): # type: ignore\n if True:\n print(1)\n print(2)\n print(3)",
"_____no_output_____"
],
[
"def p2(): # type: ignore\n if True:\n print(a)\n print(b)\n else:\n print(c)\n print(d)",
"_____no_output_____"
]
],
[
[
"We invoke the `crossover()` method with two ASTs from `p1` and `p2`:",
"_____no_output_____"
]
],
[
[
"crossover = CrossoverOperator()\ntree_p1 = ast.parse(inspect.getsource(p1))\ntree_p2 = ast.parse(inspect.getsource(p2))\ncrossover.crossover(tree_p1, tree_p2);",
"_____no_output_____"
]
],
[
[
"Here is the crossed offspring, mixing statement lists of `p1` and `p2`:",
"_____no_output_____"
]
],
[
[
"print_content(astor.to_source(tree_p1), '.py')",
"_____no_output_____"
],
[
"print_content(astor.to_source(tree_p2), '.py')",
"_____no_output_____"
]
],
[
[
"Here is our special case for `if` nodes in action, crossing our `middle()` tree with `p2`.",
"_____no_output_____"
]
],
[
[
"middle_t1, middle_t2 = crossover.crossover(middle_tree(),\n ast.parse(inspect.getsource(p2)))",
"_____no_output_____"
]
],
[
[
"We see how the resulting offspring encompasses elements of both sources:",
"_____no_output_____"
]
],
[
[
"print_content(astor.to_source(middle_t1), '.py')",
"_____no_output_____"
],
[
"print_content(astor.to_source(middle_t2), '.py')",
"_____no_output_____"
]
],
[
[
"## A Repairer Class\n\nSo far, we have applied all our techniques on the `middle()` program only. Let us now create a `Repairer` class that applies automatic program repair on arbitrary Python programs. The idea is that you can apply it on some statistical debugger, for which you have gathered passing and failing test cases, and then invoke its `repair()` method to find a \"best\" fix candidate:\n\n```python\ndebugger = OchiaiDebugger()\nwith debugger:\n <passing test>\nwith debugger:\n <failing test>\n...\nrepairer = Repairer(debugger)\nrepairer.repair()\n```",
"_____no_output_____"
],
[
"### Excursion: Implementing Repairer",
"_____no_output_____"
],
[
"The main argument to the `Repairer` constructor is the `debugger` to get information from. On top of that, it also allows to customize the classes used for mutation, crossover, and reduction. Setting `targets` allows to define a set of functions to repair; setting `sources` allows to set a set of sources to take repairs from. The constructor then sets up the environment for running tests and repairing, as described below.",
"_____no_output_____"
]
],
[
[
"from StackInspector import StackInspector # minor dependency",
"_____no_output_____"
],
[
"class Repairer(StackInspector):\n \"\"\"A class for automatic repair of Python programs\"\"\"\n\n def __init__(self, debugger: RankingDebugger, *,\n targets: Optional[List[Any]] = None,\n sources: Optional[List[Any]] = None,\n log: Union[bool, int] = False,\n mutator_class: Type = StatementMutator,\n crossover_class: Type = CrossoverOperator,\n reducer_class: Type = DeltaDebugger,\n globals: Optional[Dict[str, Any]] = None):\n \"\"\"Constructor.\n`debugger`: a `RankingDebugger` to take tests and coverage from.\n`targets`: a list of functions/modules to be repaired.\n (default: the covered functions in `debugger`, except tests)\n`sources`: a list of functions/modules to take repairs from.\n (default: same as `targets`)\n`globals`: if given, a `globals()` dict for executing targets\n (default: `globals()` of caller)\"\"\"\n\n assert isinstance(debugger, RankingDebugger)\n self.debugger = debugger\n self.log = log\n\n if targets is None:\n targets = self.default_functions()\n if not targets:\n raise ValueError(\"No targets to repair\")\n\n if sources is None:\n sources = self.default_functions()\n if not sources:\n raise ValueError(\"No sources to take repairs from\")\n\n if self.debugger.function() is None:\n raise ValueError(\"Multiple entry points observed\")\n\n self.target_tree: ast.AST = self.parse(targets)\n self.source_tree: ast.AST = self.parse(sources)\n\n self.log_tree(\"Target code to be repaired:\", self.target_tree)\n if ast.dump(self.target_tree) != ast.dump(self.source_tree):\n self.log_tree(\"Source code to take repairs from:\", \n self.source_tree)\n\n self.fitness_cache: Dict[str, float] = {}\n\n self.mutator: StatementMutator = \\\n mutator_class(\n source=all_statements(self.source_tree),\n suspiciousness_func=self.debugger.suspiciousness,\n log=(self.log >= 3))\n self.crossover: CrossoverOperator = crossover_class(log=(self.log >= 3))\n self.reducer: DeltaDebugger = reducer_class(log=(self.log >= 3))\n\n if globals is None:\n globals = self.caller_globals() # see below\n\n self.globals = globals",
"_____no_output_____"
]
],
[
[
"When we access or execute functions, we do so in the caller's environment, not ours. The `caller_globals()` method from `StackInspector` acts as replacement for `globals()`.",
"_____no_output_____"
],
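   [
    "The exact lookup is left to `StackInspector`; conceptually, though, retrieving a caller's namespace boils down to walking up the interpreter's frame stack. Here is a minimal, illustrative sketch (not the actual `StackInspector` implementation) based on Python's `inspect` module:\n\n```python\nimport inspect\n\ndef sketch_caller_globals():\n    # Illustration only: the frame one level up from this call\n    # is the caller; its f_globals attribute is the caller's\n    # global namespace.\n    frame = inspect.currentframe()\n    assert frame is not None and frame.f_back is not None\n    return frame.f_back.f_globals\n```",
    "_____no_output_____"
   ],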
[
"#### Helper Functions\n\nThe constructor uses a number of helper functions to create its environment.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def getsource(self, item: Union[str, Any]) -> str:\n \"\"\"Get the source for `item`. Can also be a string.\"\"\"\n\n if isinstance(item, str):\n item = self.globals[item]\n return inspect.getsource(item)",
"_____no_output_____"
],
[
"class Repairer(Repairer):\n def default_functions(self) -> List[Callable]:\n \"\"\"Return the set of functions to be repaired.\n Functions whose names start or end in `test` are excluded.\"\"\"\n def is_test(name: str) -> bool:\n return name.startswith('test') or name.endswith('test')\n\n return [func for func in self.debugger.covered_functions()\n if not is_test(func.__name__)]",
"_____no_output_____"
],
[
"class Repairer(Repairer):\n def log_tree(self, description: str, tree: Any) -> None:\n \"\"\"Print out `tree` as source code prefixed by `description`.\"\"\"\n if self.log:\n print(description)\n print_content(astor.to_source(tree), '.py')\n print()\n print()",
"_____no_output_____"
],
[
"class Repairer(Repairer):\n def parse(self, items: List[Any]) -> ast.AST:\n \"\"\"Read in a list of items into a single tree\"\"\"\n tree = ast.parse(\"\")\n for item in items:\n if isinstance(item, str):\n item = self.globals[item]\n\n item_lines, item_first_lineno = inspect.getsourcelines(item)\n\n try:\n item_tree = ast.parse(\"\".join(item_lines))\n except IndentationError:\n # inner function or likewise\n warnings.warn(f\"Can't parse {item.__name__}\")\n continue\n\n ast.increment_lineno(item_tree, item_first_lineno - 1)\n tree.body += item_tree.body\n\n return tree",
"_____no_output_____"
]
],
[
[
"#### Running Tests\n\nNow that we have set the environment for `Repairer`, we can implement one step of automatic repair after the other. The method `run_test_set()` runs the given `test_set` (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`), returning the number of passed tests. If `validate` is set, it checks whether the outcomes are as expected.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def run_test_set(self, test_set: str, validate: bool = False) -> int:\n \"\"\"\n Run given `test_set`\n (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`).\n If `validate` is set, check expectations.\n Return number of passed tests.\n \"\"\"\n passed = 0\n collectors = self.debugger.collectors[test_set]\n function = self.debugger.function()\n assert function is not None\n # FIXME: function may have been redefined\n\n for c in collectors:\n if self.log >= 4:\n print(f\"Testing {c.id()}...\", end=\"\")\n\n try:\n function(**c.args())\n except Exception as err:\n if self.log >= 4:\n print(f\"failed ({err.__class__.__name__})\")\n\n if validate and test_set == self.debugger.PASS:\n raise err.__class__(\n f\"{c.id()} should have passed, but failed\")\n continue\n\n passed += 1\n if self.log >= 4:\n print(\"passed\")\n\n if validate and test_set == self.debugger.FAIL:\n raise FailureNotReproducedError(\n f\"{c.id()} should have failed, but passed\")\n\n return passed",
"_____no_output_____"
],
[
"class FailureNotReproducedError(ValueError):\n pass",
"_____no_output_____"
]
],
[
[
"Here is how we use `run_tests_set()`:",
"_____no_output_____"
]
],
[
[
"repairer = Repairer(middle_debugger)\nassert repairer.run_test_set(middle_debugger.PASS) == \\\n len(MIDDLE_PASSING_TESTCASES)\nassert repairer.run_test_set(middle_debugger.FAIL) == 0",
"_____no_output_____"
]
],
[
[
"The method `run_tests()` runs passing and failing tests, weighing the passed test cases to obtain the overall fitness.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def weight(self, test_set: str) -> float:\n \"\"\"\n Return the weight of `test_set`\n (`DifferenceDebugger.PASS` or `DifferenceDebugger.FAIL`).\n \"\"\"\n return {\n self.debugger.PASS: WEIGHT_PASSING,\n self.debugger.FAIL: WEIGHT_FAILING\n }[test_set]\n\n def run_tests(self, validate: bool = False) -> float:\n \"\"\"Run passing and failing tests, returning weighted fitness.\"\"\"\n fitness = 0.0\n\n for test_set in [self.debugger.PASS, self.debugger.FAIL]:\n passed = self.run_test_set(test_set, validate=validate)\n ratio = passed / len(self.debugger.collectors[test_set])\n fitness += self.weight(test_set) * ratio\n\n return fitness",
"_____no_output_____"
]
],
[
[
"The method `validate()` ensures the observed tests can be adequately reproduced.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def validate(self) -> None:\n fitness = self.run_tests(validate=True)\n assert fitness == self.weight(self.debugger.PASS)",
"_____no_output_____"
],
[
"repairer = Repairer(middle_debugger)\nrepairer.validate()",
"_____no_output_____"
]
],
[
[
"#### (Re)defining Functions\n\nOur `run_tests()` methods above do not yet redefine the function to be repaired. This is done by the `fitness()` function, which compiles and defines the given repair candidate `tree` before testing it. It caches and returns the fitness.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def fitness(self, tree: ast.AST) -> float:\n \"\"\"Test `tree`, returning its fitness\"\"\"\n key = cast(str, ast.dump(tree))\n if key in self.fitness_cache:\n return self.fitness_cache[key]\n\n # Save defs\n original_defs: Dict[str, Any] = {}\n for name in self.toplevel_defs(tree):\n if name in self.globals:\n original_defs[name] = self.globals[name]\n else:\n warnings.warn(f\"Couldn't find definition of {repr(name)}\")\n\n assert original_defs, f\"Couldn't find any definition\"\n\n if self.log >= 3:\n print(\"Repair candidate:\")\n print_content(astor.to_source(tree), '.py')\n print()\n\n # Create new definition\n try:\n code = compile(tree, '<Repairer>', 'exec')\n except ValueError: # Compilation error\n code = None\n\n if code is None:\n if self.log >= 3:\n print(f\"Fitness = 0.0 (compilation error)\")\n\n fitness = 0.0\n return fitness\n\n # Execute new code, defining new functions in `self.globals`\n exec(code, self.globals)\n\n # Set new definitions in the namespace (`__globals__`)\n # of the function we will be calling.\n function = self.debugger.function()\n assert function is not None\n assert hasattr(function, '__globals__')\n\n for name in original_defs:\n function.__globals__[name] = self.globals[name] # type: ignore\n\n fitness = self.run_tests(validate=False)\n\n # Restore definitions\n for name in original_defs:\n function.__globals__[name] = original_defs[name] # type: ignore\n self.globals[name] = original_defs[name]\n\n if self.log >= 3:\n print(f\"Fitness = {fitness}\")\n\n self.fitness_cache[key] = fitness\n return fitness",
"_____no_output_____"
]
],
[
[
"The helper function `toplevel_defs()` helps saving and restoring the environment before and after redefining the function under repair.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def toplevel_defs(self, tree: ast.AST) -> List[str]:\n \"\"\"Return a list of names of defined functions and classes in `tree`\"\"\"\n visitor = DefinitionVisitor()\n visitor.visit(tree)\n assert hasattr(visitor, 'definitions')\n return visitor.definitions",
"_____no_output_____"
],
[
"class DefinitionVisitor(NodeVisitor):\n def __init__(self) -> None:\n self.definitions: List[str] = []\n\n def add_definition(self, node: Union[ast.ClassDef, \n ast.FunctionDef, \n ast.AsyncFunctionDef]) -> None:\n self.definitions.append(node.name)\n\n def visit_FunctionDef(self, node: ast.FunctionDef) -> None:\n self.add_definition(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:\n self.add_definition(node)\n\n def visit_ClassDef(self, node: ast.ClassDef) -> None:\n self.add_definition(node)",
"_____no_output_____"
]
],
[
[
"Here's an example for `fitness()`:",
"_____no_output_____"
]
],
[
[
"repairer = Repairer(middle_debugger, log=1)",
"_____no_output_____"
],
[
"good_fitness = repairer.fitness(middle_tree())\ngood_fitness",
"_____no_output_____"
],
[
"# docassert\nassert good_fitness >= 0.99, \"fitness() failed\"",
"_____no_output_____"
],
[
"bad_middle_tree = ast.parse(\"def middle(x, y, z): return x\")\nbad_fitness = repairer.fitness(bad_middle_tree)\nbad_fitness",
"_____no_output_____"
],
[
"# docassert\nassert bad_fitness < 0.5, \"fitness() failed\"",
"_____no_output_____"
]
],
[
[
"#### Repairing\n\nNow for the actual `repair()` method, which creates a `population` and then evolves it until the fitness is 1.0 or the given number of iterations is spent.",
"_____no_output_____"
]
],
[
[
"import traceback",
"_____no_output_____"
],
[
"class Repairer(Repairer):\n def initial_population(self, size: int) -> List[ast.AST]:\n \"\"\"Return an initial population of size `size`\"\"\"\n return [self.target_tree] + \\\n [self.mutator.mutate(copy.deepcopy(self.target_tree))\n for i in range(size - 1)]\n\n def repair(self, population_size: int = POPULATION_SIZE, iterations: int = 100) -> \\\n Tuple[ast.AST, float]:\n \"\"\"\n Repair the function we collected test runs from.\n Use a population size of `population_size` and\n at most `iterations` iterations.\n Returns a pair (`ast`, `fitness`) where \n `ast` is the AST of the repaired function, and\n `fitness` is its fitness (between 0 and 1.0)\n \"\"\"\n self.validate()\n\n population = self.initial_population(population_size)\n\n last_key = ast.dump(self.target_tree)\n\n for iteration in range(iterations):\n population = self.evolve(population)\n\n best_tree = population[0]\n fitness = self.fitness(best_tree)\n\n if self.log:\n print(f\"Evolving population: \"\n f\"iteration{iteration:4}/{iterations} \"\n f\"fitness = {fitness:.5} \\r\", end=\"\")\n\n if self.log >= 2:\n best_key = ast.dump(best_tree)\n if best_key != last_key:\n print()\n print()\n self.log_tree(f\"New best code (fitness = {fitness}):\",\n best_tree)\n last_key = best_key\n\n if fitness >= 1.0:\n break\n\n if self.log:\n print()\n\n if self.log and self.log < 2:\n self.log_tree(f\"Best code (fitness = {fitness}):\", best_tree)\n\n best_tree = self.reduce(best_tree)\n fitness = self.fitness(best_tree)\n\n self.log_tree(f\"Reduced code (fitness = {fitness}):\", best_tree)\n\n return best_tree, fitness",
"_____no_output_____"
]
],
[
[
"#### Evolving\n\nThe evolution of our population takes place in the `evolve()` method. In contrast to the `evolve_middle()` function, above, we use crossover to create the offspring, which we still mutate afterwards.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def evolve(self, population: List[ast.AST]) -> List[ast.AST]:\n \"\"\"Evolve the candidate population by mutating and crossover.\"\"\"\n n = len(population)\n\n # Create offspring as crossover of parents\n offspring: List[ast.AST] = []\n while len(offspring) < n:\n parent_1 = copy.deepcopy(random.choice(population))\n parent_2 = copy.deepcopy(random.choice(population))\n try:\n self.crossover.crossover(parent_1, parent_2)\n except CrossoverError:\n pass # Just keep parents\n offspring += [parent_1, parent_2]\n\n # Mutate offspring\n offspring = [self.mutator.mutate(tree) for tree in offspring]\n\n # Add it to population\n population += offspring\n\n # Keep the fitter part of the population\n population.sort(key=self.fitness_key, reverse=True)\n population = population[:n]\n\n return population",
"_____no_output_____"
]
],
[
[
"A second difference is that we not only sort by fitness, but also by tree size – with equal fitness, a smaller tree thus will be favored. This helps keeping fixes and patches small.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def fitness_key(self, tree: ast.AST) -> Tuple[float, int]:\n \"\"\"Key to be used for sorting the population\"\"\"\n tree_size = len([node for node in ast.walk(tree)])\n return (self.fitness(tree), -tree_size)",
"_____no_output_____"
]
],
[
[
"#### Simplifying\n\nThe last step in repairing is simplifying the code. As demonstrated in the chapter on [reducing failure-inducing inputs](DeltaDebugger.ipynb), we can use delta debugging on code to get rid of superfluous statements. To this end, we convert the tree to lines, run delta debugging on them, and then convert it back to a tree.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def reduce(self, tree: ast.AST) -> ast.AST:\n \"\"\"Simplify `tree` using delta debugging.\"\"\"\n\n original_fitness = self.fitness(tree)\n source_lines = astor.to_source(tree).split('\\n')\n\n with self.reducer:\n self.test_reduce(source_lines, original_fitness)\n\n reduced_lines = self.reducer.min_args()['source_lines']\n reduced_source = \"\\n\".join(reduced_lines)\n\n return ast.parse(reduced_source)",
"_____no_output_____"
]
],
[
[
"As dicussed above, we simplify the code by having the test function (`test_reduce()`) declare reaching the maximum fitness obtained so far as a \"failure\". Delta debugging will then simplify the input as long as the \"failure\" (and hence the maximum fitness obtained) persists.",
"_____no_output_____"
]
],
[
[
"class Repairer(Repairer):\n def test_reduce(self, source_lines: List[str], original_fitness: float) -> None:\n \"\"\"Test function for delta debugging.\"\"\"\n\n try:\n source = \"\\n\".join(source_lines)\n tree = ast.parse(source)\n fitness = self.fitness(tree)\n assert fitness < original_fitness\n\n except AssertionError:\n raise\n except SyntaxError:\n raise\n except IndentationError:\n raise\n except Exception:\n # traceback.print_exc() # Uncomment to see internal errors\n raise",
"_____no_output_____"
]
],
[
[
"### End of Excursion",
"_____no_output_____"
],
[
"### Repairer in Action\n\nLet us go and apply `Repairer` in practice. We initialize it with `middle_debugger`, which has (still) collected the passing and failing runs for `middle_test()`. We also set `log` for some diagnostics along the way.",
"_____no_output_____"
]
],
[
[
"repairer = Repairer(middle_debugger, log=True)",
"_____no_output_____"
]
],
[
[
"We now invoke `repair()` to evolve our population. After a few iterations, we find a best tree with perfect fitness.",
"_____no_output_____"
]
],
[
[
"best_tree, fitness = repairer.repair()",
"_____no_output_____"
],
[
"print_content(astor.to_source(best_tree), '.py')",
"_____no_output_____"
],
[
"fitness",
"_____no_output_____"
],
[
"# docassert\nassert fitness >= 1.0",
"_____no_output_____"
]
],
[
[
"Again, we have a perfect solution. Here, we did not even need to simplify the code in the last iteration, as our `fitness_key()` function favors smaller implementations.",
"_____no_output_____"
],
[
"## Removing HTML Markup\n\nLet us apply `Repairer` on our other ongoing example, namely `remove_html_markup()`.",
"_____no_output_____"
]
],
[
[
"def remove_html_markup(s): # type: ignore\n tag = False\n quote = False\n out = \"\"\n\n for c in s:\n if c == '<' and not quote:\n tag = True\n elif c == '>' and not quote:\n tag = False\n elif c == '\"' or c == \"'\" and tag:\n quote = not quote\n elif not tag:\n out = out + c\n\n return out",
"_____no_output_____"
],
[
"def remove_html_markup_tree() -> ast.AST:\n return ast.parse(inspect.getsource(remove_html_markup))",
"_____no_output_____"
]
],
[
[
"To run `Repairer` on `remove_html_markup()`, we need a test and a test suite. `remove_html_markup_test()` raises an exception if applying `remove_html_markup()` on the given `html` string does not yield the `plain` string.",
"_____no_output_____"
]
],
[
[
"def remove_html_markup_test(html: str, plain: str) -> None:\n outcome = remove_html_markup(html)\n assert outcome == plain, \\\n f\"Got {repr(outcome)}, expected {repr(plain)}\"",
"_____no_output_____"
]
],
[
[
"Now for the test suite. We use a simple fuzzing scheme to create dozens of passing and failing test cases in `REMOVE_HTML_PASSING_TESTCASES` and `REMOVE_HTML_FAILING_TESTCASES`, respectively.",
"_____no_output_____"
],
[
"### Excursion: Creating HTML Test Cases",
"_____no_output_____"
]
],
[
[
"def random_string(length: int = 5, start: int = ord(' '), end: int = ord('~')) -> str:\n return \"\".join(chr(random.randrange(start, end + 1)) for i in range(length))",
"_____no_output_____"
],
[
"random_string()",
"_____no_output_____"
],
[
"def random_id(length: int = 2) -> str:\n return random_string(start=ord('a'), end=ord('z'))",
"_____no_output_____"
],
[
"random_id()",
"_____no_output_____"
],
[
"def random_plain() -> str:\n return random_string().replace('<', '').replace('>', '')",
"_____no_output_____"
],
[
"def random_string_noquotes() -> str:\n return random_string().replace('\"', '').replace(\"'\", '')",
"_____no_output_____"
],
[
"def random_html(depth: int = 0) -> Tuple[str, str]:\n prefix = random_plain()\n tag = random_id()\n\n if depth > 0:\n html, plain = random_html(depth - 1)\n else:\n html = plain = random_plain()\n\n attr = random_id()\n value = '\"' + random_string_noquotes() + '\"'\n postfix = random_plain()\n\n return f'{prefix}<{tag} {attr}={value}>{html}</{tag}>{postfix}', \\\n prefix + plain + postfix",
"_____no_output_____"
],
[
"random_html()",
"_____no_output_____"
],
[
"def remove_html_testcase(expected: bool = True) -> Tuple[str, str]:\n while True:\n html, plain = random_html()\n outcome = (remove_html_markup(html) == plain)\n if outcome == expected:\n return html, plain",
"_____no_output_____"
],
[
"REMOVE_HTML_TESTS = 100\nREMOVE_HTML_PASSING_TESTCASES = \\\n [remove_html_testcase(True) for i in range(REMOVE_HTML_TESTS)]\nREMOVE_HTML_FAILING_TESTCASES = \\\n [remove_html_testcase(False) for i in range(REMOVE_HTML_TESTS)]",
"_____no_output_____"
]
],
[
[
"### End of Excursion",
"_____no_output_____"
],
[
"Here is a passing test case:",
"_____no_output_____"
]
],
[
[
"REMOVE_HTML_PASSING_TESTCASES[0]",
"_____no_output_____"
],
[
"html, plain = REMOVE_HTML_PASSING_TESTCASES[0]\nremove_html_markup_test(html, plain)",
"_____no_output_____"
]
],
[
[
"Here is a failing test case (containing a double quote in the plain text)",
"_____no_output_____"
]
],
[
[
"REMOVE_HTML_FAILING_TESTCASES[0]",
"_____no_output_____"
],
[
"with ExpectError():\n html, plain = REMOVE_HTML_FAILING_TESTCASES[0]\n remove_html_markup_test(html, plain)",
"_____no_output_____"
]
],
[
[
"We run our tests, collecting the outcomes in `html_debugger`.",
"_____no_output_____"
]
],
[
[
"html_debugger = OchiaiDebugger()",
"_____no_output_____"
],
[
"for html, plain in (REMOVE_HTML_PASSING_TESTCASES + \n REMOVE_HTML_FAILING_TESTCASES):\n with html_debugger:\n remove_html_markup_test(html, plain)",
"_____no_output_____"
]
],
[
[
"The suspiciousness distribution will not be of much help here – pretty much all lines in `remove_html_markup()` have the same suspiciousness.",
"_____no_output_____"
]
],
[
[
"html_debugger",
"_____no_output_____"
]
],
[
[
"Let us create our repairer and run it.",
"_____no_output_____"
]
],
[
[
"html_repairer = Repairer(html_debugger, log=True)",
"_____no_output_____"
],
[
"best_tree, fitness = html_repairer.repair(iterations=20)",
"_____no_output_____"
],
[
"# docassert\nassert fitness < 1.0",
"_____no_output_____"
]
],
[
[
"We see that the \"best\" code is still our original code, with no changes. And we can set `iterations` to 50, 100, 200... – our `Repairer` won't be able to repair it.",
"_____no_output_____"
]
],
[
[
"quiz(\"Why couldn't `Repairer()` repair `remove_html_markup()`?\",\n [\n \"The population is too small!\",\n \"The suspiciousness is too evenly distributed!\",\n \"We need more test cases!\",\n \"We need more iterations!\",\n \"There is no statement in the source with a correct condition!\",\n \"The population is too big!\",\n ], '5242880 >> 20')",
"_____no_output_____"
]
],
[
[
"You can explore all of the hypotheses above by changing the appropriate parameters, but you won't be able to change the outcome. The problem is that, unlike `middle()`, there is no statement (or combination thereof) in `remove_html_markup()` that could be used to make the failure go away. For this, we need to mutate another aspect of the code, which we will explore in the next section.",
"_____no_output_____"
],
[
"## Mutating Conditions\n\nThe `Repairer` class is very configurable. The individual steps in automated repair can all be replaced by providing own classes in the keyword arguments of its `__init__()` constructor:\n\n* To change fault localization, pass a different `debugger` that is a subclass of `RankingDebugger`.\n* To change the mutation operator, set `mutator_class` to a subclass of `StatementMutator`.\n* To change the crossover operator, set `crossover_class` to a subclass of `CrossoverOperator`.\n* To change the reduction algorithm, set `reducer_class` to a subclass of `Reducer`.\n\nIn this section, we will explore how to extend the mutation operator such that it can mutate _conditions_ for control constructs such as `if`, `while`, or `for`. To this end, we introduce a new class `ConditionMutator` subclassing `StatementMutator`.",
"_____no_output_____"
],
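   [
    "To see where such customizations hook in, here is what an instantiation with all default classes spelled out explicitly would look like (a sketch only; `debugger` stands for any `RankingDebugger`, and the defaults shown are the classes defined earlier in this chapter):\n\n```python\nrepairer = Repairer(debugger,\n                    mutator_class=StatementMutator,\n                    crossover_class=CrossoverOperator,\n                    reducer_class=DeltaDebugger,\n                    log=False)\n```",
    "_____no_output_____"
   ],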
[
"### Collecting Conditions\n\nLet us start with a few simple supporting functions. The function `all_conditions()` retrieves all control conditions from an AST.",
"_____no_output_____"
]
],
[
[
"def all_conditions(trees: Union[ast.AST, List[ast.AST]],\n tp: Optional[Type] = None) -> List[ast.expr]:\n \"\"\"\n Return all conditions from the AST (or AST list) `trees`.\n If `tp` is given, return only elements of that type.\n \"\"\"\n\n if not isinstance(trees, list):\n assert isinstance(trees, ast.AST)\n trees = [trees]\n\n visitor = ConditionVisitor()\n for tree in trees:\n visitor.visit(tree)\n conditions = visitor.conditions\n if tp is not None:\n conditions = [c for c in conditions if isinstance(c, tp)]\n\n return conditions",
"_____no_output_____"
]
],
[
[
"`all_conditions()` uses a `ConditionVisitor` class to walk the tree and collect the conditions:",
"_____no_output_____"
]
],
[
[
"class ConditionVisitor(NodeVisitor):\n def __init__(self) -> None:\n self.conditions: List[ast.expr] = []\n self.conditions_seen: Set[str] = set()\n super().__init__()\n\n def add_conditions(self, node: ast.AST, attr: str) -> None:\n elems = getattr(node, attr, [])\n if not isinstance(elems, list):\n elems = [elems]\n\n elems = cast(List[ast.expr], elems)\n\n for elem in elems:\n elem_str = astor.to_source(elem)\n if elem_str not in self.conditions_seen:\n self.conditions.append(elem)\n self.conditions_seen.add(elem_str)\n\n def visit_BoolOp(self, node: ast.BoolOp) -> ast.AST:\n self.add_conditions(node, 'values')\n return super().generic_visit(node)\n\n def visit_UnaryOp(self, node: ast.UnaryOp) -> ast.AST:\n if isinstance(node.op, ast.Not):\n self.add_conditions(node, 'operand')\n return super().generic_visit(node)\n\n def generic_visit(self, node: ast.AST) -> ast.AST:\n if hasattr(node, 'test'):\n self.add_conditions(node, 'test')\n return super().generic_visit(node)",
"_____no_output_____"
]
],
[
[
"Here are all the conditions in `remove_html_markup()`. This is some material to construct new conditions from.",
"_____no_output_____"
]
],
[
[
"[astor.to_source(cond).strip()\n for cond in all_conditions(remove_html_markup_tree())]",
"_____no_output_____"
]
],
[
[
"### Mutating Conditions\n\nHere comes our `ConditionMutator` class. We subclass from `StatementMutator` and set an attribute `self.conditions` containing all the conditions in the source. The method `choose_condition()` randomly picks a condition.",
"_____no_output_____"
]
],
[
[
"class ConditionMutator(StatementMutator):\n \"\"\"Mutate conditions in an AST\"\"\"\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Constructor. Arguments are as with `StatementMutator` constructor.\"\"\"\n super().__init__(*args, **kwargs)\n self.conditions = all_conditions(self.source)\n if self.log:\n print(\"Found conditions\",\n [astor.to_source(cond).strip() \n for cond in self.conditions])\n\n def choose_condition(self) -> ast.expr:\n \"\"\"Return a random condition from source.\"\"\"\n return copy.deepcopy(random.choice(self.conditions))",
"_____no_output_____"
]
],
[
[
"The actual mutation takes place in the `swap()` method. If the node to be replaced has a `test` attribute (i.e. a controlling predicate), then we pick a random condition `cond` from the source and randomly chose from:\n\n* **set**: We change `test` to `cond`.\n* **not**: We invert `test`.\n* **and**: We replace `test` by `cond and test`.\n* **or**: We replace `test` by `cond or test`.\n\nOver time, this might lead to operators propagating across the population.",
"_____no_output_____"
]
],
[
[
"class ConditionMutator(ConditionMutator):\n def choose_bool_op(self) -> str:\n return random.choice(['set', 'not', 'and', 'or'])\n\n def swap(self, node: ast.AST) -> ast.AST:\n \"\"\"Replace `node` condition by a condition from `source`\"\"\"\n if not hasattr(node, 'test'):\n return super().swap(node)\n\n node = cast(ast.If, node)\n\n cond = self.choose_condition()\n new_test = None\n\n choice = self.choose_bool_op()\n\n if choice == 'set':\n new_test = cond\n elif choice == 'not':\n new_test = ast.UnaryOp(op=ast.Not(), operand=node.test)\n elif choice == 'and':\n new_test = ast.BoolOp(op=ast.And(), values=[cond, node.test])\n elif choice == 'or':\n new_test = ast.BoolOp(op=ast.Or(), values=[cond, node.test])\n else:\n raise ValueError(\"Unknown boolean operand\")\n\n if new_test:\n # ast.copy_location(new_test, node)\n node.test = new_test\n\n return node",
"_____no_output_____"
]
],
[
[
"We can use the mutator just like `StatementMutator`, except that some of the mutations will also include new conditions:",
"_____no_output_____"
]
],
[
[
"mutator = ConditionMutator(source=all_statements(remove_html_markup_tree()),\n log=True)",
"_____no_output_____"
],
[
"for i in range(10):\n new_tree = mutator.mutate(remove_html_markup_tree())",
"_____no_output_____"
]
],
[
[
"Let us put our new mutator to action, again in a `Repairer()`. To activate it, all we need to do is to pass it as `mutator_class` keyword argument.",
"_____no_output_____"
]
],
[
[
"condition_repairer = Repairer(html_debugger,\n mutator_class=ConditionMutator,\n log=2)",
"_____no_output_____"
]
],
[
[
"We might need more iterations for this one. Let us see...",
"_____no_output_____"
]
],
[
[
"best_tree, fitness = condition_repairer.repair(iterations=200)",
"_____no_output_____"
],
[
"repaired_source = astor.to_source(best_tree)",
"_____no_output_____"
],
[
"print_content(repaired_source, '.py')",
"_____no_output_____"
],
[
"# docassert\nassert fitness >= 1.0",
"_____no_output_____"
]
],
[
[
"Success again! We have automatically repaired `remove_html_markup()` – the resulting code passes all tests, including those that were previously failing.",
"_____no_output_____"
],
[
"Again, we can present the fix as a patch:",
"_____no_output_____"
]
],
[
[
"original_source = astor.to_source(remove_html_markup_tree())",
"_____no_output_____"
],
[
"for patch in diff(original_source, repaired_source):\n print_patch(patch)",
"_____no_output_____"
]
],
[
[
"However, looking at the patch, one may come up with doubts.",
"_____no_output_____"
]
],
[
[
"quiz(\"Is this actually the best solution?\",\n [\n \"Yes, sure, of course. Why?\",\n \"Err - what happened to single quotes?\"\n ], 1 << 1)",
"_____no_output_____"
]
],
[
[
"Indeed – our solution does not seem to handle single quotes anymore. Why is that so?",
"_____no_output_____"
]
],
[
[
"quiz(\"Why aren't single quotes handled in the solution?\",\n [\n \"Because they're not important. \"\n \"I mean, y'know, who uses 'em anyway?\",\n \"Because they are not part of our tests? \"\n \"Let me look up how they are constructed...\"\n ], 1 << 1)",
"_____no_output_____"
]
],
[
[
"Correct! Our test cases do not include single quotes – at least not in the interior of HTML tags – and thus, automatic repair did not care to preserve their handling.",
"_____no_output_____"
],
[
"How can we fix this? An easy way is to include an appropriate test case in our set – a test case that passes with the original `remove_html_markup()`, yet fails with the \"repaired\" `remove_html_markup()` as whosn above.",
"_____no_output_____"
]
],
[
[
"with html_debugger:\n remove_html_markup_test(\"<foo quote='>abc'>me</foo>\", \"me\")",
"_____no_output_____"
]
],
[
[
"Let us repeat the repair with the extended test set:",
"_____no_output_____"
]
],
[
[
"best_tree, fitness = condition_repairer.repair(iterations=200)",
"_____no_output_____"
]
],
[
[
"Here is the final tree:",
"_____no_output_____"
]
],
[
[
"print_content(astor.to_source(best_tree), '.py')",
"_____no_output_____"
]
],
[
[
"And here is its fitness:",
"_____no_output_____"
]
],
[
[
"fitness",
"_____no_output_____"
],
[
"# docassert\nassert fitness >= 1.0",
"_____no_output_____"
]
],
[
[
"The revised candidate now passes _all_ tests (including the tricky quote test we added last). Its condition now properly checks for `tag` _and_ both quotes. (The `tag` inside the parentheses is still redundant, but so be it.) From this example, we can learn a few lessons about the possibilities and risks of automated repair:\n\n* First, automatic repair is highly dependent on the quality of the checking tests. The risk is that the repair may overspecialize towards the test.\n* Second, when based on \"plastic surgery\", automated repair is highly dependent on the sources that program fragments are chosen from. If there is a hint of a solution somewhere in the code, there is a chance that automated repair will catch it up.\n* Third, automatic repair is a deeply heuristic approach. Its behavior will vary widely with any change to the parameters (and the underlying random number generators).\n* Fourth, automatic repair can take a long time. The examples we have in this chapter take less than a minute to compute, and neither Python nor our implementation is exactly fast. But as the search space grows, automated repair will take much longer.\n\nOn the other hand, even an incomplete automated repair candidate can be much better than nothing at all – it may provide all the essential ingredients (such as the location or the involved variables) for a successful fix. When users of automated repair techniques are aware of its limitations and its assumptions, there is lots of potential in automated repair. Enjoy!",
"_____no_output_____"
],
[
"## Limitations",
"_____no_output_____"
],
[
"The `Repairer` class is tested on our example programs, but not much more. Things that do not work include\n\n* Functions with inner functions are not repaired.",
"_____no_output_____"
],
[
"## Synopsis",
"_____no_output_____"
],
[
"This chapter provides tools and techniques for automated repair of program code. The `Repairer` class takes a `RankingDebugger` debugger as input (such as `OchiaiDebugger` from the [chapter on statistical debugging](StatisticalDebugger.ipynb). A typical setup looks like this:\n\n```python\nfrom debuggingbook.StatisticalDebugger import OchiaiDebugger\n\ndebugger = OchiaiDebugger()\nfor inputs in TESTCASES:\n with debugger:\n test_foo(inputs)\n...\n\nrepairer = Repairer(debugger)\n```\nHere, `test_foo()` is a function that raises an exception if the tested function `foo()` fails. If `foo()` passes, `test_foo()` should not raise an exception.",
"_____no_output_____"
],
[
"The `repair()` method of a `Repairer` searches for a repair of the code covered in the debugger (except for methods whose name starts or ends in `test`, such that `foo()`, not `test_foo()` is repaired). `repair()` returns the best fix candidate as a pair `(tree, fitness)` where `tree` is a [Python abstract syntax tree](http://docs.python.org/3/library/ast) (AST) of the fix candidate, and `fitness` is the fitness of the candidate (a value between 0 and 1). A `fitness` of 1.0 means that the candidate passed all tests. A typical usage looks like this:\n\n```python\nimport astor\n\ntree, fitness = repairer.repair()\nprint(astor.to_source(tree), fitness)\n```",
"_____no_output_____"
],
[
"Here is a complete example for the `middle()` program. This is the original source code of `middle()`:",
"_____no_output_____"
]
],
[
[
"# ignore\nprint_content(middle_source, '.py')",
"_____no_output_____"
]
],
[
[
"We set up a function `middle_test()` that tests it. The `middle_debugger` collects testcases and outcomes:",
"_____no_output_____"
]
],
[
[
"middle_debugger = OchiaiDebugger()",
"_____no_output_____"
],
[
"for x, y, z in MIDDLE_PASSING_TESTCASES + MIDDLE_FAILING_TESTCASES:\n with middle_debugger:\n middle_test(x, y, z)",
"_____no_output_____"
]
],
[
[
"The repairer is instantiated with the debugger used (`middle_debugger`):",
"_____no_output_____"
]
],
[
[
"middle_repairer = Repairer(middle_debugger)",
"_____no_output_____"
]
],
[
[
"The `repair()` method of the repairer attempts to repair the function invoked by the test (`middle()`).",
"_____no_output_____"
]
],
[
[
"tree, fitness = middle_repairer.repair()",
"_____no_output_____"
]
],
[
[
"The returned AST `tree` can be output via `astor.to_source()`:",
"_____no_output_____"
]
],
[
[
"print(astor.to_source(tree))",
"_____no_output_____"
]
],
[
[
"The `fitness` value shows how well the repaired program fits the tests. A fitness value of 1.0 shows that the repaired program satisfies all tests.",
"_____no_output_____"
]
],
[
[
"fitness",
"_____no_output_____"
],
[
"# docassert\nassert fitness >= 1.0",
"_____no_output_____"
]
],
[
[
"Hence, the above program indeed is a perfect repair in the sense that all previously failing tests now pass – our repair was successful.",
"_____no_output_____"
],
[
"Here are the classes defined in this chapter. A `Repairer` repairs a program, using a `StatementMutator` and a `CrossoverOperator` to evolve a population of candidates.",
"_____no_output_____"
]
],
[
[
"# ignore\nfrom ClassDiagram import display_class_hierarchy",
"_____no_output_____"
],
[
"# ignore\ndisplay_class_hierarchy([Repairer, ConditionMutator, CrossoverOperator],\n abstract_classes=[\n NodeVisitor,\n NodeTransformer\n ],\n public_methods=[\n Repairer.__init__,\n Repairer.repair,\n StatementMutator.__init__,\n StatementMutator.mutate,\n ConditionMutator.__init__,\n CrossoverOperator.__init__,\n CrossoverOperator.crossover,\n ],\n project='debuggingbook')",
"_____no_output_____"
]
],
[
[
"## Lessons Learned\n\n* Automated repair based on genetic optimization uses five ingredients:\n 1. A _test suite_ to determine passing and failing tests\n 2. _Defect localization_ (typically obtained from [statistical debugging](StatisticalDebugger.ipynb) with the test suite) to determine potential locations to be fixed\n 3. _Random code mutations_ and _crossover operations_ to create and evolve a population of inputs\n 4. A _fitness function_ and a _selection strategy_ to determine the part of the population that should be evolved further\n 5. A _reducer_ such as [delta debugging](DeltaDebugger.ipynb) to simplify the final candidate with the highest fitness.\n* The result of automated repair is a _fix candidate_ with the highest fitness for the given tests.\n* A _fix candidate_ is not guaranteed to be correct or optimal, but gives important hints on how to fix the program.\n* All of the above ingredients offer plenty of settings and alternatives to experiment with.",
"_____no_output_____"
],
[
"## Background\n\nThe seminal work in automated repair is [GenProg](https://squareslab.github.io/genprog-code/) \\cite{LeGoues2012}, which heavily inspired our `Repairer` implementation. Major differences between GenProg and `Repairer` include:\n\n* GenProg includes its own defect localization (which is also dynamically updated), whereas `Repairer` builds on earlier statistical debugging.\n* GenProg can apply multiple mutations on programs (or none at all), whereas `Repairer` applies exactly one mutation.\n* The `StatementMutator` used by `Repairer` includes various special cases for program structures (`if`, `for`, `while`...), whereas GenProg operates on statements only.\n* GenProg has been tested on large production programs.\n\nWhile GenProg is _the_ seminal work in the area (and arguably the most important software engineering research contribution of the 2010s), there have been a number of important extensions of automated repair. These include:\n\n* *AutoFix* \\cite{Pei2014} leverages _program contracts_ (pre- and postconditions) to generate tests and assertions automatically. Not only do such [assertions](Assertions.ipynb) help in fault localization, they also allow for much better validation of fix candidates.\n* *SemFix* \\cite{Nguyen2013} and its successor *[Angelix](http://angelix.io)* \\cite{Mechtaev2016}\nintroduce automated program repair based on _symbolic analysis_ rather than genetic optimization. This allows to leverage program semantics, which GenProg does not consider.\n\nTo learn more about automated program repair, see [program-repair.org](http://program-repair.org), the community page dedicated to research in program repair.",
"_____no_output_____"
],
[
"## Exercises",
"_____no_output_____"
],
[
"### Exercise 1: Automated Repair Parameters\n\nAutomated Repair is influenced by a large number of design choices – the size of the population, the number of iterations, the genetic optimization strategy, and more. How do changes to these design choices affect its effectiveness? \n\n* Consider the constants defined in this chapter (such as `POPULATION_SIZE` or `WEIGHT_PASSING` vs. `WEIGHT_FAILING`). How do changes affect the effectiveness of automated repair?\n* As an effectiveness metric, consider the number of iterations it takes to produce a fix candidate.\n* Since genetic optimization is a random algorithm, you need to determine effectiveness averages over a large number of runs (say, 100).",
"_____no_output_____"
],
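   [
    "One way to start on this exercise (a sketch; the helper name, the `runs` default, and the use of full fitness as success criterion are arbitrary choices, not part of the chapter): run `repair()` repeatedly under a fixed budget and record how often it reaches full fitness. To count iterations directly, you would extend `repair()` to return or log the iteration at which it stopped.\n\n```python\ndef repair_success_rate(debugger, runs: int = 20, **repair_args) -> float:\n    # Fraction of runs in which repair() reaches full fitness\n    successes = 0\n    for _ in range(runs):\n        tree, fitness = Repairer(debugger).repair(**repair_args)\n        successes += int(fitness >= 1.0)\n    return successes / runs\n```",
    "_____no_output_____"
   ],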
[
"### Exercise 2: Elitism\n\n[_Elitism_](https://en.wikipedia.org/wiki/Genetic_algorithm#Elitism) (also known as _elitist selection_) is a variant of genetic selection in which a small fraction of the fittest candidates of the last population are included unchanged in the offspring.\n\n* Implement elitist selection by subclassing the `evolve()` method. Experiment with various fractions (5%, 10%, 25%) of \"elites\" and see how this improves results.",
"_____no_output_____"
],
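   [
    "A possible starting point (a sketch, not a reference solution; the class name and the `elite_fraction` default are arbitrary): set aside unchanged copies of the fittest candidates before evolving, then guarantee them a place in the next generation.\n\n```python\nclass ElitistRepairer(Repairer):\n    def __init__(self, *args: Any, elite_fraction: float = 0.1,\n                 **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        self.elite_fraction = elite_fraction\n\n    def evolve(self, population: List[ast.AST]) -> List[ast.AST]:\n        n = len(population)\n        n_elite = max(1, int(n * self.elite_fraction))\n\n        # Keep unchanged copies of the fittest candidates (the 'elites')\n        population.sort(key=self.fitness_key, reverse=True)\n        elites = [copy.deepcopy(tree) for tree in population[:n_elite]]\n\n        # Evolve as usual, then let the elites replace the weakest offspring\n        population = super().evolve(population)\n        population[-n_elite:] = elites\n        population.sort(key=self.fitness_key, reverse=True)\n        return population\n```",
    "_____no_output_____"
   ],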
[
"### Exercise 3: Evolving Values\n\nFollowing the steps of `ConditionMutator`, implement a `ValueMutator` class that replaces one constant value by another one found in the source (say, `0` by `1` or `True` by `False`).\n\nFor validation, consider the following failure in the `square_root()` function from the [chapter on assertions](Assertions.ipynb):",
"_____no_output_____"
]
],
[
[
"from Assertions import square_root # minor dependency",
"_____no_output_____"
],
[
"with ExpectError():\n square_root_of_zero = square_root(0)",
"_____no_output_____"
]
],
[
[
"Can your `ValueMutator` automatically fix this failure?",
"_____no_output_____"
],
[
"**Solution.** Your solution will be effective if it also includes named constants such as `None`.",
"_____no_output_____"
]
],
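  [
   [
    "One possible shape for such a mutator (a sketch under the assumption of Python 3.8 or later, where literals parse as `ast.Constant` nodes; the names and details are illustrative, not a reference solution):\n\n```python\ndef all_constants(trees: List[ast.AST]) -> List[Any]:\n    # Collect every constant value occurring in the given trees\n    return [node.value for tree in trees\n            for node in ast.walk(tree)\n            if isinstance(node, ast.Constant)]\n\nclass ValueMutator(StatementMutator):\n    def __init__(self, *args: Any, **kwargs: Any) -> None:\n        super().__init__(*args, **kwargs)\n        self.values = all_constants(self.source)\n\n    def swap(self, node: ast.AST) -> ast.AST:\n        # Replace one constant within `node` by another source constant\n        constants = [n for n in ast.walk(node)\n                     if isinstance(n, ast.Constant)]\n        if not constants:\n            return super().swap(node)\n\n        target = random.choice(constants)\n        candidates = [v for v in self.values if v != target.value]\n        if not candidates:\n            return super().swap(node)\n\n        target.value = random.choice(candidates)\n        return node\n```\n\nSince Python 3.8, named constants such as `None`, `True`, and `False` also parse as `ast.Constant`, so they end up in `self.values` automatically (see the note above).",
    "_____no_output_____"
   ]
  ],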
[
[
"import math",
"_____no_output_____"
],
[
"def square_root_fixed(x): # type: ignore\n assert x >= 0 # precondition\n\n approx = 0 # <-- FIX: Change `None` to 0\n guess = x / 2\n while approx != guess:\n approx = guess\n guess = (approx + x / approx) / 2\n\n assert math.isclose(approx * approx, x)\n return approx",
"_____no_output_____"
],
[
"square_root_fixed(0)",
"_____no_output_____"
]
],
[
[
"### Exercise 4: Evolving Variable Names\n\nFollowing the steps of `ConditionMutator`, implement a `IdentifierMutator` class that replaces one identifier by another one found in the source (say, `y` by `x`). Does it help fixing the `middle()` error?",
"_____no_output_____"
],
[
"### Exercise 5: Parallel Repair\n\nAutomatic Repair is a technique that is embarrassingly parallel – all tests for one candidate can all be run in parallel, and all tests for _all_ candidates can also be run in parallel. Set up an infrastructure for running concurrent tests using Pythons [asyncio](https://docs.python.org/3/library/asyncio.html) library.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7deb16955ce72765546796ba5c1f117969fcd09 | 193,359 | ipynb | Jupyter Notebook | samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | samples/AppInsights/TroubleShootingGuides/Web-services-TSG-version.ipynb | dmc-dk/BCTech | 4ac4ff95a4383009e814fd8f5750247bbafe9c41 | [
"MIT"
] | null | null | null | 57.308536 | 1,530 | 0.163794 | [
[
[
"# Dynamics 365 Business Central Trouble Shooting Guide (TSG) - Web services\r\n\r\nThis notebook contains Kusto queries that can help getting to the root cause of an issue with web services for an environment. \r\n\r\nEach section in the notebook contains links to relevant documentation from the performance tuning guide [aka.ms/bcperformance](aka.ms/bcperformance), telemetry documentation in [aka.ms/bctelemetry](aka.ms/bctelemetry), as well as Kusto queries that help dive into a specific area.\r\n\r\nNB! Some of the signal used in this notebook is only available in newer versions of Business Central, so check the version of your environment if some sections do not return any data. The signal documentation states in which version a given signal was introduced.\r\n\r\n**NB!** Telemetry for SOAP endpoints does not emit HTTP status code. So the sections that query for different values of HTTP status will not show results for these requests.",
"_____no_output_____"
],
[
"## 1. Connect to Application Insights\r\nFirst you need to set the notebook Kernel to Python3, load the KQLmagic module (did you install it?) and connect to your Application Insights resource (get appid and appkey from the API access page in the Application Insights portal)",
"_____no_output_____"
]
],
[
[
"# load the KQLmagic module\r\n%reload_ext Kqlmagic\r\n\r\n# Connect to the Application Insights API\r\n%kql appinsights://appid='<add app id from the Application Insights portal>';appkey='<add API key from the Application Insights portal>'",
"_____no_output_____"
]
],
[
[
"## 2. Define filters\r\nThis workbook is designed for troubleshooting a single environment. Please provide values for aadTenantId and environmentName: ",
"_____no_output_____"
]
],
[
[
"aadTenantId = \"<Add AAD tenant id here>\"\r\nenvironmentName = \"<add environment name here>\"",
"_____no_output_____"
]
],
[
[
"# Analyze web service usage\r\nNow you can run Kusto queries to look for possible root causes for issues about web services.\r\n\r\nEither click **Run All** above to run all sections, or scroll down to the type of analysis you want to do and manually run queries",
"_____no_output_____"
],
[
"## Web service requests overview\r\n\r\nPerformance tuning guide: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services\r\n\r\nWeb service telemetry docs: https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace\r\n\r\nKQL sample: https://github.com/microsoft/BCTech/blob/master/samples/AppInsights/KQL/RawData/WebServiceCalls.kql",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n| extend category = tostring( customDimensions.category )\r\n| summarize request_count=count() by category, bin(timestamp, 1d)\r\n| render timechart title= 'Number of web service requests by category'",
"_____no_output_____"
],
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n| extend category = tostring( customDimensions.category )\r\n , executionTimeInMS = toreal(totimespan(customDimensions.serverExecutionTime))/10000 //the datatype for executionTime is timespan \r\n| summarize count=count() by executionTime_ms = bin(executionTimeInMS, 100), category\r\n| order by category, executionTime_ms asc\r\n| render columnchart with (ycolumns = count, series = category, title= 'Execution time (in milliseconds) of web service requests by category' ) \r\n",
"_____no_output_____"
]
],
[
[
"## Web service throttling\r\nOperational Limits for Business Central Online: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#query-limits\r\n\r\nTelemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace",
"_____no_output_____"
]
],
[
[
"%%kql\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n| extend httpStatusCode = tostring( customDimensions.httpStatusCode )\r\n| summarize count() by bin(timestamp, 1d), httpStatusCode\r\n| render timechart title= 'Number of web service requests by http status code'",
"_____no_output_____"
]
],
[
[
"## Web service requests (Access denied)\r\nThe user who made the request doesn't have proper permissions. For more information, see \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/web-services-authentication\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/ui-define-granular-permissions\r\n\r\nTelemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace",
"_____no_output_____"
]
],
[
[
"%%kql\r\n//\r\n// Top 10 endpoint requests with access denied\r\n//\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n and customDimensions.httpStatusCode == '401'\r\n| limit 10",
"_____no_output_____"
]
],
[
[
"## Web service requests (Not found)\r\nThe given endpoint was not valid\r\n\r\nSee\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/webservices/publish-web-service\r\n\r\nTelemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace",
"_____no_output_____"
]
],
[
[
"%%kql\r\n//\r\n// Top 10 non-valid endpoints called\r\n//\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n and customDimensions.httpStatusCode == '404'\r\n| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )\r\n| order by number_of_requests desc\r\n| limit 10",
"_____no_output_____"
]
],
[
[
"## Web service requests (Request timed out)\r\nThe request took longer to complete than the threshold configured for the service\r\n\r\nSee\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices\r\n\r\nTelemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace\r\n\r\nPerformance tuning guide (you need to tune these endpoints to make them go faster)\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services",
"_____no_output_____"
]
],
[
[
"%%kql\r\n//\r\n// Top 10 endpoints that times out\r\n//\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n and customDimensions.httpStatusCode == '408'\r\n| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )\r\n| order by number_of_requests desc\r\n| limit 10",
"_____no_output_____"
]
],
[
[
"## Web service requests (Too Many Requests)\r\nThe request exceeded the maximum simultaneous requests allowed on the service.\r\n\r\nSee\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/operational-limits-online#ODataServices\r\n\r\nTelemetry docs: \r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/administration/telemetry-webservices-trace\r\n\r\nPerformance tuning guide (you need to make your web service client backoff and retry)\r\n* https://docs.microsoft.com/en-us/dynamics365/business-central/dev-itpro/performance/performance-developer#writing-efficient-web-services",
"_____no_output_____"
]
],
[
[
"%%kql\r\n//\r\n// Top 10 endpoints get throttled\r\n//\r\nlet _aadTenantId = aadTenantId;\r\nlet _environmentName = environmentName;\r\ntraces\r\n| where 1==1 \r\n and customDimensions.aadTenantId == _aadTenantId\r\n and customDimensions.environmentName == _environmentName\r\n and customDimensions.eventId == 'RT0008'\r\n and timestamp > ago(7d)\r\n and customDimensions.httpStatusCode == '426'\r\n| summarize number_of_requests=count() by endpoint = tostring( customDimensions.endpoint ), alObjectName = tostring( customDimensions.alObjectName ), alObjectId = tostring( customDimensions.alObjectId )\r\n| order by number_of_requests desc\r\n| limit 10",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7deb1c8822edc3f9bdc4c7a93415e902eb8ec8a | 10,903 | ipynb | Jupyter Notebook | 14FinalCapstoneProject.ipynb | MBadriNarayanan/UdemyPython | 8e027984ca2d859918b1bf9071be30023c34a023 | [
"MIT"
] | null | null | null | 14FinalCapstoneProject.ipynb | MBadriNarayanan/UdemyPython | 8e027984ca2d859918b1bf9071be30023c34a023 | [
"MIT"
] | null | null | null | 14FinalCapstoneProject.ipynb | MBadriNarayanan/UdemyPython | 8e027984ca2d859918b1bf9071be30023c34a023 | [
"MIT"
] | null | null | null | 28.319481 | 121 | 0.276988 | [
[
[
"import math\n\nclass Complex() :\n \n def __init__(self,r = 5,i = 5) :\n \n self.real = r\n \n self.imaginary = i\n \n if self.imaginary >= 0 :\n \n self.sign = '+'\n \n else :\n \n self.sign = '-'\n \n def __str__(self) :\n \n return '\\n The Complex Number Is : {} {} {}i \\n'.format(self.real,self.sign,abs(self.imaginary))\n \n def plus(self,b) :\n \n c = Complex()\n \n c.real = self.real + b.real\n \n c.imaginary = self.imaginary + b.imaginary \n \n if c.imaginary >= 0 :\n \n c.sign = '+'\n \n else :\n \n c.sign = '-'\n \n return c\n \n def minus(self,b) :\n \n c = Complex()\n \n c.real = self.real - b.real\n \n c.imaginary = self.imaginary - b.imaginary \n \n if c.imaginary >= 0 :\n \n c.sign = '+'\n \n else :\n \n c.sign = '-'\n \n return c\n \n def multiply(self,b) :\n \n c = Complex()\n \n c.real = (self.real * b.real) - (self.imaginary * b.imaginary) \n \n c.imaginary = (self.real * b.imaginary) + (b.real * self.imaginary) \n \n if c.imaginary >=0 :\n \n c.sign = '+'\n \n else :\n \n c.sign = '-'\n \n return c\n \n def divide(self,b) :\n \n c = Complex()\n \n c.real = ((self.real * b.real) + (b.imaginary * self.imaginary))/((self.real ** 2) + (b.real ** 2))\n \n c.imaginary = ((self.imaginary * b.real) - (self.real * b.imaginary))/((self.real ** 2) + (b.real ** 2))\n \n if c.imaginary >=0 :\n \n c.sign = '+'\n \n else :\n \n c.sign = '-'\n \n return c\n ",
"_____no_output_____"
],
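[
"# Added example: a quick sanity check of the Complex class against Python's built-in complex type.\n# It assumes the Complex class defined above; the 1e-9 tolerance is an arbitrary choice.\na = Complex(3, 4)\nb = Complex(5, -6)\nreference = {'plus': (3+4j) + (5-6j), 'minus': (3+4j) - (5-6j), 'multiply': (3+4j) * (5-6j), 'divide': (3+4j) / (5-6j)}\nfor op, ref in reference.items():\n    result = getattr(a, op)(b)\n    assert abs(result.real - ref.real) < 1e-9 and abs(result.imaginary - ref.imag) < 1e-9, op\nprint('All four operations match the built-in complex results.')",
"_____no_output_____"
],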
[
"def get_real_imaginary() :\n \n real = int(input(' Enter Real Part : '))\n \n imaginary = int(input(' Enter Imaginary Part : '))\n \n return (real,imaginary)",
"_____no_output_____"
],
[
"def main() :\n \n print(' Complex Number Operations \\n\\n')\n \n choice = 1\n \n while choice == 1:\n \n \n print(' 1. Input And Display \\n')\n \n print(' 2. Addition \\n')\n \n print(' 3. Subtraction \\n')\n \n print(' 4. Multiplication \\n')\n \n print(' 5. Division \\n')\n \n print(' 6. Exit \\n')\n \n option = int(input(' Enter Choice : '))\n \n if option == 1 :\n \n print('\\n\\n Input and Display \\n\\n')\n \n r,i = get_real_imaginary()\n \n a = Complex(r,i)\n \n print(a)\n \n elif option == 2 :\n \n \n print('\\n\\n Addition \\n\\n')\n \n r,i = get_real_imaginary()\n \n a = Complex(r,i)\n \n r,i = get_real_imaginary()\n \n b = Complex(r,i)\n \n c = a.plus(b)\n \n print(c)\n \n \n elif option == 3 :\n \n \n print('\\n\\n Subtraction \\n\\n')\n \n r,i = get_real_imaginary()\n \n a = Complex(r,i)\n \n r,i = get_real_imaginary()\n \n b = Complex(r,i)\n \n c = a.minus(b)\n \n print(c)\n \n \n \n elif option == 4 :\n \n \n print('\\n\\n Multiplication \\n\\n')\n \n r,i = get_real_imaginary()\n \n a = Complex(r,i)\n \n r,i = get_real_imaginary()\n \n b = Complex(r,i)\n \n c = a.multiply(b)\n \n print(c)\n \n \n elif option == 5 :\n \n \n print('\\n\\n Division \\n\\n')\n \n r,i = get_real_imaginary()\n \n a = Complex(r,i)\n \n r,i = get_real_imaginary()\n \n b = Complex(r,i)\n \n c = a.divide(b)\n \n print(c)\n\n else :\n print('\\n\\n Exiting The Program.....\\n\\n')\n \n \n choice = int(input('\\n\\n Do You Want To Continue ( Type 0 Or 1 ) : '))\n \n print('\\n')\n \n print(\"\\n\\n The End \\n\\n\")\n ",
"_____no_output_____"
],
[
"main()",
" Complex Number Operations \n\n\n 1. Input And Display \n\n 2. Addition \n\n 3. Subtraction \n\n 4. Multiplication \n\n 5. Division \n\n 6. Exit \n\n Enter Choice : 1\n\n\n Input and Display \n\n\n Enter Real Part : 2\n Enter Imaginary Part : 4\n\n The Complex Number Is : 2 + 4i \n\n\n\n Do You Want To Continue ( Type 0 Or 1 ) : 1\n\n\n 1. Input And Display \n\n 2. Addition \n\n 3. Subtraction \n\n 4. Multiplication \n\n 5. Division \n\n 6. Exit \n\n Enter Choice : 2\n\n\n Addition \n\n\n Enter Real Part : 3\n Enter Imaginary Part : 4\n Enter Real Part : 5\n Enter Imaginary Part : 6\n\n The Complex Number Is : 8 + 10i \n\n\n\n Do You Want To Continue ( Type 0 Or 1 ) : 1\n\n\n 1. Input And Display \n\n 2. Addition \n\n 3. Subtraction \n\n 4. Multiplication \n\n 5. Division \n\n 6. Exit \n\n Enter Choice : 3\n\n\n Subtraction \n\n\n Enter Real Part : 4\n Enter Imaginary Part : 5\n Enter Real Part : 6\n Enter Imaginary Part : 7\n\n The Complex Number Is : -2 - 2i \n\n\n\n Do You Want To Continue ( Type 0 Or 1 ) : 0\n\n\n\n\n The End \n\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7ded5dc9c558b7a82375ac1b8c0ebb540805a12 | 4,289 | ipynb | Jupyter Notebook | Medical Insurance Project II - Python Functions.ipynb | milkywaysandy/Codecademy-Project | 3b51cd235ac3b6e18b7c3a3ae4a6798b509bcd7f | [
"MIT"
] | null | null | null | Medical Insurance Project II - Python Functions.ipynb | milkywaysandy/Codecademy-Project | 3b51cd235ac3b6e18b7c3a3ae4a6798b509bcd7f | [
"MIT"
] | null | null | null | Medical Insurance Project II - Python Functions.ipynb | milkywaysandy/Codecademy-Project | 3b51cd235ac3b6e18b7c3a3ae4a6798b509bcd7f | [
"MIT"
] | null | null | null | 34.58871 | 154 | 0.591979 | [
[
[
"#modify medical insurance project with function\n#2. Create calculate_insurance_cost() function below\n#3. Create a variable called estimated_cost. \n#For now, set this variable equal to a value of 1000. You’ll add the full formula in the next step.\n#Add a print statement that prints estimated_cost. \n#4. Modify the function definition so that it contains five parameters\n#5. modify the estimate cost with formula\n#6. find out maria's estimate cost",
"_____no_output_____"
],
[
"def calculate_insurance_cost(age,sex,bmi,num_of_children,smoker):\n\n estimated_cost = 250*age - 128*sex + 370*bmi + 425*num_of_children + 24000*smoker - 12500\n \n return estimated_cost\nprint(\"The estimated insurance cost for Maria is\", calculate_insurance_cost(28,0,26.2,3,0),\"dollars.\")\nprint(\"The estimated insurance cost for Omar is\",calculate_insurance_cost(35,1,22.2,0,1), \"dollars.\")",
"The estimated insurance cost for Maria is 5469.0 dollars.\nThe estimated insurance cost for Omar is 28336.0 dollars.\n"
],
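[
"# Added example (illustrative): reuse calculate_insurance_cost() to isolate the effect of one factor.\n# Keeping Maria's other inputs fixed, the gap equals the 24000-dollar smoker coefficient in the formula above.\nnon_smoker_cost = calculate_insurance_cost(28, 0, 26.2, 3, 0)\nsmoker_cost = calculate_insurance_cost(28, 0, 26.2, 3, 1)\nprint('Estimated extra cost attributable to smoking:', smoker_cost - non_smoker_cost, 'dollars.')",
"_____no_output_____"
],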
[
"# At the top of your code, define a function called analyze_smoker() that takes an input smoker_status. \n#If smoker_status is equal to 1, print \"To lower your cost, you should consider quitting smoking.\"\n#Otherwise, print \"Smoking is not an issue for you.\"\n#evaluation BMI according to WHO standard\n\ndef analyze_smoker(smoker):\n if smoker == 1:\n print (\"To lower your cost, you should consider quitting smoking.\")\n else:\n print(\"Smoking is not an issue for you.\")\n\ndef analyze_bmi(bmi):\n if bmi > 30: \n print(\"Your BMI is in the obese range. To lower your cost, you should significantly lower your BMI.\")\n if bmi >= 25 and bmi <= 30: \n print(\"Your BMI is in the overweight range. To lower your cost, you should lower your BMI.\")\n if bmi >= 18.5 and bmi < 25: \n print(\"Your BMI is in a healthy range.\")\n if bmi < 18.5: \n print(\"Your BMI is in the underweight range. Increasing your BMI will not help lower your cost, but it will help improve your health.\")\n\n# Function to estimate insurance cost:\ndef estimate_insurance_cost(name, age, sex, bmi, num_of_children, smoker):\n estimated_cost = 250*age - 128*sex + 370*bmi + 425*num_of_children + 24000*smoker - 12500\n print(name + \"'s Estimated Insurance Cost: \" + str(estimated_cost) + \" dollars.\")\n \n analyze_smoker(smoker)\n analyze_bmi(bmi)\n \n return estimated_cost\n \n# Estimate Keanu's insurance cost\nkeanu_insurance_cost = estimate_insurance_cost(name = 'Keanu', age = 29, sex = 1, bmi = 26.2, num_of_children = 3, smoker = 1)",
"Keanu's Estimated Insurance Cost: 29591.0 dollars.\nTo lower your cost, you should consider quitting smoking.\nYour BMI is in the overweight range. To lower your cost, you should lower your BMI.\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7dee9e113a837ba1e2c935d9b01da967207b649 | 50,866 | ipynb | Jupyter Notebook | Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-07-StatsModels.ipynb | srgbastos/Artificial-Intelligence | 546935bdb1c57bffaf696fe0256052031dea5981 | [
"MIT"
] | null | null | null | Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-07-StatsModels.ipynb | srgbastos/Artificial-Intelligence | 546935bdb1c57bffaf696fe0256052031dea5981 | [
"MIT"
] | null | null | null | Data Science Academy/Cap08/Notebooks/DSA-Python-Cap08-07-StatsModels.ipynb | srgbastos/Artificial-Intelligence | 546935bdb1c57bffaf696fe0256052031dea5981 | [
"MIT"
] | null | null | null | 104.447639 | 34,800 | 0.789113 | [
[
[
"# <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 8</font>\n\n## Download: http://github.com/dsacademybr",
"_____no_output_____"
]
],
[
[
"# Versão da Linguagem Python\nfrom platform import python_version\nprint('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())",
"_____no_output_____"
]
],
[
[
"## Statsmodels",
"_____no_output_____"
],
[
"### Linear Regression Models",
"_____no_output_____"
]
],
[
[
"# Para visualização de gráficos\nfrom pylab import *\n%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport statsmodels as st\nimport sys\nimport warnings\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nimport statsmodels.api as sm\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\nnp.random.seed(9876789)",
"_____no_output_____"
],
[
"np.__version__",
"_____no_output_____"
],
[
"pd.__version__",
"_____no_output_____"
],
[
"st.__version__",
"_____no_output_____"
],
[
"# Criando dados artificiais\nnsample = 100\nx = np.linspace(0, 10, 100)\nX = np.column_stack((x, x**2))\nbeta = np.array([1, 0.1, 10])\ne = np.random.normal(size=nsample)",
"_____no_output_____"
],
[
"X = sm.add_constant(X)\ny = np.dot(X, beta) + e",
"_____no_output_____"
],
[
"model = sm.OLS(y, X)\nresults = model.fit()\nprint(results.summary())",
" OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 1.000\nModel: OLS Adj. R-squared: 1.000\nMethod: Least Squares F-statistic: 4.020e+06\nDate: Mon, 23 Mar 2020 Prob (F-statistic): 2.83e-239\nTime: 20:09:10 Log-Likelihood: -146.51\nNo. Observations: 100 AIC: 299.0\nDf Residuals: 97 BIC: 306.8\nDf Model: 2 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst 1.3423 0.313 4.292 0.000 0.722 1.963\nx1 -0.0402 0.145 -0.278 0.781 -0.327 0.247\nx2 10.0103 0.014 715.745 0.000 9.982 10.038\n==============================================================================\nOmnibus: 2.042 Durbin-Watson: 2.274\nProb(Omnibus): 0.360 Jarque-Bera (JB): 1.875\nSkew: 0.234 Prob(JB): 0.392\nKurtosis: 2.519 Cond. No. 144.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n"
],
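[
"# Added example: 95% confidence intervals for the fitted coefficients, compared with the\n# true parameters beta = [1, 0.1, 10] used to simulate the data above.\nprint(results.conf_int(alpha=0.05))\nprint('True parameters:', beta)",
"_____no_output_____"
],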
[
"print('Parameters: ', results.params)\nprint('R2: ', results.rsquared)",
"Parameters: [ 1.34233516 -0.04024948 10.01025357]\nR2: 0.9999879365025871\n"
],
[
"nsample = 50\nsig = 0.5\nx = np.linspace(0, 20, nsample)\nX = np.column_stack((x, np.sin(x), (x-5)**2, np.ones(nsample)))\nbeta = [0.5, 0.5, -0.02, 5.]\n\ny_true = np.dot(X, beta)\ny = y_true + sig * np.random.normal(size=nsample)\n\nres = sm.OLS(y, X).fit()\nprint(res.summary())",
" OLS Regression Results \n==============================================================================\nDep. Variable: y R-squared: 0.933\nModel: OLS Adj. R-squared: 0.928\nMethod: Least Squares F-statistic: 211.8\nDate: Mon, 23 Mar 2020 Prob (F-statistic): 6.30e-27\nTime: 20:09:10 Log-Likelihood: -34.438\nNo. Observations: 50 AIC: 76.88\nDf Residuals: 46 BIC: 84.52\nDf Model: 3 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nx1 0.4687 0.026 17.751 0.000 0.416 0.522\nx2 0.4836 0.104 4.659 0.000 0.275 0.693\nx3 -0.0174 0.002 -7.507 0.000 -0.022 -0.013\nconst 5.2058 0.171 30.405 0.000 4.861 5.550\n==============================================================================\nOmnibus: 0.655 Durbin-Watson: 2.896\nProb(Omnibus): 0.721 Jarque-Bera (JB): 0.360\nSkew: 0.207 Prob(JB): 0.835\nKurtosis: 3.026 Cond. No. 221.\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n"
],
[
"print('Parameters: ', res.params)\nprint('Standard errors: ', res.bse)\nprint('Predicted values: ', res.predict())",
"Parameters: [ 0.46872448 0.48360119 -0.01740479 5.20584496]\nStandard errors: [0.02640602 0.10380518 0.00231847 0.17121765]\nPredicted values: [ 4.77072516 5.22213464 5.63620761 5.98658823 6.25643234 6.44117491\n 6.54928009 6.60085051 6.62432454 6.6518039 6.71377946 6.83412169\n 7.02615877 7.29048685 7.61487206 7.97626054 8.34456611 8.68761335\n 8.97642389 9.18997755 9.31866582 9.36587056 9.34740836 9.28893189\n 9.22171529 9.17751587 9.1833565 9.25708583 9.40444579 9.61812821\n 9.87897556 10.15912843 10.42660281 10.65054491 10.8063004 10.87946503\n 10.86825119 10.78378163 10.64826203 10.49133265 10.34519853 10.23933827\n 10.19566084 10.22490593 10.32487947 10.48081414 10.66779556 10.85485568\n 11.01006072 11.10575781]\n"
],
[
"prstd, iv_l, iv_u = wls_prediction_std(res)\n\nfig, ax = plt.subplots(figsize=(8,6))\n\nax.plot(x, y, 'o', label=\"data\")\nax.plot(x, y_true, 'b-', label=\"True\")\nax.plot(x, res.fittedvalues, 'r--.', label=\"OLS\")\nax.plot(x, iv_u, 'r--')\nax.plot(x, iv_l, 'r--')\nax.legend(loc='best')",
"_____no_output_____"
]
],
[
[
"### Time-Series Analysis",
"_____no_output_____"
]
],
[
[
"from statsmodels.tsa.arima_process import arma_generate_sample",
"_____no_output_____"
],
[
"# Gerando dados\nnp.random.seed(12345)\narparams = np.array([.75, -.25])\nmaparams = np.array([.65, .35])",
"_____no_output_____"
],
[
"# Parâmetros\narparams = np.r_[1, -arparams]\nmaparam = np.r_[1, maparams]\nnobs = 250\ny = arma_generate_sample(arparams, maparams, nobs)",
"_____no_output_____"
],
[
"dates = sm.tsa.datetools.dates_from_range('1980m1', length=nobs)\ny = pd.Series(y, index=dates)\narma_mod = sm.tsa.ARMA(y, order=(2,2))\narma_res = arma_mod.fit(trend='nc', disp=-1)",
"/Users/dmpm/opt/anaconda3/lib/python3.7/site-packages/statsmodels/tsa/base/tsa_model.py:162: ValueWarning: No frequency information was provided, so inferred frequency M will be used.\n % freq, ValueWarning)\n"
],
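[
"# Added example (sketch): a 12-step out-of-sample forecast from the fitted ARMA model.\n# It assumes the ARMAResults.forecast API of this statsmodels version, which returns (forecast, stderr, conf_int).\nforecast, stderr, conf_int = arma_res.forecast(steps=12)\nprint(forecast)\nprint(conf_int)",
"_____no_output_____"
],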
[
"print(arma_res.summary())",
" ARMA Model Results \n==============================================================================\nDep. Variable: y No. Observations: 250\nModel: ARMA(2, 2) Log Likelihood -245.887\nMethod: css-mle S.D. of innovations 0.645\nDate: Mon, 23 Mar 2020 AIC 501.773\nTime: 20:09:11 BIC 519.381\nSample: 01-31-1980 HQIC 508.860\n - 10-31-2000 \n==============================================================================\n coef std err z P>|z| [0.025 0.975]\n------------------------------------------------------------------------------\nar.L1.y 0.8411 0.403 2.089 0.037 0.052 1.630\nar.L2.y -0.2693 0.247 -1.092 0.275 -0.753 0.214\nma.L1.y 0.5352 0.412 1.299 0.194 -0.273 1.343\nma.L2.y 0.0157 0.306 0.051 0.959 -0.585 0.616\n Roots \n=============================================================================\n Real Imaginary Modulus Frequency\n-----------------------------------------------------------------------------\nAR.1 1.5618 -1.1289j 1.9271 -0.0996\nAR.2 1.5618 +1.1289j 1.9271 0.0996\nMA.1 -1.9835 +0.0000j 1.9835 0.5000\nMA.2 -32.1793 +0.0000j 32.1793 0.5000\n-----------------------------------------------------------------------------\n"
]
],
[
[
"Conheça a Formação Cientista de Dados, um programa completo, 100% online e 100% em português, com 400 horas, mais de 1.200 aulas em vídeos e 26 projetos, que vão ajudá-lo a se tornar um dos profissionais mais cobiçados do mercado de análise de dados. Clique no link abaixo, faça sua inscrição, comece hoje mesmo e aumente sua empregabilidade:\n\nhttps://www.datascienceacademy.com.br/pages/formacao-cientista-de-dados",
"_____no_output_____"
],
[
"# Fim",
"_____no_output_____"
],
[
"### Obrigado - Data Science Academy - <a href=\"http://facebook.com/dsacademybr\">facebook.com/dsacademybr</a>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7df023c19ab1e295924101ef96b987233fb129e | 28,164 | ipynb | Jupyter Notebook | 01-byoc/audio.ipynb | asfhiolNick/incremental-training-mlops | e0803522045d9ddb821da3410d7307d1578ce6bb | [
"MIT-0"
] | 1 | 2021-07-05T10:24:28.000Z | 2021-07-05T10:24:28.000Z | 01-byoc/audio.ipynb | asfhiolNick/incremental-training-mlops | e0803522045d9ddb821da3410d7307d1578ce6bb | [
"MIT-0"
] | null | null | null | 01-byoc/audio.ipynb | asfhiolNick/incremental-training-mlops | e0803522045d9ddb821da3410d7307d1578ce6bb | [
"MIT-0"
] | null | null | null | 39.667606 | 8,676 | 0.6781 | [
[
[
"\n## Bring Your Own Algorithm to SageMaker\n### Architecture of this notebook \n\n\n<img src=\"./images/part1.png\" alt=\"architecture\" width=\"800\"/>\n\n\n### 1. Training\na. [Bring Your Own Container](#byoc)\n\nb. [Training locally](#local_train)\n\nc. [Trigger remote training job](#remote_train)\n\nd. [Test locally](#local_test)\n\n### 2. Deploy EndPoint\n[Deploy model to SageMaker Endpoint](#deploy_endpoint)\n\n### 3. Build Lambda Function\na. [Construct lambda function](#build_lambda_function)\n\nb. [Test lambda](#lambda_test)\n\n### 4. Configure API Gateway\n\na. [Construct and setting api gateway](#api-gateway)\n\nb. [Configure for passing binary media to Lambda Function](#binary-content)\n\nc. [test api gateway](#test-api)\n",
"_____no_output_____"
],
[
"### BYOC (Bring Your Own Container) for Example Audio Classification Algorithm\n<a name=\"byoc\"></a>\n",
"_____no_output_____"
],
[
"* prepare necessry variables\nusing `boto3` to get region and account_id for later usage - ECR uri construction ",
"_____no_output_____"
]
],
[
[
"import boto3 \n\nsession = boto3.session.Session()\nregion = session.region_name\nclient = boto3.client(\"sts\")\naccount_id = client.get_caller_identity()[\"Account\"]\nalgorithm_name = \"vgg16-audio\"",
"_____no_output_____"
]
],
[
[
"#### 3 elements to build bring your own container \n* `build_and_push.sh` is the script communicating with ECR \n* `Dockerfile` defines the training and serving environment \n* `code/train` and `code/serve` defines entry point of our container ",
"_____no_output_____"
]
],
[
[
"!./build_and_push.sh",
"_____no_output_____"
],
[
"!cat Dockerfile",
"_____no_output_____"
],
[
"!cat build_and_push.sh",
"_____no_output_____"
]
],
[
[
"* construct image uri by account_id, region and algorithm_name",
"_____no_output_____"
]
],
[
[
"image_uri=f\"{account_id}.dkr.ecr.{region}.amazonaws.com/{algorithm_name}\"\nimage_uri",
"_____no_output_____"
]
],
[
[
"* prepare necessary variables/object for training ",
"_____no_output_____"
]
],
[
[
"import sagemaker \nsession = sagemaker.session.Session()\nbucket = session.default_bucket()",
"_____no_output_____"
],
[
"from sagemaker import get_execution_role\n\nrole = get_execution_role()\nprint(role)\n\ns3_path = f\"s3://{bucket}/data/competition\"\ns3_path",
"_____no_output_____"
]
],
[
[
"### Dataset Description - \n\nDataset used in this workshop can be obtained from [Dog Bark Sound AI competition](https://tbrain.trendmicro.com.tw/Competitions/Details/15) hold by the world leading pet camera brand [Tomofun](https://en.wikipedia.org/wiki/Tomofun). The url below will be invalid after workshop. ",
"_____no_output_____"
]
],
[
[
"# s3://tomofun-audio-classification-yianc\n# data/data.zip\n!wget https://www.dropbox.com/s/gvcswtrmdnhyiwo/Final_Training_Dataset.zip?dl=1\n!unzip -o Final_Training_Dataset.zip?dl=1\n!mv Final_Training_Dataset/train.zip ./\n!unzip -o train.zip\n!aws s3 cp --recursive ./train/ $s3_path",
"_____no_output_____"
]
],
[
[
"### Train model in a docker container with terminal interface \n<a name=\"local_train\"></a>",
"_____no_output_____"
],
[
"* start container in interactive mode\n```\nIMAGE_ID=$(sudo docker images --filter=reference=vgg16-audio --format \"{{.ID}}\")\nnvidia-docker run -it -v $PWD:/opt/ml --entrypoint '' $IMAGE_ID bash \n```\n* train model based on README.md\n```\npython train.py --csv_path=/opt/ml/input/data/competition/meta_train.csv --data_dir=/opt/ml/input/data/competition/train --epochs=50 --val_split 0.1\n```",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\nnow = datetime.now()\ntimestamp = datetime.timestamp(now)\njob_name = \"audio-{}\".format(str(int(timestamp))) \njob_name",
"_____no_output_____"
]
],
[
[
"### Start SageMaker Training Job\n<a name=\"remote_train\"></a>\n* sagemaker training jobs can run either locally or remotely ",
"_____no_output_____"
]
],
[
[
"mode = 'remote'\nif mode == 'local':\n csess = sagemaker.local.LocalSession()\nelse: \n csess = session\n\nprint(csess)\nestimator = sagemaker.estimator.Estimator( \n role=role,\n image_uri=image_uri,\n instance_count=1,\n# instance_type='local_gpu',\n instance_type='ml.p3.8xlarge',\n sagemaker_session=csess,\n volume_size=100, \n debugger_hook_config=False\n )",
"_____no_output_____"
],
[
"estimator.fit(inputs={\"competition\":s3_path}, job_name=job_name)",
"_____no_output_____"
],
[
"estimator.model_data",
"_____no_output_____"
],
[
"model_s3_path = estimator.model_data\n\n!aws s3 cp $model_s3_path . \n!tar -xvf model.tar.gz\n!mkdir -p model \n!mv final_model.pkl model/",
"_____no_output_____"
]
],
[
[
"### Test Model Locally \n<a name=\"local_test\"></a>\n\n* start container in interactive mode\n```\nIMAGE_ID=$(sudo docker images --filter=reference=vgg16-audio --format \"{{.ID}}\")\nnvidia-docker run -it -v $PWD:/opt/ml --entrypoint '' $IMAGE_ID bash \n```\n* test model based on README.md\n```\npython test.py --test_csv /opt/ml/input/data/competition/meta_train.csv --data_dir /opt/ml/input/data/competition/train --model_name VGGish --model_path /opt/ml/model --saved_root results/test --saved_name test_result\n```",
"_____no_output_____"
],
[
"### Deploy SageMaker Endpoint \n<a name=\"deploy_endpoint\"></a>\n",
"_____no_output_____"
]
],
[
[
"predictor = estimator.deploy(instance_type='ml.p2.xlarge', initial_instance_count=1, serializer=sagemaker.serializers.IdentitySerializer())\n# predictor = estimator.deploy(instance_type='local_gpu', initial_instance_count=1, serializer=sagemaker.serializers.IdentitySerializer())",
"_____no_output_____"
],
[
"endpoint_name = predictor.endpoint_name",
"_____no_output_____"
]
],
[
[
"### You can deploy by using model file directly \n\nThe Source code is as below. we can use model locally trained to deploy a sagemaker endpoint ",
"_____no_output_____"
],
[
"#### get example model file from s3 \n```\nsource_model_data_url = 'https://tinyurl.com/yh7tw3hj'\n!wget -O model.tar.gz $source_model_data_url\n\nMODEL_PATH = f's3://{bucket}/model'\nmodel_data_s3_uri = f'{MODEL_PATH}/model.tar.gz'\n!aws s3 cp model.tar.gz $model_data_s3_uri\n```\n",
"_____no_output_____"
],
[
"#### build endpoint from the model file \n```\nimport time \n\nmode = 'remote'\n\nif mode == 'local':\n csess = sagemaker.local.LocalSession()\nelse:\n csess = session\n\nmodel = sagemaker.model.Model(image_uri, \n model_data = model_data_s3_uri,\n role = role,\n predictor_cls = sagemaker.predictor.Predictor,\n sagemaker_session = csess)\n\nnow = datetime.now()\ntimestamp = datetime.timestamp(now)\nnew_endpoint_name = \"audio-{}\".format(str(int(timestamp))) \nobject_detector = model.deploy(initial_instance_count = 1,\n instance_type = 'ml.p2.xlarge',\n# instance_type = 'local_gpu',\n endpoint_name = new_endpoint_name,\n serializer = sagemaker.serializers.IdentitySerializer())\n```",
"_____no_output_____"
],
[
"### You can also update endpoint based on following example code \n\n```\nnew_detector = sagemaker.predictor.Predictor(endpoint_name = endpoint_name) \nnew_detector.update_endpoint(model_name=model.name, initial_instance_count = 1,\n instance_type = 'ml.m4.xlarge')\n```",
"_____no_output_____"
]
],
[
[
"import json \nfile_name = \"./input/data/competition/train/train_00002.wav\"\nwith open(file_name, 'rb') as image:\n f = image.read()\n b = bytearray(f)\nresults = predictor.predict(b)\ndetections = json.loads(results)\nprint(detections) ",
"_____no_output_____"
]
],
[
[
"### Create Lambda Function \n<a name=\"build_lambda_function\"></a>",
"_____no_output_____"
]
],
[
[
"import time \niam = boto3.client(\"iam\")\n\nrole_name = \"AmazonSageMaker-LambdaExecutionRole\"\nassume_role_policy_document = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": [\"sagemaker.amazonaws.com\", \"lambda.amazonaws.com\"]\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n}\n\ncreate_role_response = iam.create_role(\n RoleName = role_name,\n AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)\n)\n\n\n# Now add S3 support\niam.attach_role_policy(\n PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess',\n RoleName=role_name\n)\n\niam.attach_role_policy(\n PolicyArn='arn:aws:iam::aws:policy/AmazonSQSFullAccess',\n RoleName=role_name\n)\n\niam.attach_role_policy(\n PolicyArn='arn:aws:iam::aws:policy/AmazonSageMakerFullAccess',\n RoleName=role_name\n)\ntime.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate\n\nlambda_role_arn = create_role_response[\"Role\"][\"Arn\"]\nprint(lambda_role_arn)",
"_____no_output_____"
],
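[
"# Added sketch (not the actual file): the handler packaged in invoke_endpoint/lambda_function.py is expected\n# to look roughly like this. It decodes the base64 'content' field produced by the API Gateway mapping\n# configured later and forwards the raw bytes to the SageMaker endpoint named by the ENDPOINT_NAME variable.\nimport base64\nimport json\nimport os\n\nimport boto3\n\nruntime = boto3.client('sagemaker-runtime')\n\ndef lambda_handler(event, context):\n    # 'content' carries the base64-encoded payload passed through API Gateway\n    payload = base64.b64decode(event['content'])\n    response = runtime.invoke_endpoint(\n        EndpointName=os.environ['ENDPOINT_NAME'],\n        ContentType='application/octet-stream',\n        Body=payload)\n    return json.loads(response['Body'].read())",
"_____no_output_____"
],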
[
"%%bash -s \"$bucket\" \ncd invoke_endpoint \nzip -r invoke_endpoint.zip .\naws s3 cp invoke_endpoint.zip s3://$1/lambda/",
"_____no_output_____"
],
[
"import os\ncwd = os.getcwd()\n!aws lambda create-function --function-name invoke_endpoint --zip-file fileb://$cwd/invoke_endpoint/invoke_endpoint.zip --handler lambda_function.lambda_handler --runtime python3.7 --role $lambda_role_arn",
"_____no_output_____"
],
[
"endpoint_name = predictor.endpoint_name\nbucket_key = \"audio-demo\"\nvariables = f\"ENDPOINT_NAME={endpoint_name}\"\nenv = \"Variables={\"+variables+\"}\"\n\n!aws lambda update-function-configuration --function-name invoke_endpoint --environment \"$env\"\n",
"_____no_output_____"
]
],
[
[
"### Test Material \n<a name=\"lambda_test\"></a>\n```\n{\n \"content\": \"/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEAAoHCBYWFRgWFRUZGRgYGBgYGBoYGBoYGBgYGhgZGRgYGBgcIS4lHB4rIRgYJjgmKy8xNTU1GiQ7QDs0Py40NTEBDAwMEA8QHxISHjQrJCQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NDQ0NP/AABEIAMMBAgMBIgACEQEDEQH/xAAbAAABBQEBAAAAAAAAAAAAAAAEAAIDBQYBB//EADkQAAEDAgQEBAMGBgIDAAAAAAEAAhEDIQQSMUEFBlFhEyJxgTKhsUKRwdHh8BQVUmJygiPxB5Ky/8QAGQEAAwEBAQAAAAAAAAAAAAAAAAECAwQF/8QAJBEAAgICAwACAgMBAAAAAAAAAAECESExAxJBE1EiMgRhgXH/2gAMAwEAAhEDEQA/AABjTG6aKyAZWC67EhcDbDsWLn2QtWoVC3EAqUEKbYdgd9KdUFX4c0o6u6NEmGVomIqTw0dENW4SDstA4BDuTUmBS0uDiVLU4SNCrunAXKjgm5sADDcHaRGUKj4hwlzXWG61rKkBQYhwKcZtOxUYd+FcNk0UndFr3YVpSfgGwtPlY8mNLT0SAK1zeHN6JzeGN6I+VBZksvUKMha+rwodFEzgwnRC5UKyhZmy2BQtQkm62/8ALQGqlxXB5d5QlHkVgmZ9JXL+COCHfwp4WqnFjtFckjv5a9NdgHBO0FoDSUpw7ui4aThsU7QyNJSCiei4WEbItAMBSKSUIASSRCSAEkkkgDXGuVzxVASmBy5EhBza6kOKIQTXJr3lNxFQd/FSnnGQFVhyeRZLqJhYxqe3EKtIU9JKgDhXSa8ygy6EvGRQFk+quEygGVC4wFe4HglR0F8U2/3fEfRv5otIqKlJ0kQ4PDh13TlHTUnoFd/wdF7LMLSdHAkkHvJgqQiixuUbD7h17pYbiDG7SD1tH5LGUm3g7YcUVGmsgNPgDyfLUZHW/wAwmYzg1anfLnb/AFMkx6iJC0+EOe9o7z+KPZiabCJqNHUG6PkfoPgi9HnLnwuU6q9QxWDoV2Fr8pkGHNABaToQUPgeWMMwAtYXnWXmflp8k1NUYvgdmCbRe4SGOI6wYXKZG4Xrjn5GEaNANsrdIVZR5awrpc5mYuuczjN+gBgeyblHwn4JHmtVzUK9jVveJcq4UuysL6ZO4OZs9CHfmlheWcOxozAVXaOLiW/+rZshTig+CVmDZRaVFWwjdoXoLKGHpkinTDHdYLj9529E19U7NbHVoAjuRCPmXhov4rrLPPsNwd9Qwxhd6Cw9ToEY/k6pEuLG9nOP1AIWsdiHTrPr+Sc7ENcMpspfO/DSP8WK2zB47l+pREvZDTo5pDmH/YWB7FVVTBjovTWPyTo5hEOY64c3cELMce4c2k/yXY8B7JuQD9knqD+C0hy9jDm4emVoyD+HjonNwQ6K2cJ2SFNbdznKl3D5GiFOAutHkEIOsxNTHZWfwASRmUrqfYLDGUJTH0YKJpPSqGVnYwRzEwMRLmKNzYRYEMQkSpMspgYiwIi1PYYTnNUYclQrHuN04XSaJTiIQInw9QsJe0eYAlvr+cSh8XzBVcfjOnX8FzOg8XQnzN9x36hHVXk24pUqHjiT+pU9HiD5EEkuP00CqQYN0XhjLgRtum4pG/Z/ZccR5kfTbkYb6SCbnc+nRZ3+ZVnuBL3EylisI74j6W2RXC8KHvAPwi59AqqEY3RPaUpbwekcv8ScWMzAxb1J6wt3hXktDoi2i8xw/GKVDLmgWnsBt7q84TzjTqmA6DMRtA3XG00rrB0YeLNgAXlzSYGVxI9GkoWhXd11+SJ5ZqNqB5I1zN9oVFUxDWPex5gA2J0hS1UVL7sE/wAnEvThcw819x++qz/N1Ooyk57ZMAzHyPqoqfMtNj8hqNE6EuhaV9dlRmUwQ6x/RKsZQ7aPH8BzkWuAcTGkm/4LT8O4uysYY4B/TZ3p+SxfNPAhTqPDLQ4/mFV8IbUaZBjKZB6EdCuh8UJR7QZHyyUqkj1l7JvEOF+nuFVcRxTWAuecsW/6R2D4wx1NheQX5fNpqLGR+9VVcb4hScIDG37yfumAsEldGzdK7HYfEB1Jry17swJADg2GCwJsTJIMe3VVfF6pc5l5bkGWdQNCD3kIetxBxblFhA+URHawQufSSt4xrJx83KpKkyVRl9106KGbrQ5iYlNc0LhNlHmQJnMgSTl1FsQxlRPc9KrRhOpM7JlCYDqlUKMY2yGfR1QDQM1yTinChdJ1IpEkVRQEoh7CozQKYDfEXDUXXUCuHDlNIdHJXXFMa0p5TBCYAdQnuaFxgXXBSxj2MEQpOG4bK5xm0W97/gowYRVB3lJlTLRpwvNFdj8F4jrHzDrpCs+W+WanjDzNiL7g9h30+9Op4HP5i8NHpeB6LX8Ir0KNMPfNogadSJ7mFPeTj1R0qMU+zN5ytg8gaAIDRfue6B5q4CXguYGk31EiFQYPnOS4spuIsBDgIJMCVpeX+YxiC6i9jvEa0lwdAcWyBItfULX4bhTM/lqdo8pfyjUc5xcXQTldo2zbi3QbEL0Hlrh7WMaA5xDRAzOLr+6t+NcGNsroB+1Egjo4dUM0CiweabWgRJ9FzzlJvq/DaPWrj6ed89tjEubPlLWu++fnqsy58aLU84DNUncMbJtrLjH3ELLuYtOJLqc/M25UJlSVMAhskKRj1ZiSOXIXQ5LMihCkppUkrhRQUNldyroCdMIoBmQpKXOEk6YqQhVlPY8KLw4TgEgoKZXCe9wIQDmFRlzkUCDMi5lUdOoVIXooZwNvdEeGIQbqhldNYwl1AkewJll2kZ1XKohOgGeCCoX0EQEg2UACikniknudCZnQA0005php/f73XAV10EaxEEe179kpLBUHUkF4WuAWg2uB6g6fijuNYx9TDupC0GZGttB81VcWwrm0i9mkNe0ja6bw/iQqAtLocR5mnQ9x1CcI1+RvOVqjPYZxaS3PAMAwT5TJE+0fNek/+I8C5uIdWNUuY1mT+0ucQXBpJkgR2uvOcVSptcZdfNcC8X6jZej8kcwUKNFzc7Z11yxGgJN102YHp3NXFGUcK+pJMRA1MyNFh38S8anTqmxJyR/cQTMeyp+LcXdxF4oUSW0ZzPfoXwW/1XiJ7kxoNdlyvwSS177MYZYztoCfYBcvNBykkjfjkoxyUXH+DEMNR5Azhoa37XwgG+5Xn+Jplji06g/9Fem88uL8Y0D4W0wBqbkmbfcvPeOUyHj/AB1PqUsRfVCmrj2YCxdKa0JwKujnOTCRK48pjE6AJY+y4EwBdDUBY8PSe6UxwhNzpCsV0kvEC4jsItxQldGHurQUhoFK3DBTZp1KR7OygqM7K6xVMdFBTpgo7B1KtrCphRI1CsG0bpzxKVh1K4YeV11EKwDAntoAosVFO+idkvDO6tBSE3ThRBTsKKhrF1jJVwcM0KIUgEdv
A6lX4EqB1FXooKF2HCaYqKplJONIaQrNmGB0RLMFNgJhDd4KSoi4dRz0sr7QS2+mUjT0us3U4Pkqmm6A5zXZO9iT8gfvWqxOJyQ0jaD0lA8QoGs1rmOAqMByE76QAdiri+rLu2ZkcEOkbj63Wm4TwSmKbxiDkDv+Ng3L3tcWH08s+yhwPNLaYIq4QmoCIc13lJ7tIstNw59TFllSrTbSptcHBhIzPI0uR0kbfEffZtGlxStGo5Q5dpYZjAYLy3M+e9hbQDULR1MSxj8jdSJPtsqHhwqOqOc9rhmFyLgCIG+yu6GFp2MS7QTss1oybtlPx3AzUa/KYy3cBN9gs5zByz4tPMweZoJZ/deXMPrt3XqXgiIOioeKUgHQ0RfQad1hzRcfyTNYSUl1Z4ezCHQjRNfhey2vNHDPDf4jR5HmT0Dzcj3uVngJKcZWrMJR6uipOGURoFW9SneEhTA1VWTRU+GUvDKtHsErhYITsKKt7ZUIZdW3gjRdZhQCkKiq/h+xSV/4DUkBQeydVwYqLJ+HqWQ1dt1kW2OqvlDiWlTNlJ7eqdBY9olNewhcBI0Uk9U6GRN1RTBZQ09ZTnPG6SQqGVmrlJymzghQYcyYVD0Odqk9kXUlamBdNAzeil0sieBuddYQpXUQdCh3UyClGSehJ4Jm0byp8HZ+utvyQZqGLIjhzC57fWVcbspBGNwuYEbqh8CPKTBBsfpC1WIYZsEFiKDHxnEEHUReFpJDRDgwx13tBdu4tABP7hX/AA54eHUnR8JyWgAi7dFFgeDtPwmw7D9J2ROH4U5rpDmxmsTMiDe4uNxupbaKikyy4RiX5AxpfrveOy1OEpwJce/Qqnw4ywA6ZvbtrfZWmHcSeoVRFIsWGUPxDCBzZA8wuETSTqxsfRXOKlFpkxbTwYzjODD6b2ET5SW/5C4XnIIb6r07HOgx2K874jhQDmA66dVxccqbRvyxtdgCq64JTKhulVqhOosButbOehGFG6l0UtWn0TaQKOyYtkGUyngKQtukXABJyoUmkR5ikn5uySnuT2D3EBMIBUlOnm12UlKnJ00Qk0WlggZTJcOinLBOiKFMLjqZBkXQUC1RA0QzKbiVYam6Ip5WppCor6dM9FFVwpVg/wCKdihnvgmUk2NkDKDhpom5YMoh2IMABRYhx2CdhSHsEi6ieSNkwVSLFEZ5Gil2weUDsc4qSgSTB0RTQMkkKNjxeE44EkkQVG3MKfhNQh8dj9FD4l0qZh2b1Vp07GlbLx7utwi8Ng21D8J/1k/NC8NcH6wJ6q+/k5DbSJ6GxXRtD0GcNwQZ5XAjYZomehg2Vo2iwGIF/wB7rP4Z+TyukHrNj6zoiGVMzhc/eoWBsKxNZjHEQQNo2JROG4nSFhmn/FR4/DtIzbIdlNpNvdVoRo6FYOEt0TsTUAbqqrDPyugaKTHPdIt+ScpVFhGNsqOKN8xOyyHEsoYT0d9bfitjxIyDbQLG8bHkcB2+oleev2OmX6FFVc03hQvrCLJswLqNlOdFvSOR5HGuUn4qEQWNAAUL6QzRsmkhJEuGqA6rkCUjQACYwQjAMfASTfDHVdRQYLV1AsGZS4Zh1K7XJMAqdlOQob8G/oY8rrRNgoatM6dE6gYN0k3YqZ12G1umuGVhUue5hQvqzbYaqrKqkCUMzutlPVph0HfdE0y0adLqFo887FIWyM0w1wJ0TKwzOFrKxq0xv7KAggTCd+DS8IG4e8kWClZTgF0WRAe0sJNioXPLmgDRCCiBtbMCIsmsokbJ9NwY2SLSp6r9C3dF+j3kHdoLaJuNgttspGVQTA21UmJALHAawUWEcMfy/wCV2ZzrDYR8zqvQsFjmPYBoR7ELFcvYIvYSW7fhCt8Dw17I809Jm/a6h8soukdKgpLJd43CAtzWWcw9fLULdpWjq4kMovzkCAd1g34uX5mrpcrSZjVOjWcW4jDMo1gFZ6hxZwdrH5LtWqXAON7KnxVTKZCbb2SjUt4kYDswgant6rmH44+q8AAwPU/UarJsxudjmt+KQ33NgfqtfwSm1rAGiwGvU3ue65+abqkb8UVst8U2Wz1aZWO4405JA+IgBbl7paFhOZJDwxn2Jn3/AEWaWQm/xaM/icNbulQow2US9zgbiU1rXAXFlon9nKo0DvbFkOHkOhWzKQ33TBgW5rXVJjoHLbXTcRTltkY6gu0sOfZFhTsqfAPVJW2XskixUi0xNNoEoZmINxsEXVF9fZcr4fSNwpoojw+YDM4WOiY+mbuUhaWsh5k7BDGsbNE7J2BPQpF11ABE23hWAEXE2GnVNa+ZJHeEMAdrCRA1KjewsIEI6k9okxB2Ujhm9OqT+gKr+Jc4y4aaIwMloKDreVxbsiWYkmGgbITBsixFIWaTGYp4pgGBpomPZL2z9ko6vVAdLR2TDwEdQtBvdReDY9Roj2sIAdsUI6pBch6Ahw9K56zdTGBMCbGfuTsKYc4kahPADWGR5nAj71KQKrL7l+uMhbpb6K8wjg45Tvp6rz7g+LLX/rZbHD1SRLetuxSkns6lnBTc+4ktc2ju5oe70kgfQrLMqWhaXn6jm8LEaOA8CoDsfM9h/wDv5LIStLXhjK/S8p4mGBVON8036rj6ktDeikwGFNR7GAwXECTt1Psm5dsErBY8h8vGs573khgIA7ka+116GOFeHGUS3r27qy4Nw9lGk2mwQAPck3JPeSjy20dlq+FNZ2NcrWjNYp+VpHRYvGed7ndSTKv+N4rzvpg6Egn20+aochgCNd1yqLtj5HdIFpYa8qDEsdtBurxtIBgEKsbTJcRBsrZD0QmkIEld8rW21KJGHBaUIyh5ydgEqFsTaTnRGv4JPYS4ibiy7hn5Sb3UrGfaPcosLAPDd1SRXjDskpsLRZOy5p2At6phY4X13soalBpEOdMaAdUTTpwASU78E8AeOY98ZbRclJjIZLrkHXsj2vF56dIlQCsxxiPKQhO0CyDOxT5GQW6rj6paQALm8o52Roa0aaJ2IcA3L133SavYmrIW1IcCRqNFI6mbAm0yY2GyFrNI8wM2hcw1Qxc/EIgpppDqjmOZNgNxB6qYPDQDG111gmGkjsigWFhYRfcpoa/sDxFAkAt3Ejsm0BaHbI+ocryGXsIH1KHqSLRrc9ymxNHK1VobF5GnSUPSbml3TVKrTJiGz1SwRnMGjVTkYmOBGvmCcxs63Kc+kGuEtk9dkZSoZrsItr3TX0CZS1aJY8dDcHv0K9J5W4eGMOaC4w70svO+MVXCm5wjyOafuN16by2ZoMeT8QmT02W/Ck7sJSdIF504U2rhK8Nl7WF7Y1LmDMBbUkAj3Xj+AJeMoBJGkX/7XvzKrSYDgT6rEcxcuspVRXpeRr3f8jRGUO/qaNpOo6p8kE8oSk9Hn7qBGv6q85RwZfiaZIsCXH2B/RS4pmYg6iTeNI0Wo5CwZyvqOGrsrf8AEakep+ijjjchtmybog+L4o06T3jZpj1hGtCoObHODGR8JfleOxEj5hdL0QYfEvIaHmS57jJ9bk/RPfWOUEjQW/fsiuN0w0sDfhAJM9SqmpUdodtLLhS
awzWT7Owo4kua0gqKnWIJFpO/Zcp0BldJhxiFG6mdBroTO6b+xaHY5xbGUz1UNFtySNpKTKLgMsiZTcNVcHunTf0CS/slHXsBEgKei5rmEdFHVuCR8M2UT3Whvb37JWCwxuXskjG0XwPKkn1GOZTJIaPsiSVM5pI8okk23v0TGvJGZrSLkO7jpKcxjpgyA3zAQpik0Rs5iGuiLE7t6dVXV6TgczB0kKw8W0kOBOi7lJhgudZ7I65GmBeLmcGvERcdlNXJJ0kDdS0cK0ODyPNJEnS3ULrKbs+YeYGTA09AihtWyZlItYHGJ1ynYIFkuu34h5vWeiMqsc5jnEXFo0tuhKbHAtY0AWtPT1T14JANZ7/EzQQALd1aNcSJLdQCnNfmIkD23jYItktGVwEuNwNh0lLDHvYJQxOd0htxA/RTPpPnQEjYnRDswuXOWzBNvVcwdUvBaTBbr39U442LQTWeAC82AOUDqYuoWGAMgOZ2oAn9hTupjKM1xJ9j1hLh77Py7yJjZV6CYBXD3ENIOt/RdoPcxttydFY4t4BY0C9iSO43QviDxcjT8IMnp1RVDA6+Fzse12pY6B0sYPrKIxvNQo0WU2vAaKTSASPhDToNzYe0qUsJzECQ5sT0jqsVx7hDzVaARGRzBmmMjiTY9QSR6ALTjlVjLvB/+SqjmEljWFrmhzgZsS6C1ojTyg3KvsJzd/EANcWkOny3u4CWuA2vIhYXhnJpMB7iWnUC3uDC1OD4cykcrGRlEAm5deZLlU31Vh2VFg+sfOyLOEgdFt+UCf4ZhdYmY/xBIb8gF589rg62oMQfxWw5e4kxwYxpBjM0xMBwixP+rrdkcLy7E8mtBVNzU5ow7i7QFpHrmH6qehiTlcbE+YtA94H4LPczcWY+gaYcDUztGQXJElaSlgHGjMYnGF5G4M6bKTxRkDRBMx3ChqUpbPwlsTFtUxtGCHToR7rktpheAhxizm6iL7nZVbKD2u1Nzp0V3iCHDPN7QNRG6GZiQ55tobH6pyBjXUjAgibn23lDeH10Fz3CmL5e63l0/ROd5hMRt6KW1QXaAKeKaXuYJLdk3MWlvc2PvuiDRIJbF5F46p9TDicua4+UpJWGy3ZiLC40C6qvKz+o/ckrt/Y8ljiWwbW1+pQ+GeSDJJnXvdJJJaJZeY6g3yCBAbooOL0gDTIES38l1JOWmUAvYIIjdR4Wzvl8kkllHz/RLQ2qLn0H1T8WwZGmL6T/AKlJJOQIFw7bjtP0CN3aerSuJKVofgqf2RsSbKPFMANhrKSSa2J6HYNsi9/N9FC+zXRbyn6pJKvCfoJpsGVhi5YPqVVlgFV9tXD6JJKXtif7FjR+A93ge3RRcwYZnggxcPsekriSqO0UN4V8KO+27s0JJLfm/Vf9BbG1qLYeYvDbyVk34t9LHUKdNxYx1RxLW2Bza/U/eupJQ2UjR4jidVlSnlqESwk6X/5ag/Afcp6dIZ3GLnKSdz5Ekkp/qUypYJeJ3cQe6Kx9qrgNMuiSSyWjN6Ez4PcKFjfID/e76lJJNAywawZXW3UlKmC15jqfkkkmg8BKHxj0/BQYqg3MDH2ep6pJJS0xouaOGblFth16JJJKCj//2Q==\"\n}\n```",
"_____no_output_____"
],
[
"### Configure API Gateway \n<a name=\"api-gateway\"></a>\n* Finally, we need an API to have the service accessible\n* This API accepts image POST to it and pass the image to ```invoke_image_object_detection```\n\n```curl -X POST -H 'content-type: application/octet-stream' --data-binary @$f $OD_API | jq .``` \n\n* we can create it by console also by aws cli ",
"_____no_output_____"
]
],
[
[
"!aws lambda add-permission \\\n --function-name invoke_endpoint \\\n --action lambda:InvokeFunction \\\n --statement-id apigateway \\\n --principal apigateway.amazonaws.com \n\n",
"_____no_output_____"
],
[
"!sed \"s/<account_id>/$account_id/g\" latestswagger2-template.json > latestswagger2-tmp.json \n!sed \"s/<region>/$region/g\" latestswagger2-tmp.json > latestswagger2.json ",
"_____no_output_____"
],
[
"api_info = !aws apigateway import-rest-api \\\n --fail-on-warnings \\\n --body 'file:////home/ec2-user/SageMaker/incremental-training-mlops/01-byoc/latestswagger2.json' ",
"_____no_output_____"
],
[
"api_info",
"_____no_output_____"
],
[
"api_obj = json.loads(''.join(api_info))\napi_id = api_obj['id']\napi_id",
"_____no_output_____"
],
[
"!aws apigateway create-deployment --rest-api-id $api_id --stage-name dev",
"_____no_output_____"
]
],
[
[
"### Manually Setup API-Gateway in Console\n<a name=\"binary-content\"></a>\n",
"_____no_output_____"
],
[
"#### Create Restful API \n<img src=\"../03-lambda-api/content_image/build_api_gateway.png\" alt=\"rest_api\" width=\"600\"/>",
"_____no_output_____"
],
[
"#### Create resource and methods \n* click the drop down manual and name your resource \n<img src=\"../03-lambda-api/content_image/create_resource.png\" alt=\"create_resource\" width=\"600\"/>\n\n* focus on the resource just created, click the drop down manual and select create method, then select backend lambda function \n<img src=\"../03-lambda-api/content_image/create_method.png\" alt=\"create_method\" width=\"600\"/>\n<img src=\"../03-lambda-api/content_image/create_method2.png\" alt=\"create_method\" width=\"600\"/>\n",
"_____no_output_____"
],
[
"### Configurations for passing the binary content to backend\n* Add binary media type in ```Settings``` of this API \n* Configure which attribute to extract and fill it in event in Lambda integration",
"_____no_output_____"
],
[
"<img src=\"../03-lambda-api/content_image/setting.png\" alt=\"binary_media\" width=\"600\"/>\n<img src=\"../03-lambda-api/content_image/integration.png\" alt=\"config lambda function\" width=\"600\"/>\n<img src=\"../03-lambda-api/content_image/setting1.png\" alt=\"config lambda function2\" width=\"600\"/>",
"_____no_output_____"
],
[
"### Test API Gateway \n<a name=\"test-api\"></a>",
"_____no_output_____"
]
],
[
[
"api_endpoint = \"https://{}.execute-api.{}.amazonaws.com/dev/classify\".format(api_id, region)\n\n!curl -X POST -H 'content-type: application/octet-stream' --data-binary @./input/data/competition/train/train_00002.wav $api_endpoint\n",
"_____no_output_____"
],
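[
"# Added example (optional): the same smoke test as the curl call above, issued from Python.\n# It assumes the requests package is available in this notebook environment.\nimport requests\n\nwith open('./input/data/competition/train/train_00002.wav', 'rb') as f:\n    payload = f.read()\n\nresp = requests.post(api_endpoint, data=payload, headers={'content-type': 'application/octet-stream'})\nprint(resp.status_code, resp.text)",
"_____no_output_____"
],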
[
"%store endpoint_name \n%store lambda_role_arn\n%store model_s3_path ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e7df130ce6bf37789f9875ba4fde45ae95ce08fe | 37,823 | ipynb | Jupyter Notebook | courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml_hybrid.ipynb | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml_hybrid.ipynb | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | courses/machine_learning/deepdive2/recommendation_systems/solutions/als_bqml_hybrid.ipynb | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | 32.946864 | 555 | 0.455411 | [
[
[
"# Hybrid Recommendations with the Movie Lens Dataset",
"_____no_output_____"
],
[
"### __Note:__ It is recommended that you complete the companion [__als_bqml.ipynb__](../solutions/als_bqml.ipynb) notebook before continuing with this __als_bqml_hybrid.ipynb__ notebook. If you already have the movielens dataset and trained model you can skip the \"Import the dataset and trained model\" section.\n\n## Learning Objectives\n1. Know extract user and product factors from a BigQuery Matrix Factorizarion Model\n2. Know how to format inputs for a BigQuery Hybrid Recommendation Model\n",
"_____no_output_____"
]
],
[
[
"import os\nimport tensorflow as tf\nPROJECT = \"your-project-id-here\" # REPLACE WITH YOUR PROJECT ID\n\n# Do not change these\nos.environ[\"PROJECT\"] = PROJECT\nos.environ[\"TFVERSION\"] = '2.5'",
"_____no_output_____"
]
],
[
[
"## Import the dataset and trained model\nIn the previous notebook, you imported 20 million movie recommendations and trained an ALS model with BigQuery ML\n\nTo save you the steps of having to do so again (if this is a new environment) you can run the below commands to copy over the clean data and trained model.",
"_____no_output_____"
],
[
"First create the BigQuery dataset and copy over the data",
"_____no_output_____"
]
],
[
[
"!bq mk movielens",
"BigQuery error in mk operation: Dataset 'qwiklabs-gcp-00-20dab82189fb:movielens'\nalready exists.\n"
]
],
[
[
"Next, copy over the trained recommendation model. Note that if you're project is in the EU you will need to change the location from US to EU below. Note that as of the time of writing you cannot copy models across regions with `bq cp`.",
"_____no_output_____"
]
],
[
[
"%%bash\nbq --location=US cp \\\ncloud-training-demos:movielens.recommender_16 \\\nmovielens.recommender_16\n\nbq --location=US cp \\\ncloud-training-demos:movielens.recommender_hybrid \\\nmovielens.recommender_hybrid",
"Table 'cloud-training-demos:movielens.recommender_16' successfully copied to 'qwiklabs-gcp-00-20dab82189fb:movielens.recommender_16'\nTable 'cloud-training-demos:movielens.recommender_hybrid' successfully copied to 'qwiklabs-gcp-00-20dab82189fb:movielens.recommender_hybrid'\n"
]
],
[
[
"Next, ensure the model still works by invoking predictions for movie recommendations:",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nSELECT * FROM\nML.PREDICT(MODEL `movielens.recommender_16`, (\n SELECT \n movieId, title, 903 AS userId\n FROM movielens.movies, UNNEST(genres) g\n WHERE g = 'Comedy'\n))\nORDER BY predicted_rating DESC\nLIMIT 5",
"_____no_output_____"
]
],
[
[
"### Incorporating user and movie information \nThe matrix factorization approach does not use any information about users or movies beyond what is available from the ratings matrix. However, we will often have user information (such as the city they live, their annual income, their annual expenditure, etc.) and we will almost always have more information about the products in our catalog. How do we incorporate this information in our recommendation model?\n\nThe answer lies in recognizing that the user factors and product factors that result from the matrix factorization approach end up being a concise representation of the information about users and products available from the ratings matrix. We can concatenate this information with other information we have available and train a regression model to predict the rating.\n### Obtaining user and product factors\nWe can get the user factors or product factors from ML.WEIGHTS. For example to get the product factors for movieId=96481 and user factors for userId=54192, we would do:",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nSELECT \n processed_input,\n feature,\n TO_JSON_STRING(factor_weights) AS factor_weights,\n intercept\nFROM ML.WEIGHTS(MODEL `movielens.recommender_16`)\nWHERE\n (processed_input = 'movieId' AND feature = '96481')\n OR (processed_input = 'userId' AND feature = '54192')",
"_____no_output_____"
]
],
[
[
"Multiplying these weights and adding the intercept is how we get the predicted rating for this combination of movieId and userId in the matrix factorization approach.\n\nThese weights also serve as a low-dimensional representation of the movie and user behavior. We can create a regression model to predict the rating given the user factors, product factors, and any other information we know about our users and products.\n### Creating input features\nThe MovieLens dataset does not have any user information, and has very little information about the movies themselves. To illustrate the concept, therefore, let’s create some synthetic information about users:\n",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.users AS\nSELECT\n userId,\n RAND() * COUNT(rating) AS loyalty,\n CONCAT(SUBSTR(CAST(userId AS STRING), 0, 2)) AS postcode\nFROM\n movielens.ratings\nGROUP BY userId",
"_____no_output_____"
]
],
[
[
"Input features about users can be obtained by joining the user table with the ML weights and selecting all the user information and the user factors from the weights array.\n",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nWITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights)) AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n)\n\nSELECT * FROM userFeatures\nLIMIT 5",
"_____no_output_____"
]
],
[
[
"Similarly, we can get product features for the movies data, except that we have to decide how to handle the genre since a movie could have more than one genre. If we decide to create a separate training row for each genre, then we can construct the product features using.",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nWITH productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n)\n\nSELECT * FROM productFeatures\nLIMIT 5",
"_____no_output_____"
]
],
[
[
"Combining these two WITH clauses and pulling in the rating corresponding the movieId-userId combination (if it exists in the ratings table), we can create the training dataset.\n\n**TODO 1**: Combine the above two queries to get the user factors and product factor for each rating.",
"_____no_output_____"
],
[
"**NOTE**: The below cell will take approximately 4~5 minutes for the completion.",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nCREATE OR REPLACE TABLE movielens.hybrid_dataset AS\n\n WITH userFeatures AS (\n SELECT \n u.*,\n (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS user_factors\n FROM movielens.users u\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'userId' AND feature = CAST(u.userId AS STRING)\n ),\n\n productFeatures AS (\n SELECT \n p.* EXCEPT(genres),\n g, (SELECT ARRAY_AGG(weight) FROM UNNEST(factor_weights))\n AS product_factors\n FROM movielens.movies p, UNNEST(genres) g\n JOIN ML.WEIGHTS(MODEL movielens.recommender_16) w\n ON processed_input = 'movieId' AND feature = CAST(p.movieId AS STRING)\n )\n\n SELECT\n p.* EXCEPT(movieId),\n u.* EXCEPT(userId),\n rating \n FROM productFeatures p, userFeatures u\n JOIN movielens.ratings r\n ON r.movieId = p.movieId AND r.userId = u.userId",
"_____no_output_____"
]
],
[
[
"One of the rows of this table looks like this:",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nSELECT *\nFROM movielens.hybrid_dataset\nLIMIT 1",
"_____no_output_____"
]
],
[
[
"Essentially, we have a couple of attributes about the movie, the product factors array corresponding to the movie, a couple of attributes about the user, and the user factors array corresponding to the user. These form the inputs to our “hybrid” recommendations model that builds off the matrix factorization model and adds in metadata about users and movies.\n### Training hybrid recommendation model\nAt the time of writing, BigQuery ML can not handle arrays as inputs to a regression model. Let’s, therefore, define a function to convert arrays to a struct where the array elements are its fields:\n",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_users(u ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n u1 FLOAT64,\n u2 FLOAT64,\n u3 FLOAT64,\n u4 FLOAT64,\n u5 FLOAT64,\n u6 FLOAT64,\n u7 FLOAT64,\n u8 FLOAT64,\n u9 FLOAT64,\n u10 FLOAT64,\n u11 FLOAT64,\n u12 FLOAT64,\n u13 FLOAT64,\n u14 FLOAT64,\n u15 FLOAT64,\n u16 FLOAT64\n > AS (STRUCT(\n u[OFFSET(0)],\n u[OFFSET(1)],\n u[OFFSET(2)],\n u[OFFSET(3)],\n u[OFFSET(4)],\n u[OFFSET(5)],\n u[OFFSET(6)],\n u[OFFSET(7)],\n u[OFFSET(8)],\n u[OFFSET(9)],\n u[OFFSET(10)],\n u[OFFSET(11)],\n u[OFFSET(12)],\n u[OFFSET(13)],\n u[OFFSET(14)],\n u[OFFSET(15)]\n));",
"_____no_output_____"
]
],
[
[
"which gives:",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nSELECT movielens.arr_to_input_16_users(u).*\nFROM (SELECT\n [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15.] AS u)",
"_____no_output_____"
]
],
[
[
"We can create a similar function named movielens.arr_to_input_16_products to convert the product factor array into named columns.\n\n**TODO 2**: Create a function that returns named columns from a size 16 product factor array.",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nCREATE OR REPLACE FUNCTION movielens.arr_to_input_16_products(p ARRAY<FLOAT64>)\nRETURNS \n STRUCT<\n p1 FLOAT64,\n p2 FLOAT64,\n p3 FLOAT64,\n p4 FLOAT64,\n p5 FLOAT64,\n p6 FLOAT64,\n p7 FLOAT64,\n p8 FLOAT64,\n p9 FLOAT64,\n p10 FLOAT64,\n p11 FLOAT64,\n p12 FLOAT64,\n p13 FLOAT64,\n p14 FLOAT64,\n p15 FLOAT64,\n p16 FLOAT64\n > AS (STRUCT(\n p[OFFSET(0)],\n p[OFFSET(1)],\n p[OFFSET(2)],\n p[OFFSET(3)],\n p[OFFSET(4)],\n p[OFFSET(5)],\n p[OFFSET(6)],\n p[OFFSET(7)],\n p[OFFSET(8)],\n p[OFFSET(9)],\n p[OFFSET(10)],\n p[OFFSET(11)],\n p[OFFSET(12)],\n p[OFFSET(13)],\n p[OFFSET(14)],\n p[OFFSET(15)]\n));",
"_____no_output_____"
]
],
[
[
"Then, we can tie together metadata about users and products with the user factors and product factors obtained from the matrix factorization approach to create a regression model to predict the rating:",
"_____no_output_____"
],
[
"**NOTE**: The below cell will take approximately 25~30 minutes for the completion.",
"_____no_output_____"
]
],
[
[
"%%bigquery --project $PROJECT\nCREATE OR REPLACE MODEL movielens.recommender_hybrid \nOPTIONS(model_type='linear_reg', input_label_cols=['rating'])\nAS\n\nSELECT\n * EXCEPT(user_factors, product_factors),\n movielens.arr_to_input_16_users(user_factors).*,\n movielens.arr_to_input_16_products(product_factors).*\nFROM\n movielens.hybrid_dataset",
"Executing query with job ID: 3ccc5208-b63e-479e-980f-2e472e0d65ba\nQuery executing: 1327.21s"
]
],
[
[
"There is no point looking at the evaluation metrics of this model because the user information we used to create the training dataset was fake (not the RAND() in the creation of the loyalty column) -- we did this exercise in order to demonstrate how it could be done. And of course, we could train a dnn_regressor model and optimize the hyperparameters if we want a more sophisticated model. But if we are going to go that far, it might be better to consider using [Auto ML tables](https://cloud.google.com/automl-tables).\n",
"_____no_output_____"
],
[
"Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7df2a5911283e30119488535529139d73d27de6 | 588 | ipynb | Jupyter Notebook | notebooks/1.0-xs-visualization.ipynb | SinAustin/advantage-investing | 3ab669f40d4ce1304d7f1e2c08c3d3f3de096130 | [
"FTL"
] | null | null | null | notebooks/1.0-xs-visualization.ipynb | SinAustin/advantage-investing | 3ab669f40d4ce1304d7f1e2c08c3d3f3de096130 | [
"FTL"
] | null | null | null | notebooks/1.0-xs-visualization.ipynb | SinAustin/advantage-investing | 3ab669f40d4ce1304d7f1e2c08c3d3f3de096130 | [
"FTL"
] | null | null | null | 17.294118 | 42 | 0.528912 | [] | [] | [] |
e7df2bd0ee3e47f149fbaa11d2d48082b388afe5 | 404,860 | ipynb | Jupyter Notebook | data_cleaning/notebooks/data_cleaning.ipynb | CamRoy008/AlouetteApp | 4aba620c56e38a6697e6eba1dc87b2fddbebc4c5 | [
"MIT"
] | null | null | null | data_cleaning/notebooks/data_cleaning.ipynb | CamRoy008/AlouetteApp | 4aba620c56e38a6697e6eba1dc87b2fddbebc4c5 | [
"MIT"
] | null | null | null | data_cleaning/notebooks/data_cleaning.ipynb | CamRoy008/AlouetteApp | 4aba620c56e38a6697e6eba1dc87b2fddbebc4c5 | [
"MIT"
] | null | null | null | 207.301587 | 76,456 | 0.89405 | [
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.dates\nfrom dateutil.relativedelta import relativedelta\nimport datetime",
"_____no_output_____"
],
[
"df_num_data = pd.read_csv('data/process_1/num_data.csv')\nprint(df_num_data.shape)\nsum = df_num_data.shape[0]\n\nfor i in range(1, 6):\n read_data = pd.read_csv('data/process_' + str(i+1) + '/num_data.csv')\n print(read_data.shape)\n sum += read_data.shape[0]\n df_num_data = df_num_data.append(read_data)\n \nprint(\"sum:\", sum)\nprint(\"df:\", df_num_data.shape[0])",
"(119062, 18)\n(93618, 18)\n(72401, 18)\n(70294, 18)\n(56807, 18)\n(55594, 18)\nsum: 467776\ndf: 467776\n"
],
[
"df_dot_data = pd.read_csv('data/process_1/dot_data.csv')\nprint(df_dot_data.shape)\nsum = df_dot_data.shape[0]\n\nfor i in range(1, 6):\n read_data = pd.read_csv('data/process_' + str(i+1) + '/dot_data.csv')\n print(read_data.shape)\n sum += read_data.shape[0]\n df_dot_data = df_dot_data.append(read_data)\n \nprint(\"sum:\", sum)\nprint(\"df:\", df_dot_data.shape[0])",
"(0, 15)\n(0, 15)\n(0, 15)\n(0, 15)\n(0, 15)\n(0, 15)\nsum: 0\ndf: 0\n"
],
[
"df_loss = pd.read_csv('data/process_1/loss.csv')\nprint(df_loss.shape)\nsum = df_loss.shape[0]\n\nfor i in range(1, 6):\n read_data = pd.read_csv('data/process_' + str(i+1) + '/loss.csv')\n print(read_data.shape)\n sum += read_data.shape[0]\n df_loss = df_loss.append(read_data)\n \nprint(\"sum:\", sum)\nprint(\"df:\", df_loss.shape[0])",
"(2963, 4)\n(1255, 4)\n(824, 4)\n(1670, 4)\n(904, 4)\n(2181, 4)\nsum: 9797\ndf: 9797\n"
],
[
"df_outlier = pd.read_csv('data/process_1/outlier.csv')\nprint(df_outlier .shape)\nsum = df_outlier .shape[0]\n\nfor i in range(1, 6):\n read_data = pd.read_csv('data/process_' + str(i+1) + '/outlier.csv')\n print(read_data.shape)\n sum += read_data.shape[0]\n df_outlier = df_outlier.append(read_data)\n \nprint(\"sum:\", sum)\nprint(\"df:\", df_outlier.shape[0])",
"(7878, 5)\n(4755, 5)\n"
],
[
"# total number of ionograms processed\ndf_num_data.shape[0] + df_dot_data.shape[0] + df_loss.shape[0] + df_outlier.shape[0]",
"_____no_output_____"
],
[
"df_num_data.head()",
"_____no_output_____"
]
],
[
[
"# Outliers",
"_____no_output_____"
]
],
[
[
"# day_1 (should be 0-3)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['day_1'].hist(bins=60)\nax.set_ylabel(\"Ionograms\")\nax.set_xlabel(\"day_1\")\n\n# day_1 outliers above 10\nplt.subplot(1, 3, 2)\nax = df_num_data[df_num_data['day_1']>=10]['day_1'].hist(bins=50)\nax.set_xlabel(\"day_1\")\n\n# day_1 below 10\nplt.subplot(1, 3, 3)\nax = df_num_data[df_num_data['day_1']<10]['day_1'].hist(bins=10)\nax.set_xlabel(\"day_1\")\n\nprint(df_num_data[df_num_data['day_1']>=10].shape[0])\nprint(df_num_data[df_num_data['day_1']>3].shape[0])\nprint(df_num_data[df_num_data['day_1']<=3].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['day_1']>3].shape[0] / df_num_data.shape[0])",
"505\n30365\n437411\n% error: 6.491354836502942\n"
],
[
"# hour_1 (should be 0-2)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['hour_1'].hist(bins=60)\nax.set_ylabel(\"Ionograms\")\nax.set_xlabel(\"hour_1\")\n\n# # hour_1 outliers above 10\nplt.subplot(1, 3, 2)\ndf_num_data[df_num_data['hour_1']>=10]['hour_1'].hist(bins=50)\nax.set_xlabel(\"hour_1\")\n\n# # hour_1 outliers below 10\nplt.subplot(1, 3, 3)\ndf_num_data[df_num_data['hour_1']<10]['hour_1'].hist(bins=10)\nax.set_xlabel(\"hour_1\")\n\nprint(df_num_data[df_num_data['hour_1']>=10].shape[0])\nprint(df_num_data[df_num_data['hour_1']>2].shape[0])\nprint(df_num_data[df_num_data['hour_1']<=2].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['hour_1']>2].shape[0] / df_num_data.shape[0])",
"474\n27328\n440448\n% error: 5.842112464085374\n"
],
[
"# minute_1 (should be 0-6)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['minute_1'].hist(bins=60)\nax.set_ylabel(\"Ionograms\")\nax.set_xlabel(\"minute_1\")\n\nplt.subplot(1, 3, 2)\ndf_num_data[df_num_data['minute_1']>6]['minute_1'].hist(bins=55)\nax.set_xlabel(\"minute_1\")\n\nplt.subplot(1, 3, 3)\ndf_num_data[df_num_data['minute_1']<=10]['minute_1'].hist(bins=10)\nax.set_xlabel(\"minute_1\")\n\nprint(df_num_data[df_num_data['minute_1']>=10].shape[0])\nprint(df_num_data[df_num_data['minute_1']>6].shape[0])\nprint(df_num_data[df_num_data['minute_1']<=6].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['minute_1']>6].shape[0] / df_num_data.shape[0])",
"1102\n12515\n455261\n% error: 2.6754258448488164\n"
],
[
"# year (should be 0-10)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['year'].hist(bins=range(0,90))\nax.set_xlabel(\"year\")\nax.set_ylabel(\"Ionograms\")\n\nplt.subplot(1, 3, 2)\nax = df_num_data[df_num_data['year'] < 20]['year'].hist(bins=range(0,20))\nax.set_xlabel(\"year\")\n\nplt.subplot(1, 3, 3)\nax = df_num_data[df_num_data['year'] <= 11]['year'].hist(bins=range(0,12))\nax.set_xlabel(\"year\")\n\nprint(df_num_data[df_num_data['year']>10].shape[0])\nprint(df_num_data[df_num_data['year']<=10].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['year']>10].shape[0] / df_num_data.shape[0])",
"634\n467142\n% error: 0.13553495690244904\n"
],
[
"# station number 1 (should be 0-7)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['station_number_1'].hist(bins=60)\nax.set_xlabel(\"station number\")\nax.set_ylabel(\"Ionograms\")\n\nplt.subplot(1, 3, 2)\nax = df_num_data[df_num_data['station_number_1'] > 7]['station_number_1'].hist(bins=15)\nax.set_xlabel(\"station number\")\n\nprint(df_num_data[df_num_data['station_number_1']>7].shape[0])\nprint(df_num_data[df_num_data['station_number_1']<=7].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['station_number_1']>7].shape[0] / df_num_data.shape[0])",
"15068\n452708\n% error: 3.2211998905459023\n"
],
[
"# satellite number (should be 1)\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = df_num_data['satellite_number'].hist(bins=60)\nax.set_xlabel(\"satellite_number\")\nax.set_ylabel(\"Ionograms\")\n\nplt.subplot(1, 3, 2)\nax = df_num_data[df_num_data['satellite_number'] > 1]['satellite_number'].hist(bins=50)\nax.set_xlabel(\"satellite_number\")\n\nplt.subplot(1, 3, 3)\nax = df_num_data[df_num_data['satellite_number'] < 10]['satellite_number'].hist(bins=10)\nax.set_xlabel(\"satellite_number\")\n\nprint(df_num_data[df_num_data['satellite_number']>1].shape[0])\nprint(df_num_data[df_num_data['satellite_number']==1].shape[0])\n\nprint(\"% error:\", 100 * df_num_data[df_num_data['satellite_number']!=1].shape[0] / df_num_data.shape[0])",
"14487\n421789\n% error: 9.830987481187577\n"
]
],
[
[
"## Output data\n",
"_____no_output_____"
]
],
[
[
"df_num_data.to_csv(\"data/all_num_data.csv\")\ndf_dot_data.to_csv(\"data/all_dot_data.csv\")\ndf_loss.to_csv(\"data/all_loss.csv\")\ndf_outlier.to_csv(\"data/all_outlier.csv\")",
"_____no_output_____"
],
[
"df_num_data = pd.read_csv(\"data/all_num_data.csv\")",
"_____no_output_____"
]
],
[
[
"## Combine columns",
"_____no_output_____"
]
],
[
[
"df_num_data['day'] = df_num_data.apply(lambda x: int(str(x['day_1']) + str(x['day_2']) + str(x['day_3'])), axis=1)\ndf_num_data['hour'] = df_num_data.apply(lambda x: int(str(x['hour_1']) + str(x['hour_2'])), axis=1)\ndf_num_data['minute'] = df_num_data.apply(lambda x: int(str(x['minute_1']) + str(x['minute_2'])), axis=1)\ndf_num_data['second'] = df_num_data.apply(lambda x: int(str(x['second_1']) + str(x['second_2'])), axis=1)\ndf_num_data['station_number'] = df_num_data.apply(lambda x: int(str(x['station_number_1']) + str(x['station_number_2'])), axis=1)",
"_____no_output_____"
],
[
"df_num_data.head()",
"_____no_output_____"
],
[
"rows = df_num_data.shape[0]\nprint(\"Rows in unfiltered df:\", rows)\nprint()\n\nfiltered_df = df_num_data[df_num_data.year <= 12]\nprint(\"Errors in 'year':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.day <= 365]\nprint(\"Errors in 'day':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.hour <= 24]\nprint(\"Errors in 'hour':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.minute <= 60]\nprint(\"Errors in 'minute':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.second <= 60]\nprint(\"Errors in 'second':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.station_number <= 99]\nprint(\"Errors in 'station_number':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.satellite_number == 1]\nprint(\"Errors in 'satellite_number':\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nprint()\nprint(\"Rows in filtered df:\", rows)\nprint(\"Total error rate:\", 100 - 100 * rows / df_num_data.shape[0])",
"Rows in unfiltered df: 467776\n\nErrors in 'year': 258\nErrors in 'day': 32383\nErrors in 'hour': 8860\nErrors in 'minute': 4337\nErrors in 'second': 8107\nErrors in 'station_number': 194\nErrors in 'satellite_number': 6332\n\nRows in filtered df: 407305\nTotal error rate: 12.92734129155835\n"
]
],
[
[
"## Convert to datetime object",
"_____no_output_____"
]
],
[
[
"filtered_df2 = filtered_df.copy()\n\nfiltered_df['timestamp'] = filtered_df.apply(lambda x: datetime.datetime(year=1962, month=1, day=1) + \\\n relativedelta(years=x['year'], days=x['day']-1, hours=x['hour'], minutes=x['minute'], seconds=x['second']), axis=1)\n\nfiltered_df2['timestamp'] = filtered_df2.apply(lambda x: datetime.datetime(year=1960, month=1, day=1) + \\\n relativedelta(years=x['year'], days=x['day']-1, hours=x['hour'], minutes=x['minute'], seconds=x['second']), axis=1)",
"_____no_output_____"
],
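The `days=x['day']-1` offset above is easy to get wrong: day-of-year 1 has to map back onto the baseline date itself. A quick sanity check with made-up values (illustration only, not part of the cleaning pipeline):

```python
import datetime
from dateutil.relativedelta import relativedelta

# year offset 3, day-of-year 270, 12:00 relative to the 1962-01-01 baseline;
# day 270 of 1965 is September 27, so this should print 1965-09-27 12:00:00.
print(datetime.datetime(1962, 1, 1) + relativedelta(years=3, days=270 - 1, hours=12))
```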
[
"# \nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\n\nax = filtered_df['timestamp'].hist(bins=100)\n\nax.set_xlabel(\"satellite_number\")\nax.set_ylabel(\"Ionograms\")\n\n\nplt.subplot(1, 3, 2)\n\nax = filtered_df2['timestamp'].hist(bins=100)\n\nax.set_xlabel(\"satellite_number\")\nax.set_ylabel(\"Ionograms\")\n",
"C:\\Anaconda3\\lib\\site-packages\\pandas\\plotting\\_converter.py:129: FutureWarning: Using an implicitly registered datetime converter for a matplotlib plotting method. The converter was registered by pandas on import. Future versions of pandas will require you to explicitly register matplotlib converters.\n\nTo register the converters:\n\t>>> from pandas.plotting import register_matplotlib_converters\n\t>>> register_matplotlib_converters()\n warnings.warn(msg, FutureWarning)\n"
],
[
"print(\"Rows in df:\", filtered_df.shape[0])",
"Rows in df: 407305\n"
],
[
"alouette_launch_date = datetime.datetime(year=1962, month=9, day=29)\nalouette_deactivation_date = datetime.datetime(year=1972, month=12, day=31) # don't know eact date \n\nfiltered_df = filtered_df[filtered_df.timestamp >= alouette_launch_date]\nprint(\"Errors in timestamp (date too early):\", rows - filtered_df.shape[0])\nrows = filtered_df.shape[0]\n\nfiltered_df = filtered_df[filtered_df.timestamp <= alouette_deactivation_date]\nprint(\"Errors in timestamp (date too late):\", rows - filtered_df.shape[0])\n\nprint(\"Total error rate:\", 100 - 100 * rows / filtered_df.shape[0])",
"Errors in timestamp (date too early): 669\nErrors in timestamp (date too late): 70\nTotal error rate: -0.017217376760470415\n"
],
[
"filtered_df2 = filtered_df2[filtered_df2.timestamp >= alouette_launch_date]\nprint(\"Errors in timestamp (date too early):\", rows - filtered_df2.shape[0])\nrows = filtered_df2.shape[0]\n\nfiltered_df2 = filtered_df2[filtered_df2.timestamp <= alouette_deactivation_date]\nprint(\"Errors in timestamp (date too late):\", rows - filtered_df2.shape[0])\n\nprint(\"Total error rate:\", 100 - 100 * rows / filtered_df2.shape[0])",
"Errors in timestamp (date too early): 1464\nErrors in timestamp (date too late): 0\nTotal error rate: 0.0\n"
],
[
"# \nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\n\nax = filtered_df['timestamp'].hist(bins=100)\n\nax.set_xlabel(\"satellite_number\")\nax.set_ylabel(\"Ionograms\")\n\n\nplt.subplot(1, 3, 2)\n\nax = filtered_df2['timestamp'].hist(bins=100)\n\nax.set_xlabel(\"satellite_number\")\nax.set_ylabel(\"Ionograms\")",
"_____no_output_____"
],
[
"# Match station_numbers with their respective station names and locations\ndf_stations = pd.read_csv(\"data/station_codes.csv\")\n\ndf_stations.columns = ['station_name', 'station_number', 'before_07_01_1965','3_letter_code', 'lat', 'lon']\ndf_stations.astype({'station_number': 'int32'}).dtypes\n\nstations_dict=df_stationscsvict('list')",
"_____no_output_____"
],
[
"df_stations.astype({'station_name': 'str'}).dtypes",
"_____no_output_____"
],
[
"stations_dict['station_number'].index(6)\nstations_dict['station_name'][0]",
"_____no_output_____"
],
[
"#filtered_df.astype({'station_number': 'int32'}).dtypes\nfiltered_df.columns",
"_____no_output_____"
],
[
"type(get_station_name(1, datetime.datetime.strptime('1965-05-25 16:48:01','%Y-%m-%d %H:%M:%S')))",
"_____no_output_____"
],
[
"df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], index=['a', 'b', 'c'], columns=['A', 'B'])\ndf['C'] = \"Hello\" \ntype(df['C'][0])",
"_____no_output_____"
],
[
"df_stations[\"station_name\"] = df_stations[\"station_name\"].astype(str, errors='raise')",
"_____no_output_____"
],
[
"def get_station_name(station_number, timestamp):\n # print(station_number, timestamp)\n if timestamp >= datetime.datetime(year=1965, month=7, day=1) and station_number in stations_dict['station_number']:\n name = stations_dict['station_name'][stations_dict['station_number'].index(station_number)]\n code = stations_dict['3_letter_code'][stations_dict['station_number'].index(station_number)]\n lat = stations_dict['lat'][stations_dict['station_number'].index(station_number)]\n lon = stations_dict['lon'][stations_dict['station_number'].index(station_number)]\n elif timestamp < datetime.datetime(year=1965, month=7, day=1) and station_number in stations_dict['before_07_01_1965']:\n name = stations_dict['station_name'][stations_dict['before_07_01_1965'].index(station_number)]\n code = stations_dict['3_letter_code'][stations_dict['before_07_01_1965'].index(station_number)]\n lat = stations_dict['lat'][stations_dict['before_07_01_1965'].index(station_number)]\n lon = stations_dict['lon'][stations_dict['before_07_01_1965'].index(station_number)]\n elif timestamp < datetime.datetime(year=1963, month=4, day=25) and station_number == 10:\n name = stations_dict['station_name'][stations_dict['station_name'].index('Winkfield, England')]\n code = stations_dict['3_letter_code'][stations_dict['station_name'].index('Winkfield, England')]\n lat = stations_dict['lat'][stations_dict['station_name'].index('Winkfield, England')]\n lon = stations_dict['lon'][stations_dict['station_name'].index('Winkfield, England')]\n \n # assumption, need to look into these:\n elif timestamp >= datetime.datetime(year=1965, month=7, day=1) and station_number == 9:\n name = stations_dict['station_name'][stations_dict['station_name'].index('South Atlantic, Falkland Islands')]\n code = stations_dict['3_letter_code'][stations_dict['station_name'].index('South Atlantic, Falkland Islands')]\n lat = stations_dict['lat'][stations_dict['station_name'].index('South Atlantic, Falkland Islands')]\n lon = stations_dict['lon'][stations_dict['station_name'].index('South Atlantic, Falkland Islands')]\n elif timestamp >= datetime.datetime(year=1965, month=7, day=1) and station_number == 7:\n name = stations_dict['station_name'][stations_dict['station_name'].index('Quito, Ecuador')]\n code = stations_dict['3_letter_code'][stations_dict['station_name'].index('Quito, Ecuador')]\n lat = stations_dict['lat'][stations_dict['station_name'].index('Quito, Ecuador')]\n lon = stations_dict['lon'][stations_dict['station_name'].index('Quito, Ecuador')]\n elif timestamp >= datetime.datetime(year=1965, month=7, day=1) and station_number == 4:\n name = stations_dict['station_name'][stations_dict['station_name'].index(\"St. John's, Newfoundland\")]\n code = stations_dict['3_letter_code'][stations_dict['station_name'].index(\"St. John's, Newfoundland\")]\n lat = stations_dict['lat'][stations_dict['station_name'].index(\"St. John's, Newfoundland\")]\n lon = stations_dict['lon'][stations_dict['station_name'].index(\"St. John's, Newfoundland\")]\n else:\n name = None\n code = None\n lat = None\n lon = None\n \n #if len([name, code, lat, lon]) != 4:\n \n return name, code, lat, lon\n",
"_____no_output_____"
],
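The chain of `elif` branches above repeats the same four `stations_dict` lookups. A more compact equivalent of the two main branches is sketched below; the special cases from the original (Winkfield before 1963-04-25 and the assumed mappings for station numbers 9, 7 and 4 after the cutover) would still need to be layered on top, so this is a refactoring sketch rather than a drop-in replacement:

```python
import datetime

STATION_CUTOVER = datetime.datetime(year=1965, month=7, day=1)

def lookup_station(column, station_number):
    # Return (name, code, lat, lon) for station_number in the given stations_dict column, else Nones.
    if station_number not in stations_dict[column]:
        return (None, None, None, None)
    i = stations_dict[column].index(station_number)
    return (stations_dict['station_name'][i],
            stations_dict['3_letter_code'][i],
            stations_dict['lat'][i],
            stations_dict['lon'][i])

def get_station_name_compact(station_number, timestamp):
    column = 'station_number' if timestamp >= STATION_CUTOVER else 'before_07_01_1965'
    return lookup_station(column, station_number)
```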
[
"#station_values = filtered_df.apply(lambda x: get_station_name(x['station_number'], x['timestamp']), axis=1)\n\nfiltered_df['station_name'] = None\nfiltered_df['3_letter_code'] = None\nfiltered_df['lat'] = None\nfiltered_df['lon'] = None\n\nfor i in range(len(filtered_df.index)):\n station_values = get_station_name(filtered_df.iloc[i]['station_number'], filtered_df.iloc[i]['timestamp'])\n \n filtered_df.iloc[i, filtered_df.columns.get_loc('station_name')] = station_values[0]\n filtered_df.iloc[i, filtered_df.columns.get_loc('3_letter_code')] = station_values[1]\n filtered_df.iloc[i, filtered_df.columns.get_loc('lat')] = station_values[2]\n filtered_df.iloc[i, filtered_df.columns.get_loc('lon')] = station_values[3]\n \n#filtered_df['station_name'], filtered_df['3_letter_code'], filtered_df['lat'], filtered_df['lon'] = ",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(20, 5))\n\nax = filtered_df[filtered_df['station_number'] == 4].apply(lambda x: x['timestamp'].date(), axis=1).value_counts().plot()\n\nax.set_xlabel(\"station_number\")\nax.set_ylabel(\"Ionograms\")\nadd_value_labels(ax)",
"_____no_output_____"
],
[
"print(filtered_df.isnull().sum())\nprint(len(filtered_df.index))",
"Unnamed: 0 0\nfile_name 0\nfmin 0\nmax_depth 0\nsubdir_name 0\nsatellite_number 0\nyear 0\nday_1 0\nday_2 0\nday_3 0\nhour_1 0\nhour_2 0\nminute_1 0\nminute_2 0\nsecond_1 0\nsecond_2 0\nstation_number_1 0\nstation_number_2 0\nday 0\nhour 0\nminute 0\nsecond 0\nstation_number 0\ntimestamp 0\nstation_name 12491\n3_letter_code 14045\nlat 12491\nlon 12491\ndtype: int64\n406566\n"
],
[
"# function from https://stackoverflow.com/questions/28931224/adding-value-labels-on-a-matplotlib-bar-chart\ndef add_value_labels(ax, spacing=5):\n \"\"\"Add labels to the end of each bar in a bar chart.\n\n Arguments:\n ax (matplotlib.axes.Axes): The matplotlib object containing the axes\n of the plot to annotate.\n spacing (int): The distance between the labels and the bars.\n \"\"\"\n\n # For each bar: Place a label\n for rect in ax.patches:\n # Get X and Y placement of label from rect.\n y_value = rect.get_height()\n x_value = rect.get_x() + rect.get_width() / 2\n\n # Number of points between bar and label. Change to your liking.\n space = spacing\n # Vertical alignment for positive values\n va = 'bottom'\n\n # If value of bar is negative: Place label below bar\n if y_value < 0:\n # Invert space to place label below\n space *= -1\n # Vertically align label at top\n va = 'top'\n\n # Use Y value as label and format number with one decimal place\n label = y_value\n\n # Create annotation\n ax.annotate(\n label, # Use `label` as label\n (x_value, y_value),# Place label at end of the bar\n xytext=(0, space), # Vertically shift label by `space`\n textcoords=\"offset points\", # Interpret `xytext` as offset in points\n ha='center', # Horizontally center label\n va=va) # Vertically align label differently for\n # positive and negative values.",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(20, 5))\n\nax = filtered_df['station_name'].value_counts().plot.bar()\n\nax.set_xlabel(\"station_name\")\nax.set_ylabel(\"Ionograms\")\nadd_value_labels(ax)",
"_____no_output_____"
],
[
"filtered_df['station_name'].value_counts()\n# St John's amd Santiago don't show up anywhere",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(20, 5))\n\nax = filtered_df[filtered_df['station_name'].isnull()]['station_number'].value_counts().plot.bar()\n\nax.set_xlabel(\"station_number\")\nax.set_ylabel(\"Ionograms\")\nadd_value_labels(ax)",
"_____no_output_____"
],
[
"#filtered_df[filtered_df['station_number'] == 7].apply(lambda x: x['timestamp'].date(), axis=1)",
"_____no_output_____"
],
[
"datetime.month(filtered_df.iloc[26338]['timestamp'].date())",
"_____no_output_____"
],
[
"print(type(list(filtered_df['station_name'])[0]))\nprint(type(list(df_stations['station_name'])[0]))",
"<class 'str'>\n<class 'str'>\n"
],
[
"initial_df_size = 467776\ntotal_error_rate = 100 * (1 - len(filtered_df.dropna().index) / initial_df_size)\n\nprint(\"Final df size:\", len(filtered_df.dropna().index))\nprint(\"Total error rate: \" + str(total_error_rate) + '%')",
"Final df size: 392521\nTotal error rate: 16.087828362293067%\n"
],
[
"# fmin\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 2, 1)\nax = filtered_df['fmin'].hist(bins=100)\nax.set_xlabel(\"fmin\")\nax.set_ylabel(\"Ionograms\")\n\nplt.subplot(1, 2, 2)\nax = filtered_df[filtered_df['fmin'] < 2]['fmin'].hist(bins=120)\nax.set_xlabel(\"fmin\")",
"_____no_output_____"
],
[
"# max depth\nfig = plt.figure(figsize=(20, 5))\n\nplt.subplot(1, 3, 1)\nax = filtered_df['max_depth'].hist(bins=100, orientation='horizontal')\nax.set_ylabel(\"max_depth\")\nax.set_xlabel(\"Ionograms\")\n\nplt.subplot(1, 3, 2)\nax = filtered_df[filtered_df['max_depth'] < 1600]['max_depth'].hist(bins=80, orientation='horizontal')\nax.set_ylabel(\"max_depth\")\nax.set_xlabel(\"Ionograms\")\n\nplt.subplot(1, 3, 3)\nax = filtered_df[filtered_df['max_depth'] > 3000]['max_depth'].hist(bins=80, orientation='horizontal')\nax.set_ylabel(\"max_depth\")\nax.set_xlabel(\"Ionograms\")",
"_____no_output_____"
]
],
[
[
"### Fix file naming",
"_____no_output_____"
]
],
[
[
"#def fix_file_name(file_name):\n\n# dir_0 = []\n# dir_1 = []\n# dir_2 = []\n# dir_3 = []\n\n# file_array = filtered_df.iloc[i]['file_name'].replace('\\\\', '/').split('/')\n# file_array[-3:]",
"_____no_output_____"
],
[
"df_final = filtered_df.copy()\ndf_final['file_name'] = filtered_df.apply(lambda x: '/'.join(x['file_name'].replace('\\\\', '/').split('/')[-3:])[:-4], axis=1)\n\n#ftp://ftp.asc-csa.gc.ca/users/OpenData_DonneesOuvertes/pub/AlouetteData/Alouette%20Data/R014207815/3488-15A/1.png",
"_____no_output_____"
]
],
[
[
"### Drop unnecessary columns",
"_____no_output_____"
]
],
[
[
"df_final.columns\ndf_final = df_final.drop(columns=['Unnamed: 0', 'year', 'day_1', 'day_2', 'day_3', 'hour_1','hour_2', 'minute_1', 'minute_2',\\\n 'second_1', 'second_2','station_number_1', 'station_number_2', 'day', 'hour', 'minute','second'])",
"_____no_output_____"
]
],
[
[
"## Export final dateframe",
"_____no_output_____"
]
],
[
[
"len(df_final.index)",
"_____no_output_____"
],
[
"df_final.to_csv(\"data/final_alouette_data.csv\")",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7df4181255044f875d48454935374d10bdfa9c3 | 8,279 | ipynb | Jupyter Notebook | 00_AMLConfiguration.ipynb | Bhaskers-Blu-Org2/az-ml-batch-score | 40c6e1e6117e5ffee270e564b2b170f56189f737 | [
"MIT"
] | 7 | 2020-04-13T11:43:23.000Z | 2022-03-03T09:44:58.000Z | 00_AMLConfiguration.ipynb | itsshaikaslam/az-ml-batch-score | 40c6e1e6117e5ffee270e564b2b170f56189f737 | [
"MIT"
] | 1 | 2019-01-29T18:06:30.000Z | 2019-01-29T18:06:30.000Z | 00_AMLConfiguration.ipynb | itsshaikaslam/az-ml-batch-score | 40c6e1e6117e5ffee270e564b2b170f56189f737 | [
"MIT"
] | 14 | 2020-03-08T04:38:33.000Z | 2022-01-20T07:08:55.000Z | 30.549815 | 370 | 0.613238 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.",
"_____no_output_____"
],
[
"# Installation and configuration\nThis notebook configures the notebooks in this tutorial to connect to an Azure Machine Learning (AML) Workspace. You can use an existing workspace or create a new one.",
"_____no_output_____"
]
],
[
[
"import azureml.core\nfrom azureml.core import Workspace\nfrom azureml.core.authentication import ServicePrincipalAuthentication, AzureCliAuthentication, \\\n InteractiveLoginAuthentication\nfrom azureml.exceptions import AuthenticationException\nfrom dotenv import set_key, get_key, find_dotenv\nfrom pathlib import Path",
"_____no_output_____"
]
],
[
[
"## Prerequisites",
"_____no_output_____"
],
[
"If you have already completed the prerequisites and selected the correct Kernel for this notebook, the AML Python SDK is already installed. Let's check the AML SDK version.",
"_____no_output_____"
]
],
[
[
"print(\"AML SDK Version:\", azureml.core.VERSION)",
"_____no_output_____"
]
],
[
[
"## Set up your Azure Machine Learning workspace",
"_____no_output_____"
],
[
"To create or access an Azure ML Workspace, you will need the following information:\n\n* Your subscription id\n* A resource group name\n* A name for your workspace\n* A region for your workspace\n\n**Note**: As with other Azure services, there are limits on certain resources like cluster size associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.",
"_____no_output_____"
],
[
"If you have a workspace created already, you need to get your subscription and workspace information. You can find the values for those by visiting your workspace in the [Azure portal](http://portal.azure.com). If you don't have a workspace, the create workspace command in the next section will create a resource group and a workspace using the names you provide.",
"_____no_output_____"
],
[
"Replace the values in the following cell with your information. If you would like to use service principal authentication as described [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/manage-azureml-service/authentication-in-azureml/authentication-in-azure-ml.ipynb) make sure you provide the optional values as well. ",
"_____no_output_____"
]
],
[
[
"# Azure resources\nsubscription_id = \"\"\nresource_group = \"\" \nworkspace_name = \"\" \nworkspace_region = \"\"\n\ntenant_id = \"YOUR_TENANT_ID\" # Optional for service principal authentication\nusername = \"YOUR_SERVICE_PRINCIPAL_APPLICATION_ID\" # Optional for service principal authentication\npassword = \"YOUR_SERVICE_PRINCIPAL_PASSWORD\" # Optional for service principal authentication",
"_____no_output_____"
]
],
[
[
"Create and initialize a dotenv file for storing parameters used in multiple notebooks.",
"_____no_output_____"
]
],
[
[
"env_path = find_dotenv()\nif env_path == \"\":\n Path(\".env\").touch()\n env_path = find_dotenv()",
"_____no_output_____"
],
[
"set_key(env_path, \"subscription_id\", subscription_id) # Replace YOUR_AZURE_SUBSCRIPTION\nset_key(env_path, \"resource_group\", resource_group)\nset_key(env_path, \"workspace_name\", workspace_name)\nset_key(env_path, \"workspace_region\", workspace_region)\n\nset_key(env_path, \"tenant_id\", tenant_id)\nset_key(env_path, \"username\", username)\nset_key(env_path, \"password\", password)",
"_____no_output_____"
]
],
[
[
"#### Create the workspace\nThis cell will create an AML workspace for you in a subscription, provided you have the correct permissions.\n\nThis will fail when:\n1. You do not have permission to create a workspace in the resource group\n2. You do not have permission to create a resource group if it's non-existing.\n2. You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n\nIf workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources. If this cell succeeds, you're done configuring AML! ",
"_____no_output_____"
]
],
[
[
"def get_auth(env_path):\n if get_key(env_path, 'password') != \"YOUR_SERVICE_PRINCIPAL_PASSWORD\":\n aml_sp_password = get_key(env_path, 'password')\n aml_sp_tennant_id = get_key(env_path, 'tenant_id')\n aml_sp_username = get_key(env_path, 'username')\n auth = ServicePrincipalAuthentication(\n tenant_id=aml_sp_tennant_id,\n service_principal_id=aml_sp_username,\n service_principal_password=aml_sp_password\n )\n else:\n try:\n auth = AzureCliAuthentication()\n auth.get_authentication_header()\n except AuthenticationException:\n auth = InteractiveLoginAuthentication()\n\n return auth\n\nws = Workspace.create(\n name=workspace_name,\n subscription_id=subscription_id,\n resource_group=resource_group,\n location=workspace_region,\n create_resource_group=True,\n auth=get_auth(env_path),\n exist_ok=True,\n)",
"_____no_output_____"
]
],
[
[
"Let's check the details of the workspace.",
"_____no_output_____"
]
],
[
[
"ws.get_details()",
"_____no_output_____"
]
],
[
[
"Let's write the workspace configuration for the rest of the notebooks to connect to the workspace.",
"_____no_output_____"
]
],
[
[
"ws.write_config()",
"_____no_output_____"
]
],
[
[
"You are now ready to move on to the [data preperation](01_DataPrep.ipynb) notebook.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7df49718a412a16b244bfd63991bd29c28c80f3 | 4,528 | ipynb | Jupyter Notebook | practice/matrix/hardware.ipynb | alepoydes/introduction-to-numerical-simulation | 933ba186a0d9250fde9181d6aec974847d0ba841 | [
"MIT"
] | 26 | 2017-02-22T09:02:59.000Z | 2021-10-04T14:42:47.000Z | practice/matrix/hardware.ipynb | alepoydes/introduction-to-numerical-simulation | 933ba186a0d9250fde9181d6aec974847d0ba841 | [
"MIT"
] | 1 | 2017-10-01T09:56:40.000Z | 2017-10-01T09:56:40.000Z | practice/matrix/hardware.ipynb | alepoydes/introduction-to-numerical-simulation | 933ba186a0d9250fde9181d6aec974847d0ba841 | [
"MIT"
] | 19 | 2017-03-13T17:04:21.000Z | 2021-10-04T14:41:03.000Z | 37.421488 | 175 | 0.650398 | [
[
[
"# Уберите комментарий и установите numba, если вы получаете сообщение \"ModuleNotFoundError: No module named 'numba'\".\n# !pip3 install numba",
"_____no_output_____"
],
[
"import numpy as np\nimport numba as nb\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Задания.\n\n1. Изучите реализацию многомерных массивов numpy.ndarray и работу с ними в numba.\nПосмотрите ноутбук [FastPython.](../FastPython.ipynb) \n\n2. Реализуйте произведение матриц $A\\in Mat(N\\times K)$, $B\\in Mat(K\\times M)$ согласно определению\n$$\nC_{n,m}=\\sum_{k=1}^K A_{n,k}B_{k,m}.\n$$\nСравните быстродействие реализаций на основе numpy.sum, с помощью numba и стандартный метод numpy.dot.\nНасколько полно используется процессор? \nСколько используется памяти?\n\n3. Составьте модель использования вычислительных ресурсов функцией на основе numba.jit из предыдущего пункта.\nРазмеры матриц должны быть параметрами модели.\nПроведите вычислительные эксперименты, чтобы подобрать параметры модели.\nЭкстраполируйте результат на большие матрицы, сделайте экспериментальную проверку.\n\n4. В простейшем алгоритме произведения матриц используются три цикла: перебирающие элементы $n$ и $m$ матрицы $C$\nи суммирующие по $k$. \nСравните время перемножения матриц в зависимости от порядка циклов.\nОцените объем кэшей разных уровней, проводя эксперименты для матриц разного размера.\n\n5. Обновите функцию для перемножения матриц, используя несколько потоков с помощью numba.prange.\nОбновите модель использования вычислительных ресурсов, принимая во внимание число потоков.\nОцените параметры модели из эксперимента.\nКакое [параллельное ускорение](https://en.wikipedia.org/wiki/Amdahl%27s_law) вы получили?\n\n6. Сможете ли вы реализовать реализовать на С более быстрый вариант перемножения матриц, чем на numba?\n\n7. Реализуйте быстрое произведение матриц, например, используйте [алгоритм Штрассена](https://en.wikipedia.org/wiki/Strassen_algorithm).\nОцените, на матрицах какого размера быстрое произведение матриц быстрее, чем стандартная реализация.\nКакой из методов дает меньшую погрешность вычислений?\n",
"_____no_output_____"
],
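A simple starting point for task 2 is sketched below, matching the `numpy` and `numba` imports from the cells above; the loop order, test sizes and `cache=True` are our own choices, not part of the assignment:

```python
import numpy as np
import numba as nb

@nb.njit(cache=True)
def matmul_naive(A, B):
    # C[n, m] = sum_k A[n, k] * B[k, m]
    N, K = A.shape
    _, M = B.shape
    C = np.zeros((N, M))
    for n in range(N):
        for k in range(K):          # k outside m: rows of B are read contiguously
            a = A[n, k]
            for m in range(M):
                C[n, m] += a * B[k, m]
    return C

A = np.random.rand(300, 200)
B = np.random.rand(200, 400)
assert np.allclose(matmul_naive(A, B), A.dot(B))
```

Timing this against `A.dot(B)` (for example with `%timeit`) and then permuting the three loops is exactly the experiment tasks 2 and 4 ask for.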
[
"# Литература\n\n1. Ван Лоун Чарльз Ф., Голуб Джин Х. Матричные вычисления. Глава 1.\n\n1. [NumPy](https://numpy.org/doc/stable/contents.html)\n\n1. [Numba: A High Performance Python Compiler.](https://numba.pydata.org/) [Performance Tips](https://numba.pydata.org/numba-doc/latest/user/performance-tips.html)\n\n1. [JAX: Autograd and XLA](https://github.com/google/jax)\n\n1. [xeus-cling: a Jupyter kernel for C++](https://github.com/jupyter-xeus/xeus-cling)\n\n1. [Minimal C kernel for Jupyter](https://github.com/brendan-rius/jupyter-c-kernel)\n\n1. Micha Gorelick, Ian Ozsvald.\n[High Performance Python](https://www.oreilly.com/library/view/high-performance-python/9781449361747/) \n\n1. [Performance Tips of NumPy ndarray](https://shihchinw.github.io/2019/03/performance-tips-of-numpy-ndarray.html)\n\n1. [Beating NumPy performance speed by extending Python with C](https://medium.com/analytics-vidhya/beating-numpy-performance-by-extending-python-with-c-c9b644ee2ca8)\n\n1. [Principles of Performance](https://llllllllll.github.io/principles-of-performance/index.html)",
"_____no_output_____"
]
]
] | [
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7df49cc1695e936e404f3870b90a0812a47374e | 5,295 | ipynb | Jupyter Notebook | doc/tutorials/materials_demo.ipynb | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | 162 | 2019-11-01T17:35:58.000Z | 2022-03-18T04:22:39.000Z | doc/tutorials/materials_demo.ipynb | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | 315 | 2019-11-01T17:32:05.000Z | 2022-03-30T03:51:42.000Z | doc/tutorials/materials_demo.ipynb | DennisYelizarov/armi | 3f0e63769abb2a57b3aac7a2f0975dcd629b015a | [
"Apache-2.0"
] | 55 | 2019-11-01T16:59:59.000Z | 2022-03-25T18:19:06.000Z | 32.090909 | 384 | 0.623607 | [
[
[
"# The ARMI Material Library\n\nWhile *nuclides* are the microscopic building blocks of nature, their collection into *materials* is what we interact with at the engineering scale. The ARMI Framework provides a `Material` class, which has a composition (how many of each nuclide are in the material), and a variety of thermomechanical properties (many of which are temperature dependent), such as:\n\n* Mass density \n* Heat capacity\n* Linear or volumetric thermal expansion\n* Thermal conductivity\n* Solidus/liquidus temperature\n\nand so on. \n\nMany of these properties are widely available in the literature for fresh materials. As materials are irradiated, the properties tend to change in complex ways. Material objects can be extended to account for such changes. \n\nThe ARMI Framework comes with a small set of example material definitions. These are generally quite incomplete (often missing temperature dependence), and are of academic quality at best. To do engineering design calculations, users of ARMI are expected to make or otherwise prepare materials. As the ecosystem grows, we hope the material library will mature.\n\nIn any case, here we will explore the use of `Material`s. Let's get an instance of the Uranium Oxide material.",
"_____no_output_____"
]
],
[
[
"from armi.materials import uraniumOxide\nuo2 = uraniumOxide.UO2()\ndensity500 = uo2.density(Tc=500)\nprint(f\"The density of UO2 @ T = 500C is {density500:.2f} g/cc\")",
"_____no_output_____"
]
],
[
[
"Taking a look at the composition",
"_____no_output_____"
]
],
[
[
"print(uo2.p.massFrac)",
"_____no_output_____"
]
],
[
[
"The mass fractions of a material, plus its mass density, fully define the composition. Conversions between number density/fraction and mass density/fraction are handled on the next level up (on `Component`s), which we will explore soon.\n\nARMI automatically thermally-expands materials based on their coefficients of linear expansion. For instance, a piece of Uranium Oxide that's 10 cm at room temperature would be longer at 500 C according to the formula:\n\n\\begin{equation}\n\\frac{\\Delta L}{L_0} = \\alpha \\Delta T\n\\end{equation}\n\nOn the reactor model, this all happens behind the scenes. But here at the material library level, we can see it in detail. ",
"_____no_output_____"
]
],
[
[
"L0 = 10.0\ndLL = uo2.linearExpansionFactor(500,25)\nL = L0 * (1+dLL)\nprint(f\"Hot length is {L:.4f} cm\")\n",
"_____no_output_____"
]
],
[
[
"Let's plot the heat capacity as a function of temperature in K.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nTk = np.linspace(300,2000)\nheatCapacity = [uo2.heatCapacity(Tk=ti) for ti in Tk]\nplt.plot(Tk, heatCapacity)\nplt.title(\"$UO_2$ heat capacity vs. temperature\")\nplt.xlabel(\"Temperature (K)\")\nplt.ylabel(\"Heat capacity (J/kg-K)\")\nplt.grid(ls='--',alpha=0.3)",
"_____no_output_____"
]
],
[
[
"Different physics plugins require different properties to be defined. For pure neutronics runs, mass density and composition is enough. But for thermal/hydraulics runs, heat capacity and thermal conductivity is needed for solids, and more is needed for coolants. As irradiation models are investigated, creep, corrosion, porosity, swelling, and other factors will be necessary. ",
"_____no_output_____"
]
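As the closing paragraph notes, engineering-grade work means preparing your own materials, for example folding porosity or swelling effects into the density. A minimal sketch of what such an extension could look like, subclassing the UO2 material used in this notebook (the class name, the flat 5% porosity knock-down and the keyword pass-through are illustrative assumptions, not an ARMI-endorsed model):

```python
from armi.materials import uraniumOxide

class PorousUO2(uraniumOxide.UO2):
    """UO2 with a crude, constant porosity correction applied to the mass density."""

    porosity = 0.05  # assumed 5% porosity, for illustration only

    def density(self, *args, **kwargs):
        # Reuse the parent's temperature-dependent density and scale it down.
        return (1.0 - self.porosity) * super().density(*args, **kwargs)

porous = PorousUO2()
print(f"Porous UO2 density @ T = 500C is {porous.density(Tc=500):.2f} g/cc")
```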
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7df4a5bae38c2de885d906f84c1c6eb96f2ca96 | 33,485 | ipynb | Jupyter Notebook | bilstm_crf_ner/ner_bilstm_crf_smalldata_2d.ipynb | ybdesire/machinelearning | 0224746332e1085336e0b02e0ca3b11d74bd9a91 | [
"MIT"
] | 30 | 2017-02-28T13:52:58.000Z | 2022-03-24T10:28:43.000Z | bilstm_crf_ner/ner_bilstm_crf_smalldata_2d.ipynb | ybdesire/machinelearning | 0224746332e1085336e0b02e0ca3b11d74bd9a91 | [
"MIT"
] | null | null | null | bilstm_crf_ner/ner_bilstm_crf_smalldata_2d.ipynb | ybdesire/machinelearning | 0224746332e1085336e0b02e0ca3b11d74bd9a91 | [
"MIT"
] | 17 | 2017-03-03T12:38:04.000Z | 2022-03-11T01:53:20.000Z | 31.17784 | 407 | 0.450142 | [
[
[
"# 2D for bert embedding, elmo embedding and so on",
"_____no_output_____"
],
[
"# !pip install git+https://www.github.com/keras-team/keras-contrib.git",
"_____no_output_____"
],
[
"import numpy as np \nimport pandas as pd \nimport keras\nprint(keras.__version__)\n\nfrom math import nan\nfrom keras.callbacks import ModelCheckpoint\n\nfrom keras_contrib.layers import CRF\n\n",
"/root/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\nUsing TensorFlow backend.\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n/root/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
],
[
"dframe = pd.read_csv(\"ner_small.csv\", encoding = \"ISO-8859-1\", error_bad_lines=False)",
"_____no_output_____"
],
[
"dataset=dframe.drop(['Unnamed: 0', 'lemma', 'next-lemma', 'next-next-lemma', 'next-next-pos',\n 'next-next-shape', 'next-next-word', 'next-pos', 'next-shape',\n 'next-word', 'prev-iob', 'prev-lemma', 'prev-pos',\n 'prev-prev-iob', 'prev-prev-lemma', 'prev-prev-pos', 'prev-prev-shape',\n 'prev-prev-word', 'prev-shape', 'prev-word',\"pos\"],axis=1)",
"_____no_output_____"
],
[
"dataset.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 15591 entries, 0 to 15590\nData columns (total 4 columns):\nsentence_idx 15591 non-null int64\nshape 15591 non-null object\nword 15591 non-null object\ntag 15591 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 487.3+ KB\n"
],
[
"dataset.head()",
"_____no_output_____"
],
[
"dataset=dataset.drop(['shape'],axis=1)",
"_____no_output_____"
],
[
"dataset.head()",
"_____no_output_____"
],
[
"class SentenceGetter(object):\n \n def __init__(self, dataset):\n self.n_sent = 1\n self.dataset = dataset\n self.empty = False\n agg_func = lambda s: [(w, t) for w,t in zip(s[\"word\"].values.tolist(),\n s[\"tag\"].values.tolist())]\n self.grouped = self.dataset.groupby(\"sentence_idx\").apply(agg_func)\n self.sentences = [s for s in self.grouped]\n \n def get_next(self):\n try:\n s = self.grouped[\"Sentence: {}\".format(self.n_sent)]\n self.n_sent += 1\n return s\n except:\n return None",
"_____no_output_____"
],
[
"getter = SentenceGetter(dataset)",
"_____no_output_____"
],
[
"sentences = getter.sentences",
"_____no_output_____"
],
[
"print(sentences[5])",
"[('The', 'O'), ('party', 'O'), ('is', 'O'), ('divided', 'O'), ('over', 'O'), ('Britain', 'B-gpe'), (\"'s\", 'O'), ('participation', 'O'), ('in', 'O'), ('the', 'O'), ('Iraq', 'B-geo'), ('conflict', 'O'), ('and', 'O'), ('the', 'O'), ('continued', 'O'), ('deployment', 'O'), ('of', 'O'), ('8,500', 'O'), ('British', 'B-gpe'), ('troops', 'O'), ('in', 'O'), ('that', 'O'), ('country', 'O'), ('.', 'O')]\n"
],
[
"maxlen = max([len(s) for s in sentences])\nprint ('Maximum sequence length:', maxlen)",
"Maximum sequence length: 62\n"
],
[
"words = list(set(dataset[\"word\"].values))\nwords.append(\"ENDPAD\")",
"_____no_output_____"
],
[
"n_words = len(words); n_words",
"_____no_output_____"
],
[
"tags = []\nfor tag in set(dataset[\"tag\"].values):\n if tag is nan or isinstance(tag, float):\n tags.append('unk')\n else:\n tags.append(tag)\nprint(tags)",
"['I-per', 'I-gpe', 'I-nat', 'B-geo', 'I-art', 'I-org', 'I-eve', 'B-eve', 'O', 'B-per', 'B-gpe', 'B-tim', 'B-art', 'I-geo', 'B-org', 'I-tim', 'B-nat']\n"
],
[
"n_tags = len(tags); n_tags",
"_____no_output_____"
],
[
"from future.utils import iteritems\nword2idx = {w: i for i, w in enumerate(words)}\ntag2idx = {t: i for i, t in enumerate(tags)}\nidx2tag = {v: k for k, v in iteritems(tag2idx)}",
"_____no_output_____"
],
[
"from keras.preprocessing.sequence import pad_sequences\nX = [[word2idx[w[0]] for w in s] for s in sentences]",
"_____no_output_____"
],
[
"np.array(X).shape\nX2=X",
"_____no_output_____"
],
[
"X = pad_sequences(maxlen=1400, sequences=X, padding=\"post\",value=n_words - 1)\nX2 = pad_sequences(maxlen=140, sequences=X2, padding=\"post\",value=n_words - 1)",
"_____no_output_____"
],
[
"y_idx = [[tag2idx[w[1]] for w in s] for s in sentences]\nprint(sentences[100])\nprint(y_idx[100])",
"[('The', 'O'), ('Pakistani', 'B-gpe'), ('military', 'O'), ('launched', 'O'), ('its', 'O'), ('offensive', 'O'), ('in', 'O'), ('Orakzai', 'B-geo'), ('to', 'O'), ('hunt', 'O'), ('Taliban', 'B-org'), ('insurgents', 'O'), ('.', 'O')]\n[8, 10, 8, 8, 8, 8, 8, 3, 8, 8, 14, 8, 8]\n"
],
[
"y = pad_sequences(maxlen=140, sequences=y_idx, padding=\"post\", value=tag2idx[\"O\"])\nprint(y_idx[100])",
"[8, 10, 8, 8, 8, 8, 8, 3, 8, 8, 14, 8, 8]\n"
],
[
"from keras.utils import to_categorical\ny = [to_categorical(i, num_classes=n_tags) for i in y]",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"X = X.reshape((X.shape[0],140,10))\nX.shape",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)\nX_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y, test_size=0.2, random_state=666)",
"_____no_output_____"
],
[
"X_train.shape",
"_____no_output_____"
],
[
"X_train[0]",
"_____no_output_____"
],
[
"y_train[1],y_train[1].shape",
"_____no_output_____"
],
[
"from keras.models import Model, Input\nfrom keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional\nimport keras as k\n",
"_____no_output_____"
],
[
"input = Input(shape=(140,10))\nword_embedding_size = 10\nmodel = Bidirectional(LSTM(units=word_embedding_size, \n return_sequences=True, \n dropout=0.5, \n recurrent_dropout=0.5, \n kernel_initializer=k.initializers.he_normal()))(input)\nmodel = LSTM(units=word_embedding_size * 2, \n return_sequences=True, \n dropout=0.5, \n recurrent_dropout=0.5, \n kernel_initializer=k.initializers.he_normal())(model)\nmodel = TimeDistributed(Dense(n_tags, activation=\"relu\"))(model) # previously softmax output layer\n\ncrf = CRF(n_tags) # CRF layer\nout = crf(model) # output\n",
"_____no_output_____"
],
[
"model = Model(input, out)\n",
"_____no_output_____"
],
[
"adam = k.optimizers.Adam(lr=0.0005, beta_1=0.9, beta_2=0.999)\n#model.compile(optimizer=adam, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\nmodel.compile(optimizer=adam, loss=crf.loss_function, metrics=[crf.accuracy, 'accuracy'])\n",
"/root/anaconda3/lib/python3.6/site-packages/keras_contrib/layers/crf.py:346: UserWarning: CRF.loss_function is deprecated and it might be removed in the future. Please use losses.crf_loss instead.\n warnings.warn('CRF.loss_function is deprecated '\n/root/anaconda3/lib/python3.6/site-packages/keras_contrib/layers/crf.py:353: UserWarning: CRF.accuracy is deprecated and it might be removed in the future. Please use metrics.crf_accuracy\n warnings.warn('CRF.accuracy is deprecated and it '\n"
],
[
"history = model.fit(X_train, np.array(y_train), batch_size=256, epochs=3, validation_split=0.2, verbose=1)\n",
"WARNING:tensorflow:From /root/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nTrain on 448 samples, validate on 113 samples\nEpoch 1/3\n448/448 [==============================] - 3s 6ms/step - loss: 2.7265 - crf_viterbi_accuracy: 0.1537 - accuracy: 0.0028 - val_loss: 2.7363 - val_crf_viterbi_accuracy: 0.0044 - val_accuracy: 0.0044\nEpoch 2/3\n448/448 [==============================] - 1s 2ms/step - loss: 2.7060 - crf_viterbi_accuracy: 0.1636 - accuracy: 0.0028 - val_loss: 2.7054 - val_crf_viterbi_accuracy: 0.0052 - val_accuracy: 0.0052\nEpoch 3/3\n448/448 [==============================] - 1s 2ms/step - loss: 2.6905 - crf_viterbi_accuracy: 0.1517 - accuracy: 0.0028 - val_loss: 2.6764 - val_crf_viterbi_accuracy: 0.0068 - val_accuracy: 0.0068\n"
],
[
"p = model.predict(np.array([X_test[0]]))\np = np.argmax(p, axis=-1)\nprint(p)",
"[[ 5 1 9 10 3 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9\n 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9\n 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9\n 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9\n 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9\n 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9 1 9]]\n"
],
[
"X_test2.shape",
"_____no_output_____"
],
[
"gt = np.argmax(y_test[0], axis=-1)\nprint(gt)\nprint(\"{:14}: ({:5}): {}\".format(\"Word\", \"True\", \"Pred\"))\nfor idx, (w,pred) in enumerate(zip(X_test2[0],p[0])):\n #\n print(\"{:14}: ({:5}): {}\".format(words[w],idx2tag[gt[idx]],tags[pred]))\n",
"[8 8 8 8 8 9 0 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8\n 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8\n 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8\n 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8 8]\nWord : (True ): Pred\nA : (O ): I-org\nmilitary : (O ): I-gpe\ntribunal : (O ): B-per\nhas : (O ): B-gpe\ncharged : (O ): B-geo\nDr. : (B-per): B-per\nBesigye : (I-per): I-gpe\nwith : (O ): B-per\nterrorism : (O ): I-gpe\nand : (O ): B-per\npossessing : (O ): I-gpe\nillegal : (O ): B-per\nweapons : (O ): I-gpe\n. : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\nENDPAD : (O ): I-gpe\nENDPAD : (O ): B-per\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7df4db56a00cffd8d90ba87f01f2feeb53eb57e | 19,303 | ipynb | Jupyter Notebook | example/rcnn/Moodys-Table-Detection.ipynb | SCDM/mxnet | 2dcab02620798f3dd51d4282d566e0dfd5422f1c | [
"Apache-2.0"
] | null | null | null | example/rcnn/Moodys-Table-Detection.ipynb | SCDM/mxnet | 2dcab02620798f3dd51d4282d566e0dfd5422f1c | [
"Apache-2.0"
] | null | null | null | example/rcnn/Moodys-Table-Detection.ipynb | SCDM/mxnet | 2dcab02620798f3dd51d4282d566e0dfd5422f1c | [
"Apache-2.0"
] | null | null | null | 42.895556 | 353 | 0.582811 | [
[
[
"# Intro to Table Detection with Fast RCNN\n\nBy taking an ImageNet-pretrained model such as the VGG16, we can add a few more convolutional layers to construct an RPN, or region proposal network. This module extracts regions of interest, or RoIs, that inform a model on where to identify an object. \n\n\nWhen the RoIs are applied, we do max pooling only in the regions of interest, as to find an embedding that uniquely identifies that area of the input and well as building a description of what object might lie in that region. From this description, the model can then categorize that region into one of k categories it was trained to recognize. \n",
"_____no_output_____"
]
],
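Before diving into the full training script, it helps to see concretely what "max pooling only in the regions of interest" means on a single feature map. The sketch below is a plain NumPy illustration of the idea only; in this example the actual operator comes from the symbols built in `rcnn.symbol`, and the RoI coordinates here are made up:

```python
import numpy as np

def roi_max_pool(feat, roi, out_size=(7, 7)):
    # Max-pool one RoI (x1, y1, x2, y2 in feature-map coordinates) down to a fixed grid.
    x1, y1, x2, y2 = roi
    region = feat[y1:y2 + 1, x1:x2 + 1]
    H, W = region.shape
    oh, ow = out_size
    ys = np.linspace(0, H, oh + 1).astype(int)
    xs = np.linspace(0, W, ow + 1).astype(int)
    out = np.zeros(out_size)
    for i in range(oh):
        for j in range(ow):
            out[i, j] = region[ys[i]:max(ys[i + 1], ys[i] + 1),
                               xs[j]:max(xs[j + 1], xs[j] + 1)].max()
    return out

feat = np.random.rand(38, 50)                    # one channel of a conv feature map
print(roi_max_pool(feat, (4, 6, 29, 21)).shape)  # -> (7, 7), regardless of the RoI size
```

Every proposal, whatever its shape, comes out as the same fixed-size block, which is what lets a single fully connected head score all of the RoIs.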
[
[
"# Train Fast RCNN\n\nimport logging\nimport pprint\nimport mxnet as mx\nimport numpy as np\n\nfrom rcnn.config import config, default, generate_config\nfrom rcnn.symbol import *\nfrom rcnn.core import callback, metric\nfrom rcnn.core.loader import AnchorLoader\nfrom rcnn.core.module import MutableModule\nfrom rcnn.utils.load_data import load_gt_roidb, merge_roidb, filter_roidb\nfrom rcnn.utils.load_model import load_param\n\n\ndef train_net(args, ctx, pretrained, epoch, prefix, begin_epoch, end_epoch,\n lr=0.001, lr_step='5'):\n # set up logger\n logging.basicConfig()\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # setup config\n config.TRAIN.BATCH_IMAGES = 1\n config.TRAIN.BATCH_ROIS = 128\n config.TRAIN.END2END = True\n config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED = True\n\n # load symbol\n sym = eval('get_' + args.network + '_train')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\n feat_sym = sym.get_internals()['rpn_cls_score_output']\n\n\n # setup multi-gpu\n batch_size = len(ctx)\n input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size\n\n # print config\n pprint.pprint(config)\n\n # load dataset and prepare imdb for training\n image_sets = [iset for iset in args.image_set.split('+')]\n roidbs = [load_gt_roidb(args.dataset, image_set, args.root_path, args.dataset_path,\n flip=not args.no_flip)\n for image_set in image_sets]\n roidb = merge_roidb(roidbs)\n roidb = filter_roidb(roidb)\n\n # load training data\n train_data = AnchorLoader(feat_sym, roidb, batch_size=input_batch_size, shuffle=not args.no_shuffle,\n ctx=ctx, work_load_list=args.work_load_list,\n feat_stride=config.RPN_FEAT_STRIDE, anchor_scales=config.ANCHOR_SCALES,\n anchor_ratios=config.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)\n\n # infer max shape\n max_data_shape = [('data', (input_batch_size, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]\n max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)\n max_data_shape.append(('gt_boxes', (input_batch_size, 100, 5)))\n print('providing maximum shape', max_data_shape, max_label_shape)\n\n # infer shape\n data_shape_dict = dict(train_data.provide_data + train_data.provide_label)\n arg_shape, out_shape, aux_shape = sym.infer_shape(**data_shape_dict)\n arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))\n out_shape_dict = dict(zip(sym.list_outputs(), out_shape))\n aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))\n print('output shape')\n pprint.pprint(out_shape_dict)\n\n # load and initialize params\n if args.resume:\n arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)\n else:\n arg_params, aux_params = load_param(pretrained, epoch, convert=True)\n arg_params['rpn_conv_3x3_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_conv_3x3_weight'])\n arg_params['rpn_conv_3x3_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_conv_3x3_bias'])\n arg_params['rpn_cls_score_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_cls_score_weight'])\n arg_params['rpn_cls_score_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_cls_score_bias'])\n arg_params['rpn_bbox_pred_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['rpn_bbox_pred_weight'])\n arg_params['rpn_bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['rpn_bbox_pred_bias'])\n arg_params['cls_score_weight'] = mx.random.normal(0, 0.01, shape=arg_shape_dict['cls_score_weight'])\n arg_params['cls_score_bias'] = 
mx.nd.zeros(shape=arg_shape_dict['cls_score_bias'])\n arg_params['bbox_pred_weight'] = mx.random.normal(0, 0.001, shape=arg_shape_dict['bbox_pred_weight'])\n arg_params['bbox_pred_bias'] = mx.nd.zeros(shape=arg_shape_dict['bbox_pred_bias'])\n\n # check parameter shapes\n for k in sym.list_arguments():\n if k in data_shape_dict:\n continue\n assert k in arg_params, k + ' not initialized'\n assert arg_params[k].shape == arg_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)\n for k in sym.list_auxiliary_states():\n assert k in aux_params, k + ' not initialized'\n assert aux_params[k].shape == aux_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)\n\n # create solver\n fixed_param_prefix = config.FIXED_PARAMS\n data_names = [k[0] for k in train_data.provide_data]\n label_names = [k[0] for k in train_data.provide_label]\n mod = MutableModule(sym, data_names=data_names, label_names=label_names,\n logger=logger, context=ctx, work_load_list=args.work_load_list,\n max_data_shapes=max_data_shape, max_label_shapes=max_label_shape,\n fixed_param_prefix=fixed_param_prefix)\n\n # decide training params metric\n rpn_eval_metric = metric.RPNAccMetric()\n rpn_cls_metric = metric.RPNLogLossMetric()\n rpn_bbox_metric = metric.RPNL1LossMetric()\n eval_metric = metric.RCNNAccMetric()\n cls_metric = metric.RCNNLogLossMetric()\n bbox_metric = metric.RCNNL1LossMetric()\n eval_metrics = mx.metric.CompositeEvalMetric()\n for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:\n eval_metrics.add(child_metric)\n \n # callback\n batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=args.frequent)\n means = np.tile(np.array(config.TRAIN.BBOX_MEANS), config.NUM_CLASSES)\n stds = np.tile(np.array(config.TRAIN.BBOX_STDS), config.NUM_CLASSES)\n epoch_end_callback = callback.do_checkpoint(prefix, means, stds)\n \n # decide learning rate\n base_lr = lr\n lr_factor = 0.1\n lr_epoch = [int(epoch) for epoch in lr_step.split(',')]\n lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]\n lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))\n lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]\n print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)\n lr_scheduler = mx.lr_scheduler.MultiFactorScheduler(lr_iters, lr_factor)\n # optimizer\n optimizer_params = {'momentum': 0.9,\n 'wd': 0.0005,\n 'learning_rate': lr,\n 'lr_scheduler': lr_scheduler,\n 'rescale_grad': (1.0 / batch_size),\n 'clip_gradient': 5}\n\n # train\n mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,\n batch_end_callback=batch_end_callback, kvstore=args.kvstore,\n optimizer='sgd', optimizer_params=optimizer_params,\n arg_params=arg_params, aux_params=aux_params, \n begin_epoch=begin_epoch, num_epoch=end_epoch)\n\n",
"_____no_output_____"
],
[
"## Training Args\nclass DictToObject:\n '''\n helper class to encapsulate all the args from dict to obj\n '''\n def __init__(self, **entries):\n self.__dict__.update(entries)\n\nargs = {'lr': 0.001, 'image_set': '2007_trainval', 'network': 'resnet',\n 'resume': False, 'pretrained': 'model/resnet-101', 'root_path': 'new_data',\n 'dataset': 'TableDetectionVOC', 'lr_step': '7', 'prefix': 'model/rese2e', \n 'end_epoch': 10, 'dataset_path': 'table_data/VOCdevkit', \n 'gpus': '0',\n 'no_flip': False, 'no_shuffle': False, 'begin_epoch': 0, \n 'work_load_list': None, 'pretrained_epoch': 0,\n 'kvstore': 'device', 'frequent': 20}\n\nargs = DictToObject(**args)\nif len(args.gpus) > 1:\n ctx = [mx.gpu(int(i)) for i in args.gpus.split(',')]\nelse:\n ctx = [mx.gpu(int(args.gpus))]\ntrain_net(args, ctx, args.pretrained, args.pretrained_epoch, args.prefix, args.begin_epoch, args.end_epoch,\n lr=args.lr, lr_step=args.lr_step)\n",
"_____no_output_____"
],
[
"# Fast r-cnn trained on VOC2007 dataset\n\nimport os\nimport cv2\nimport mxnet as mx\nimport numpy as np\nfrom rcnn.config import config\nfrom rcnn.symbol import get_vgg_test, get_vgg_rpn_test\nfrom rcnn.io.image import resize, transform\nfrom rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection\nfrom rcnn.utils.load_model import load_param\nfrom rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper\n\nimport urllib2\nimport tempfile\n\n# 13 classes\nCLASSES = ('__background__',\n 'table', 'header', 'row', 'column')\n\nconfig.TEST.HAS_RPN = True\nSHORT_SIDE = config.SCALES[0][0]\nLONG_SIDE = config.SCALES[0][1]\nPIXEL_MEANS = config.PIXEL_MEANS\nDATA_NAMES = ['data', 'im_info']\nLABEL_NAMES = None\nDATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]\nLABEL_SHAPES = None\n\n# visualization\nCONF_THRESH = 0.7\nNMS_THRESH = 0.3\nnms = py_nms_wrapper(NMS_THRESH)\n\n\ndef get_net(symbol, prefix, epoch, ctx):\n arg_params, aux_params = load_param(prefix, epoch, convert=True, ctx=ctx, process=True)\n\n # infer shape\n data_shape_dict = dict(DATA_SHAPES)\n arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()\n arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)\n arg_shape_dict = dict(zip(arg_names, arg_shape))\n aux_shape_dict = dict(zip(aux_names, aux_shape))\n\n # check shapes\n for k in symbol.list_arguments():\n if k in data_shape_dict or 'label' in k:\n continue\n assert k in arg_params, k + ' not initialized'\n assert arg_params[k].shape == arg_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)\n for k in symbol.list_auxiliary_states():\n assert k in aux_params, k + ' not initialized'\n assert aux_params[k].shape == aux_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)\n\n predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,\n provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,\n arg_params=arg_params, aux_params=aux_params)\n return predictor\n\n\ndef generate_batch(im):\n \"\"\"\n preprocess image, return batch\n :param im: cv2.imread returns [height, width, channel] in BGR\n :return:\n data_batch: MXNet input batch\n data_names: names in data_batch\n im_scale: float number\n \"\"\"\n im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)\n im_array = transform(im_array, PIXEL_MEANS)\n im_info = np.array([[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)\n data = [mx.nd.array(im_array), mx.nd.array(im_info)]\n data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]\n data_batch = mx.io.DataBatch(data=data, label=None, provide_data=data_shapes, provide_label=None)\n return data_batch, DATA_NAMES, im_scale\n\n\ndef demo_net(predictor, im, vis=False):\n \"\"\"\n generate data_batch -> im_detect -> post process\n :param predictor: Predictor\n :param image_name: image name\n :param vis: will save as a new image if not visualized\n :return: None\n \"\"\"\n\n data_batch, data_names, im_scale = generate_batch(im)\n scores, boxes, data_dict = im_detect(predictor, data_batch, data_names, im_scale)\n\n all_boxes = [[] for _ in CLASSES]\n for cls in CLASSES:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind, np.newaxis]\n keep = np.where(cls_scores >= CONF_THRESH)[0]\n dets = np.hstack((cls_boxes, 
cls_scores)).astype(np.float32)[keep, :]\n keep = nms(dets)\n all_boxes[cls_ind] = dets[keep, :]\n\n boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]\n\n # print results\n print('class ---- [[x1, x2, y1, y2, confidence]]')\n for ind, boxes in enumerate(boxes_this_image):\n if len(boxes) > 0:\n print('---------', CLASSES[ind], '---------')\n print(boxes)\n\n if vis:\n vis_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)\n else:\n #result_file = image_name.replace('.', '_result.')\n result_file = \"output.jpg\"\n print('results saved to %s' % result_file)\n im = draw_all_detection(data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)\n cv2.imwrite(result_file, im)\n\n \ndef get_image_from_url(url, img_file):\n\n req = urllib2.urlopen(url)\n img_file.write(req.read())\n img_file.flush()\n return img_file.name",
"_____no_output_____"
]
],
[
[
"## Inference - Lets run some predictions\n",
"_____no_output_____"
]
],
[
[
"vis = False\ngpu = 0\nepoch = 3\nprefix = 'e2e'\n\nctx = mx.gpu(gpu)\nsymbol = get_vgg_test(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\npredictor = get_net(symbol, prefix, epoch, ctx)\n\n",
"_____no_output_____"
],
[
"img_file = tempfile.NamedTemporaryFile()\n#url = 'http://images.all-free-download.com/images/graphiclarge/aeroplane_boeing_737_air_new_zealand_218019.jpg'\n#url = 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/segexamples/images/21.jpg'\n#url = 'https://www.siemens.com/press/pool/de/pressebilder/2011/mobility/soimo201107/072dpi/soimo201107-04_072dpi.jpg'\nurl = '/home/ubuntu/workspace/mxnet/example/rcnn/new_data/VOCdevkit/VOC2007/JPEGImages/500046727_20161125_page_002.jpg'\n\nif 'JPEGImages' in url:\n image = url\nelse:\n image = get_image_from_url(url, img_file)\nassert os.path.exists(image), image + ' not found'\n\nim = cv2.imread(image)\ndemo_net(predictor, im, vis)",
"_____no_output_____"
]
],
[
[
"## Table Object Detection",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nim = np.array(Image.open('/home/ubuntu/workspace/mxnet/example/rcnn/new_data/marked_table.png'))\nplt.imshow(im)\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7df52f47756f3d352f8a978e4fc490c2488ce78 | 66,285 | ipynb | Jupyter Notebook | modules/models_ensemble.ipynb | dzenanz/tutorials | 9556f4b03e81379b91b965776f07c6d48c19b4f6 | [
"Apache-2.0"
] | 1 | 2021-07-13T00:34:40.000Z | 2021-07-13T00:34:40.000Z | modules/models_ensemble.ipynb | basharbme/tutorials | 9c1ce56973faa3bb2bd209d61078b5fd1f6b0bbe | [
"Apache-2.0"
] | null | null | null | modules/models_ensemble.ipynb | basharbme/tutorials | 9c1ce56973faa3bb2bd209d61078b5fd1f6b0bbe | [
"Apache-2.0"
] | null | null | null | 59.986425 | 190 | 0.640462 | [
[
[
"# Models ensemble to achieve better test metrics\n\nModels ensemble is a popular strategy in machine learning and deep learning areas to achieve more accurate and more stable outputs. \nA typical practice is:\n* Split all the training dataset into K folds.\n* Train K models with every K-1 folds data.\n* Execute inference on the test data with all the K models.\n* Compute the average values with weights or vote the most common value as the final result.\n<p>\n<img src=\"../figures/models_ensemble.png\" width=\"80%\" alt='models_ensemble'>\n</p>\n\nMONAI provides `EnsembleEvaluator` and `MeanEnsemble`, `VoteEnsemble` post transforms. \nThis tutorial shows how to leverage ensemble modules in MONAI to set up ensemble program.\n\n[](https://colab.research.google.com/github/Project-MONAI/tutorials/blob/master/modules/models_ensemble.ipynb)",
"_____no_output_____"
],
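[
"As a toy illustration of the last two bullets (independent of MONAI and of the data used later), here is a hedged NumPy sketch of weighted averaging of per-class scores and of majority voting over discrete predictions; the three models, their scores and the weights are made-up numbers.\n\n```python\nimport numpy as np\n\n# made-up class scores from 3 models for 4 samples and 3 classes\nscores = np.array([\n    [[0.7, 0.2, 0.1], [0.1, 0.8, 0.1], [0.3, 0.4, 0.3], [0.2, 0.2, 0.6]],\n    [[0.6, 0.3, 0.1], [0.2, 0.7, 0.1], [0.5, 0.3, 0.2], [0.1, 0.3, 0.6]],\n    [[0.4, 0.4, 0.2], [0.3, 0.6, 0.1], [0.2, 0.5, 0.3], [0.3, 0.3, 0.4]],\n])\nweights = np.array([0.5, 0.3, 0.2])  # e.g. proportional to validation metrics\n\n# weighted mean ensemble: average over the model axis, then take the argmax\nmean_pred = np.tensordot(weights, scores, axes=1).argmax(axis=-1)\n\n# vote ensemble: argmax per model first, then take the most common label\nvotes = scores.argmax(axis=-1)\nvote_pred = np.array([np.bincount(col).argmax() for col in votes.T])\n\nprint(mean_pred, vote_pred)\n```",
"_____no_output_____"
],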
[
"## Setup environment",
"_____no_output_____"
]
],
[
[
"!python -c \"import monai\" || pip install -q \"monai-weekly[ignite, nibabel, tqdm]\"",
"_____no_output_____"
]
],
[
[
"## Setup imports",
"_____no_output_____"
]
],
[
[
"# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport logging\nimport os\nimport tempfile\nimport shutil\nimport sys\n\nimport nibabel as nib\nimport numpy as np\nimport torch\n\nfrom monai.config import print_config\nfrom monai.data import CacheDataset, DataLoader, create_test_image_3d\nfrom monai.engines import (\n EnsembleEvaluator,\n SupervisedEvaluator,\n SupervisedTrainer\n)\nfrom monai.handlers import MeanDice, StatsHandler, ValidationHandler, from_engine\nfrom monai.inferers import SimpleInferer, SlidingWindowInferer\nfrom monai.losses import DiceLoss\nfrom monai.networks.nets import UNet\nfrom monai.transforms import (\n Activationsd,\n AsChannelFirstd,\n AsDiscreted,\n Compose,\n LoadImaged,\n MeanEnsembled,\n RandCropByPosNegLabeld,\n RandRotate90d,\n ScaleIntensityd,\n EnsureTyped,\n VoteEnsembled,\n)\nfrom monai.utils import set_determinism\n\nprint_config()",
"MONAI version: 0.6.0rc1+23.gc6793fd0\nNumpy version: 1.20.3\nPytorch version: 1.9.0a0+c3d40fd\nMONAI flags: HAS_EXT = True, USE_COMPILED = False\nMONAI rev id: c6793fd0f316a448778d0047664aaf8c1895fe1c\n\nOptional dependencies:\nPytorch Ignite version: 0.4.5\nNibabel version: 3.2.1\nscikit-image version: 0.15.0\nPillow version: 7.0.0\nTensorboard version: 2.5.0\ngdown version: 3.13.0\nTorchVision version: 0.10.0a0\nITK version: 5.1.2\ntqdm version: 4.53.0\nlmdb version: 1.2.1\npsutil version: 5.8.0\npandas version: 1.1.4\neinops version: 0.3.0\n\nFor details about installing the optional dependencies, please visit:\n https://docs.monai.io/en/latest/installation.html#installing-the-recommended-dependencies\n\n"
]
],
[
[
"## Setup data directory\n\nYou can specify a directory with the `MONAI_DATA_DIRECTORY` environment variable. \nThis allows you to save results and reuse downloads. \nIf not specified a temporary directory will be used.",
"_____no_output_____"
]
],
[
[
"directory = os.environ.get(\"MONAI_DATA_DIRECTORY\")\nroot_dir = tempfile.mkdtemp() if directory is None else directory\nprint(root_dir)",
"/workspace/data/medical\n"
]
],
[
[
"## Set determinism, logging, device",
"_____no_output_____"
]
],
[
[
"set_determinism(seed=0)\nlogging.basicConfig(stream=sys.stdout, level=logging.INFO)\ndevice = torch.device(\"cuda:0\")",
"_____no_output_____"
]
],
[
[
"## Generate random (image, label) pairs\n\nGenerate 60 pairs for the task, 50 for training and 10 for test. \nAnd then split the 50 pairs into 5 folds to train 5 separate models.",
"_____no_output_____"
]
],
[
[
"data_dir = os.path.join(root_dir, \"runs\")\n\nif not os.path.exists(data_dir):\n os.makedirs(data_dir)\n for i in range(60):\n im, seg = create_test_image_3d(\n 128, 128, 128, num_seg_classes=1, channel_dim=-1)\n\n n = nib.Nifti1Image(im, np.eye(4))\n nib.save(n, os.path.join(data_dir, f\"img{i}.nii.gz\"))\n\n n = nib.Nifti1Image(seg, np.eye(4))\n nib.save(n, os.path.join(data_dir, f\"seg{i}.nii.gz\"))\n\nimages = sorted(glob.glob(os.path.join(data_dir, \"img*.nii.gz\")))\nsegs = sorted(glob.glob(os.path.join(data_dir, \"seg*.nii.gz\")))\n\ntrain_files = []\nval_files = []\nfor i in range(5):\n train_files.append(\n [\n {\"image\": img, \"label\": seg}\n for img, seg in zip(\n images[: (10 * i)] + images[(10 * (i + 1)): 50],\n segs[: (10 * i)] + segs[(10 * (i + 1)): 50],\n )\n ]\n )\n val_files.append(\n [\n {\"image\": img, \"label\": seg}\n for img, seg in zip(images[(10 * i): (10 * (i + 1))],\n segs[(10 * i): (10 * (i + 1))])\n ]\n )\n\ntest_files = [{\"image\": img, \"label\": seg}\n for img, seg in zip(images[50:60], segs[50:60])]",
"_____no_output_____"
]
],
[
[
"## Setup transforms for training and validation",
"_____no_output_____"
]
],
[
[
"train_transforms = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n ScaleIntensityd(keys=[\"image\", \"label\"]),\n RandCropByPosNegLabeld(\n keys=[\"image\", \"label\"],\n label_key=\"label\",\n spatial_size=[96, 96, 96],\n pos=1,\n neg=1,\n num_samples=4,\n ),\n RandRotate90d(keys=[\"image\", \"label\"], prob=0.5, spatial_axes=[0, 2]),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n)\nval_transforms = Compose(\n [\n LoadImaged(keys=[\"image\", \"label\"]),\n AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n ScaleIntensityd(keys=[\"image\", \"label\"]),\n EnsureTyped(keys=[\"image\", \"label\"]),\n ]\n)",
"_____no_output_____"
]
],
[
[
"## Define CacheDatasets and DataLoaders for train, validation and test",
"_____no_output_____"
]
],
[
[
"num_models = 5\ntrain_dss = [CacheDataset(\n data=train_files[i],\n transform=train_transforms) for i in range(num_models)]\ntrain_loaders = [\n DataLoader(\n train_dss[i], batch_size=2, shuffle=True, num_workers=4)\n for i in range(num_models)\n]\n\nval_dss = [CacheDataset(data=val_files[i], transform=val_transforms)\n for i in range(num_models)]\nval_loaders = [DataLoader(val_dss[i], batch_size=1, num_workers=4)\n for i in range(num_models)]\n\ntest_ds = CacheDataset(data=test_files, transform=val_transforms)\ntest_loader = DataLoader(test_ds, batch_size=1, num_workers=4)",
"100%|██████████| 40/40 [00:01<00:00, 26.37it/s]\n100%|██████████| 40/40 [00:01<00:00, 33.42it/s]\n100%|██████████| 40/40 [00:01<00:00, 36.70it/s]\n100%|██████████| 40/40 [00:00<00:00, 40.63it/s]\n100%|██████████| 40/40 [00:00<00:00, 43.25it/s]\n100%|██████████| 10/10 [00:00<00:00, 40.24it/s]\n100%|██████████| 10/10 [00:00<00:00, 37.47it/s]\n100%|██████████| 10/10 [00:00<00:00, 39.96it/s]\n100%|██████████| 10/10 [00:00<00:00, 38.21it/s]\n100%|██████████| 10/10 [00:00<00:00, 39.50it/s]\n100%|██████████| 10/10 [00:00<00:00, 42.86it/s]\n"
]
],
[
[
"## Define a training process based on workflows\n\nMore usage examples of MONAI workflows are available at: [workflow examples](https://github.com/Project-MONAI/tutorials/tree/master/modules/engines).",
"_____no_output_____"
]
],
[
[
"def train(index):\n net = UNet(\n dimensions=3,\n in_channels=1,\n out_channels=1,\n channels=(16, 32, 64, 128, 256),\n strides=(2, 2, 2, 2),\n num_res_units=2,\n ).to(device)\n loss = DiceLoss(sigmoid=True)\n opt = torch.optim.Adam(net.parameters(), 1e-3)\n\n val_post_transforms = Compose(\n [EnsureTyped(keys=\"pred\"), Activationsd(keys=\"pred\", sigmoid=True), AsDiscreted(\n keys=\"pred\", threshold_values=True)]\n )\n\n evaluator = SupervisedEvaluator(\n device=device,\n val_data_loader=val_loaders[index],\n network=net,\n inferer=SlidingWindowInferer(\n roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),\n postprocessing=val_post_transforms,\n key_val_metric={\n \"val_mean_dice\": MeanDice(\n include_background=True,\n output_transform=from_engine([\"pred\", \"label\"]),\n )\n },\n )\n train_handlers = [\n ValidationHandler(validator=evaluator, interval=4, epoch_level=True),\n StatsHandler(tag_name=\"train_loss\",\n output_transform=from_engine([\"loss\"], first=True)),\n ]\n\n trainer = SupervisedTrainer(\n device=device,\n max_epochs=4,\n train_data_loader=train_loaders[index],\n network=net,\n optimizer=opt,\n loss_function=loss,\n inferer=SimpleInferer(),\n amp=False,\n train_handlers=train_handlers,\n )\n trainer.run()\n return net",
"_____no_output_____"
]
],
[
[
"## Execute 5 training processes and get 5 models",
"_____no_output_____"
]
],
[
[
"models = [train(i) for i in range(num_models)]",
"INFO:ignite.engine.engine.SupervisedTrainer:Engine run resuming from iteration 0, epoch 0 until 4 epochs\nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 1/20 -- train_loss: 0.6230 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 2/20 -- train_loss: 0.5654 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 3/20 -- train_loss: 0.5949 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 4/20 -- train_loss: 0.5036 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 5/20 -- train_loss: 0.4908 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 6/20 -- train_loss: 0.4712 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 7/20 -- train_loss: 0.4696 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 8/20 -- train_loss: 0.5312 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 9/20 -- train_loss: 0.4865 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 10/20 -- train_loss: 0.4700 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 11/20 -- train_loss: 0.4217 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 12/20 -- train_loss: 0.4699 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 13/20 -- train_loss: 0.5223 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 14/20 -- train_loss: 0.4458 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 15/20 -- train_loss: 0.3606 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 16/20 -- train_loss: 0.4486 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 17/20 -- train_loss: 0.4257 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 18/20 -- train_loss: 0.4503 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 19/20 -- train_loss: 0.4755 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 1/4, Iter: 20/20 -- train_loss: 0.3600 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch[1] Complete. 
Time taken: 00:00:05\nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 1/20 -- train_loss: 0.3595 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 2/20 -- train_loss: 0.4048 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 3/20 -- train_loss: 0.4752 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 4/20 -- train_loss: 0.4201 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 5/20 -- train_loss: 0.3508 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 6/20 -- train_loss: 0.3597 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 7/20 -- train_loss: 0.3493 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 8/20 -- train_loss: 0.4521 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 9/20 -- train_loss: 0.3626 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 10/20 -- train_loss: 0.5069 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 11/20 -- train_loss: 0.4473 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 12/20 -- train_loss: 0.4254 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 13/20 -- train_loss: 0.4346 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 14/20 -- train_loss: 0.3218 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 15/20 -- train_loss: 0.4270 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 16/20 -- train_loss: 0.4167 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 17/20 -- train_loss: 0.3766 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 18/20 -- train_loss: 0.4059 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 19/20 -- train_loss: 0.3510 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 2/4, Iter: 20/20 -- train_loss: 0.5764 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch[2] Complete. 
Time taken: 00:00:06\nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 1/20 -- train_loss: 0.3963 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 2/20 -- train_loss: 0.3510 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 3/20 -- train_loss: 0.4277 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 4/20 -- train_loss: 0.4574 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 5/20 -- train_loss: 0.3738 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 6/20 -- train_loss: 0.4260 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 7/20 -- train_loss: 0.5325 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 8/20 -- train_loss: 0.3237 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 9/20 -- train_loss: 0.4540 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 10/20 -- train_loss: 0.3067 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 11/20 -- train_loss: 0.3417 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 12/20 -- train_loss: 0.3756 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 13/20 -- train_loss: 0.3444 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 14/20 -- train_loss: 0.3136 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 15/20 -- train_loss: 0.3385 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 16/20 -- train_loss: 0.3211 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 17/20 -- train_loss: 0.3638 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 18/20 -- train_loss: 0.3703 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 19/20 -- train_loss: 0.3725 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 3/4, Iter: 20/20 -- train_loss: 0.3613 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch[3] Complete. 
Time taken: 00:00:06\nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 1/20 -- train_loss: 0.3960 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 2/20 -- train_loss: 0.3212 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 3/20 -- train_loss: 0.3127 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 4/20 -- train_loss: 0.3453 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 5/20 -- train_loss: 0.3885 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 6/20 -- train_loss: 0.3419 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 7/20 -- train_loss: 0.3377 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 8/20 -- train_loss: 0.3056 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 9/20 -- train_loss: 0.5426 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 10/20 -- train_loss: 0.3914 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 11/20 -- train_loss: 0.4319 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 12/20 -- train_loss: 0.3419 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 13/20 -- train_loss: 0.2996 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 14/20 -- train_loss: 0.3304 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 15/20 -- train_loss: 0.3622 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 16/20 -- train_loss: 0.3328 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 17/20 -- train_loss: 0.3306 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 18/20 -- train_loss: 0.3221 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 19/20 -- train_loss: 0.3609 \nINFO:ignite.engine.engine.SupervisedTrainer:Epoch: 4/4, Iter: 20/20 -- train_loss: 0.3655 \nINFO:ignite.engine.engine.SupervisedEvaluator:Engine run resuming from iteration 0, epoch 3 until 4 epochs\nINFO:ignite.engine.engine.SupervisedEvaluator:Got new best metric of val_mean_dice: 0.9533967077732086\nINFO:ignite.engine.engine.SupervisedEvaluator:Epoch[4] Complete. Time taken: 00:00:01\nINFO:ignite.engine.engine.SupervisedEvaluator:Engine run complete. Time taken: 00:00:02\nINFO:ignite.engine.engine.SupervisedTrainer:Epoch[4] Complete. Time taken: 00:00:08\nINFO:ignite.engine.engine.SupervisedTrainer:Engine run complete. Time taken: 00:00:26\nINFO:ignite.engine.engine.SupervisedTrainer:Engine run resuming from iteration 0, epoch 0 until 4 epochs\n"
]
],
[
[
"## Define evaluation process based on `EnsembleEvaluator`",
"_____no_output_____"
]
],
[
[
"def ensemble_evaluate(post_transforms, models):\n evaluator = EnsembleEvaluator(\n device=device,\n val_data_loader=test_loader,\n pred_keys=[\"pred0\", \"pred1\", \"pred2\", \"pred3\", \"pred4\"],\n networks=models,\n inferer=SlidingWindowInferer(\n roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),\n postprocessing=post_transforms,\n key_val_metric={\n \"test_mean_dice\": MeanDice(\n include_background=True,\n output_transform=from_engine([\"pred\", \"label\"]),\n )\n },\n )\n evaluator.run()",
"_____no_output_____"
]
],
[
[
"## Evaluate the ensemble result with `MeanEnsemble`\n\n`EnsembleEvaluator` accepts a list of models for inference and outputs a list of predictions for further operations.\n\nHere the input data is a list or tuple of PyTorch Tensor with shape: [B, C, H, W, D]. \nThe list represents the output data from 5 models. \nAnd `MeanEnsemble` also can support to add `weights` for the input data:\n* The `weights` will be added to input data from highest dimension.\n* If the `weights` only has 1 dimension, it will be added to the `E` dimension of input data.\n* If the `weights` has 3 dimensions, it will be added to `E`, `B` and `C` dimensions. \nFor example, to ensemble 3 segmentation model outputs, every output has 4 channels(classes), \nThe input data shape can be: [3, B, 4, H, W, D], and add different `weights` for different classes. \nSo the `weights` shape can be: [3, 1, 4], like: \n`weights = [[[1, 2, 3, 4]], [[4, 3, 2, 1]], [[1, 1, 1, 1]]]`.",
"_____no_output_____"
]
],
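[
[
"To make the weighting concrete, here is a small NumPy sketch of how weights shaped [E, 1, C] can be broadcast against stacked predictions shaped [E, B, C, ...] and averaged over the ensemble dimension `E`. This is only an illustration of the broadcasting, with made-up sizes, and is not the exact formula implemented by `MeanEnsembled`.\n\n```python\nimport numpy as np\n\nE, B, C, H = 3, 2, 4, 5                      # ensemble, batch, classes, spatial\npreds = np.random.rand(E, B, C, H)           # stacked outputs from 3 models\nweights = np.array([[[1, 2, 3, 4]],          # shape [E, 1, C]\n                    [[4, 3, 2, 1]],\n                    [[1, 1, 1, 1]]], dtype=float)\n\nw = weights[..., np.newaxis]                 # [E, 1, C, 1] so it also spans H\nensembled = (preds * w).sum(axis=0) / w.sum(axis=0)  # weighted mean over E\nprint(ensembled.shape)                       # (2, 4, 5) == (B, C, H)\n```",
"_____no_output_____"
]
],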
[
[
"mean_post_transforms = Compose(\n [\n EnsureTyped(keys=[\"pred0\", \"pred1\", \"pred2\", \"pred3\", \"pred4\"]),\n MeanEnsembled(\n keys=[\"pred0\", \"pred1\", \"pred2\", \"pred3\", \"pred4\"],\n output_key=\"pred\",\n # in this particular example, we use validation metrics as weights\n weights=[0.95, 0.94, 0.95, 0.94, 0.90],\n ),\n Activationsd(keys=\"pred\", sigmoid=True),\n AsDiscreted(keys=\"pred\", threshold_values=True),\n ]\n)\nensemble_evaluate(mean_post_transforms, models)",
"INFO:ignite.engine.engine.EnsembleEvaluator:Engine run resuming from iteration 0, epoch 0 until 1 epochs\nINFO:ignite.engine.engine.EnsembleEvaluator:Got new best metric of test_mean_dice: 0.9435271978378296\nINFO:ignite.engine.engine.EnsembleEvaluator:Epoch[1] Complete. Time taken: 00:00:02\nINFO:ignite.engine.engine.EnsembleEvaluator:Engine run complete. Time taken: 00:00:03\n"
]
],
[
[
"## Evaluate the ensemble result with `VoteEnsemble`\n\nHere the input data is a list or tuple of PyTorch Tensor with shape: [B, C, H, W, D]. \nThe list represents the output data from 5 models.\n\nNote that:\n* `VoteEnsemble` expects the input data is discrete values.\n* Input data can be multiple channels data in One-Hot format or single channel data.\n* It will vote to select the most common data between items.\n* The output data has the same shape as every item of the input data.",
"_____no_output_____"
]
],
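[
[
"A minimal sketch (again NumPy, not the MONAI implementation) of pixel-wise majority voting on already-thresholded binary masks, mirroring the discrete-input requirement above; the three 2x2 masks are made up.\n\n```python\nimport numpy as np\n\nmasks = np.array([                 # 3 models, one 2x2 binary mask each\n    [[1, 0], [1, 1]],\n    [[1, 0], [0, 1]],\n    [[0, 1], [1, 1]],\n])\nvote = (masks.sum(axis=0) > masks.shape[0] / 2).astype(int)  # majority per pixel\nprint(vote)                        # [[1 0] [1 1]]\n```",
"_____no_output_____"
]
],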
[
[
"vote_post_transforms = Compose(\n [\n EnsureTyped(keys=[\"pred0\", \"pred1\", \"pred2\", \"pred3\", \"pred4\"]),\n Activationsd(keys=[\"pred0\", \"pred1\", \"pred2\",\n \"pred3\", \"pred4\"], sigmoid=True),\n # transform data into discrete before voting\n AsDiscreted(keys=[\"pred0\", \"pred1\", \"pred2\", \"pred3\",\n \"pred4\"], threshold_values=True),\n VoteEnsembled(keys=[\"pred0\", \"pred1\", \"pred2\",\n \"pred3\", \"pred4\"], output_key=\"pred\"),\n ]\n)\nensemble_evaluate(vote_post_transforms, models)",
"INFO:ignite.engine.engine.EnsembleEvaluator:Engine run resuming from iteration 0, epoch 0 until 1 epochs\nINFO:ignite.engine.engine.EnsembleEvaluator:Got new best metric of test_mean_dice: 0.9436934590339661\nINFO:ignite.engine.engine.EnsembleEvaluator:Epoch[1] Complete. Time taken: 00:00:02\nINFO:ignite.engine.engine.EnsembleEvaluator:Engine run complete. Time taken: 00:00:03\n"
]
],
[
[
"## Cleanup data directory\n\nRemove directory if a temporary was used.",
"_____no_output_____"
]
],
[
[
"if directory is None:\n shutil.rmtree(root_dir)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7df59443c196bebc9bc1e333ef42e0d6a3e7bbc | 6,043 | ipynb | Jupyter Notebook | beta/debug_load_imgaes.ipynb | SothanaV/visionmarker | f79ecb3983b6c1eab148229f947e30dd46d52c12 | [
"MIT"
] | null | null | null | beta/debug_load_imgaes.ipynb | SothanaV/visionmarker | f79ecb3983b6c1eab148229f947e30dd46d52c12 | [
"MIT"
] | null | null | null | beta/debug_load_imgaes.ipynb | SothanaV/visionmarker | f79ecb3983b6c1eab148229f947e30dd46d52c12 | [
"MIT"
] | null | null | null | 37.76875 | 132 | 0.608307 | [
[
[
"## Load Dataset\nTo clear all record and load all images to the /dataset.\nsvg_w=960, svg_h=540",
"_____no_output_____"
]
],
[
[
"from app.models import Label,Image,Batch, Comment, STATUS_CHOICES\nfrom django.contrib.auth.models import User\nimport os, fnmatch, uuid, shutil\nfrom uuid import uuid4\ndef getbatchlist(filelist):\n def chunks(li, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(li), n):\n yield li[i:i + n]\n\n return list(chunks(filelist, 5))\nprint getbatchlist(range(10))",
"[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]\n"
],
[
"#FOR DEBUG ONLY !!!!\n# Clear all batches and move images from /dataset to /raw\nprint \"DELETE ALL RECORDS!!\"\nq=Batch.objects.all().delete()\nstatic_path = settings.STATICFILES_DIRS[0]\nraw_path = os.path.join(static_path,'raw')\ndataset_path = os.path.join(static_path,'dataset')\nraw_files = fnmatch.filter(os.listdir(dataset_path), '*.jpg')\nfor i in raw_files:\n _dst=os.path.join(raw_path, i )\n _src=os.path.join(dataset_path,i)\n print \"moving to: %s\"%(_dst)\n shutil.move(src=_src, dst=_dst)",
"DELETE ALL RECORDS!!\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\15185e10-3d59-4129-b5c4-314fdb228a59.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\25136c78-05f6-422c-9b82-cbbd42deb261.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\34c84071-abd7-4f01-86e6-3f2dc6c96a0b.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\50b41b0a-1fa7-473b-8330-9a26310380b7.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\54e9550f-5c1f-46d8-b4d5-45899bf0554f.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\693a478e-439a-45de-8b20-20f4d0e0f240.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\7696acee-37df-4d8c-b85b-30220ac00020.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\88634d71-c69f-4582-b54c-926719da1020.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\92506a5f-1f28-482a-98ad-e377c7ddfed3.jpg\nmoving to: C:\\Users\\Wasit\\Documents\\GitHub\\visionmarker\\beta\\static\\raw\\bbd100a5-82e3-4bcd-8213-24d6ad73ffc6.jpg\n"
],
[
"# moving from /raw/i to /dataset/j\nstatic_path = settings.STATICFILES_DIRS[0]\nraw_path = os.path.join(static_path,'raw')\ndataset_path = os.path.join(static_path,'dataset')\nraw_files = fnmatch.filter(os.listdir(raw_path), '*.jpg')\nfor chunk in getbatchlist(raw_files):\n b=Batch()\n b.save()\n for i in chunk: \n j=unicode(uuid4())+'.jpg'\n print \"batch: %s,src: %s, dst: %s\"%(b,i,j)\n Image(batch=b, src_path=j, raw_path=i).save()\n _dst=os.path.join(dataset_path,j)\n _src=os.path.join(raw_path,i)\n \n shutil.move(src=_src, dst=_dst)",
"batch: BID000001,src: 15185e10-3d59-4129-b5c4-314fdb228a59.jpg, dst: 6c35c307-30ca-48c1-a92e-7b7dd9b60108.jpg\nbatch: BID000001,src: 25136c78-05f6-422c-9b82-cbbd42deb261.jpg, dst: ba0d77ca-d6da-4213-b396-944580bfccea.jpg\nbatch: BID000001,src: 34c84071-abd7-4f01-86e6-3f2dc6c96a0b.jpg, dst: 2ed9b9ed-bcec-45a2-a068-8806e9e83764.jpg\nbatch: BID000001,src: 50b41b0a-1fa7-473b-8330-9a26310380b7.jpg, dst: f92dd495-e108-401d-9cfc-d498cc768004.jpg\nbatch: BID000001,src: 54e9550f-5c1f-46d8-b4d5-45899bf0554f.jpg, dst: f1093aab-7a1b-4ef4-bd5c-86174cc86f8c.jpg\nbatch: BID000002,src: 693a478e-439a-45de-8b20-20f4d0e0f240.jpg, dst: d97782e1-404a-47c3-8f12-decb80c9abf4.jpg\nbatch: BID000002,src: 7696acee-37df-4d8c-b85b-30220ac00020.jpg, dst: 0d3e864c-fad3-4d30-9580-b61c7ec9fb47.jpg\nbatch: BID000002,src: 88634d71-c69f-4582-b54c-926719da1020.jpg, dst: f5bed359-f260-41d5-8fc5-2181e9441f7d.jpg\nbatch: BID000002,src: 92506a5f-1f28-482a-98ad-e377c7ddfed3.jpg, dst: 648e13bf-3aa9-46d6-85ff-6393cf870e00.jpg\nbatch: BID000002,src: bbd100a5-82e3-4bcd-8213-24d6ad73ffc6.jpg, dst: 8466c83f-6873-4340-ab13-229322640162.jpg\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7df6c3721f564cb895ad7fe325839bb9a59e33a | 666,170 | ipynb | Jupyter Notebook | infinite_swapping/mcmc_replica_infiniteswap.ipynb | matsunagalab/notebooks | b5cfd038999e2f01874145d26de32fdc933f2fd8 | [
"BSD-3-Clause"
] | null | null | null | infinite_swapping/mcmc_replica_infiniteswap.ipynb | matsunagalab/notebooks | b5cfd038999e2f01874145d26de32fdc933f2fd8 | [
"BSD-3-Clause"
] | null | null | null | infinite_swapping/mcmc_replica_infiniteswap.ipynb | matsunagalab/notebooks | b5cfd038999e2f01874145d26de32fdc933f2fd8 | [
"BSD-3-Clause"
] | null | null | null | 1,464.10989 | 284,350 | 0.960228 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7df9b3fdeb16b39134b96e81d442c719bcf081f | 109,094 | ipynb | Jupyter Notebook | Code/[2]Annotations.ipynb | SalimFares4/Image-Segmentation | 787530a2db9ebd5881eaf042317411ce432fcc71 | [
"MIT"
] | null | null | null | Code/[2]Annotations.ipynb | SalimFares4/Image-Segmentation | 787530a2db9ebd5881eaf042317411ce432fcc71 | [
"MIT"
] | 3 | 2021-12-16T10:09:36.000Z | 2022-02-11T17:53:31.000Z | Code/[2]Annotations.ipynb | SalimFares4/Image-Segmentation | 787530a2db9ebd5881eaf042317411ce432fcc71 | [
"MIT"
] | null | null | null | 109,094 | 109,094 | 0.937531 | [
[
[
"!pip install rasterio\n!pip install geopandas",
"Collecting rasterio\n Downloading rasterio-1.2.10-cp37-cp37m-manylinux1_x86_64.whl (19.3 MB)\n\u001b[K |████████████████████████████████| 19.3 MB 5.2 MB/s \n\u001b[?25hCollecting cligj>=0.5\n Downloading cligj-0.7.2-py3-none-any.whl (7.1 kB)\nRequirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from rasterio) (2021.10.8)\nRequirement already satisfied: click>=4.0 in /usr/local/lib/python3.7/dist-packages (from rasterio) (7.1.2)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from rasterio) (57.4.0)\nCollecting affine\n Downloading affine-2.3.0-py2.py3-none-any.whl (15 kB)\nRequirement already satisfied: attrs in /usr/local/lib/python3.7/dist-packages (from rasterio) (21.4.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from rasterio) (1.19.5)\nCollecting snuggs>=1.4.1\n Downloading snuggs-1.4.7-py3-none-any.whl (5.4 kB)\nCollecting click-plugins\n Downloading click_plugins-1.1.1-py2.py3-none-any.whl (7.5 kB)\nRequirement already satisfied: pyparsing>=2.1.6 in /usr/local/lib/python3.7/dist-packages (from snuggs>=1.4.1->rasterio) (3.0.7)\nInstalling collected packages: snuggs, cligj, click-plugins, affine, rasterio\nSuccessfully installed affine-2.3.0 click-plugins-1.1.1 cligj-0.7.2 rasterio-1.2.10 snuggs-1.4.7\nCollecting geopandas\n Downloading geopandas-0.10.2-py2.py3-none-any.whl (1.0 MB)\n\u001b[K |████████████████████████████████| 1.0 MB 28.7 MB/s \n\u001b[?25hCollecting pyproj>=2.2.0\n Downloading pyproj-3.2.1-cp37-cp37m-manylinux2010_x86_64.whl (6.3 MB)\n\u001b[K |████████████████████████████████| 6.3 MB 45.7 MB/s \n\u001b[?25hRequirement already satisfied: pandas>=0.25.0 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.3.5)\nCollecting fiona>=1.8\n Downloading Fiona-1.8.20-cp37-cp37m-manylinux1_x86_64.whl (15.4 MB)\n\u001b[K |████████████████████████████████| 15.4 MB 47.6 MB/s \n\u001b[?25hRequirement already satisfied: shapely>=1.6 in /usr/local/lib/python3.7/dist-packages (from geopandas) (1.8.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (57.4.0)\nRequirement already satisfied: certifi in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (2021.10.8)\nRequirement already satisfied: click>=4.0 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (7.1.2)\nCollecting munch\n Downloading munch-2.5.0-py2.py3-none-any.whl (10 kB)\nRequirement already satisfied: click-plugins>=1.0 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (1.1.1)\nRequirement already satisfied: six>=1.7 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (1.15.0)\nRequirement already satisfied: cligj>=0.5 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (0.7.2)\nRequirement already satisfied: attrs>=17 in /usr/local/lib/python3.7/dist-packages (from fiona>=1.8->geopandas) (21.4.0)\nRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (1.19.5)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (2.8.2)\nRequirement already satisfied: pytz>=2017.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.25.0->geopandas) (2018.9)\nInstalling collected packages: munch, pyproj, fiona, geopandas\nSuccessfully installed fiona-1.8.20 geopandas-0.10.2 munch-2.5.0 pyproj-3.2.1\n"
],
[
"%pylab inline\nimport geopandas as gpd\nimport rasterio as rio\nimport rasterio.plot as rplot\nimport itertools\nimport rasterio.mask as mask\nimport matplotlib.pyplot as plt\nimport numpy as np",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"# Root Directory\nImage_Segmentation_Path = '/content/drive/My Drive/Image Segmentation/'\n\n# Inputs/Sources\nProcessed_DEMs_Path = Image_Segmentation_Path + \"Processed DEMs/\"\nhigh_dem_interpolated = Processed_DEMs_Path + \"high_dem_interpolated.tif\"\nmounds_f = Image_Segmentation_Path + \"polygons/mounds.shp\"\nvents_f = Image_Segmentation_Path + \"points/vent_points.shp\"\n\n#Outputs/Destinations\n# annotation_file = Processed_DEMs_Path + \"annotated.tif\"\nannotation_file = Processed_DEMs_Path + \"all_annotation.tif\"\n\n# PNGs for the report \nPNG_Path = Image_Segmentation_Path + \"PNG/\"",
"_____no_output_____"
],
[
"data = gpd.read_file(mounds_f)",
"_____no_output_____"
],
[
"set(data['Morpho'])",
"_____no_output_____"
],
[
"dsource= rio.open(high_dem_interpolated)\ndtm = dsource.read()\nmeta = dsource.meta\nprofile =dsource.profile\ndata = gpd.read_file(mounds_f)\ntoremove = []\nfor id, gg in enumerate(data.geometry):\n if not gg.is_valid:\n print(f\"not valid {id}\")\n toremove.append(id)\n\ndata = data.drop(toremove)\ndata.drop_duplicates(inplace=True)\n\n# simple_mounds = data[data['Morpho'] == 'simple']\nall_mounds = data\n### Reprojecting the labels coordinate system to that of the original DEM. \n\n# mounds_corrected =simple_mounds.to_crs(meta[\"crs\"])\nmounds_corrected =all_mounds.to_crs(meta[\"crs\"])\n\n### Masking the labels. 0 for non mounds, 1 for mounds. Can be used for segmentation. \n\n# out_img, out_transform =mask.mask(dataset=dsource, shapes = mounds_corrected.geometry, nodata=np.nan) \nout_img, out_transform =mask.mask(dataset=dsource, shapes = mounds_corrected.geometry, nodata=np.nan) \n\n# do the crop \nout_img = out_img[0]\nnodata = meta[\"nodata\"] \ninvalid_elevation = dtm[0] == nodata \nisnan = np.isnan(out_img) \nout_img[isnan] = 0.0\nout_img[~isnan] = 1.0\nout_img[invalid_elevation] = 0.0 # overall mask 0 non mounds, 1 mounds\nfig, ax = plt.subplots(1, 1, figsize=(8,8))\nplt.imshow(out_img)\n# fig.savefig(PNG_Path+\"Annotated.png\")",
"not valid 56\nnot valid 92\nnot valid 100\nnot valid 163\nnot valid 200\nnot valid 329\nnot valid 330\n"
],
[
"",
"_____no_output_____"
],
[
"with rio.open(annotation_file, 'w', **profile) as dest: \n dest.write_band(1, out_img)",
"_____no_output_____"
],
[
"annotated = rio.open(annotation_file)\nfig, ax = plt.subplots(1,1, figsize=(10,10))\nrplot.show(annotated, fig)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7df9c53d0e1e601afa85fdf0c7c7bf792dfd1ad | 443,094 | ipynb | Jupyter Notebook | ML_fundations/exercise_3/machine_learning_algorithms.ipynb | jhmz333/ai-masters-degree-stuff | b8b868429e0f548d63ce8b18c68e297067852790 | [
"MIT"
] | null | null | null | ML_fundations/exercise_3/machine_learning_algorithms.ipynb | jhmz333/ai-masters-degree-stuff | b8b868429e0f548d63ce8b18c68e297067852790 | [
"MIT"
] | null | null | null | ML_fundations/exercise_3/machine_learning_algorithms.ipynb | jhmz333/ai-masters-degree-stuff | b8b868429e0f548d63ce8b18c68e297067852790 | [
"MIT"
] | null | null | null | 445.320603 | 62,256 | 0.936032 | [
[
[
"<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Actividad-3.-Algoritmos-de-Machine-Learning.\" data-toc-modified-id=\"Actividad-3.-Algoritmos-de-Machine-Learning.-1\"><span class=\"toc-item-num\">1 </span>Actividad 3. Algoritmos de Machine Learning.</a></span><ul class=\"toc-item\"><li><span><a href=\"#Finalidad-de-la-práctica\" data-toc-modified-id=\"Finalidad-de-la-práctica-1.1\"><span class=\"toc-item-num\">1.1 </span>Finalidad de la práctica</a></span></li><li><span><a href=\"#Introduciendo-Random-Forests-y-Decision-Trees\" data-toc-modified-id=\"Introduciendo-Random-Forests-y-Decision-Trees-1.2\"><span class=\"toc-item-num\">1.2 </span>Introduciendo Random Forests y Decision Trees</a></span><ul class=\"toc-item\"><li><span><a href=\"#Importamos-las-librerías-necesarias\" data-toc-modified-id=\"Importamos-las-librerías-necesarias-1.2.1\"><span class=\"toc-item-num\">1.2.1 </span>Importamos las librerías necesarias</a></span></li><li><span><a href=\"#Creando-un-Decision-Tree\" data-toc-modified-id=\"Creando-un-Decision-Tree-1.2.2\"><span class=\"toc-item-num\">1.2.2 </span>Creando un Decision Tree</a></span></li><li><span><a href=\"#Decision-Trees-y-Overfitting\" data-toc-modified-id=\"Decision-Trees-y-Overfitting-1.2.3\"><span class=\"toc-item-num\">1.2.3 </span>Decision Trees y Overfitting</a></span></li></ul></li><li><span><a href=\"#Ensembles-of-Estimators:-Random-Forests\" data-toc-modified-id=\"Ensembles-of-Estimators:-Random-Forests-1.3\"><span class=\"toc-item-num\">1.3 </span>Ensembles of Estimators: Random Forests</a></span></li><li><span><a href=\"#Decision-Trees-y-Random-Forest-para-Classificación-de-Dígitos\" data-toc-modified-id=\"Decision-Trees-y-Random-Forest-para-Classificación-de-Dígitos-1.4\"><span class=\"toc-item-num\">1.4 </span>Decision Trees y Random Forest para Classificación de Dígitos</a></span><ul class=\"toc-item\"><li><span><a href=\"#Matriz-de-confusión\" data-toc-modified-id=\"Matriz-de-confusión-1.4.1\"><span class=\"toc-item-num\">1.4.1 </span>Matriz de confusión</a></span></li><li><span><a href=\"#Pregunta-1-(3-puntos):\" data-toc-modified-id=\"Pregunta-1-(3-puntos):-1.4.2\"><span class=\"toc-item-num\">1.4.2 </span>Pregunta 1 (3 puntos):</a></span></li><li><span><a href=\"#Modifique-el-parámetro-max_depth-en-clf-=-DecisionTreeClassifier(max_depth-=-...)-¿Qué-ocurre-con-la-precisión-sobre-el-test-dataset-cuando-lo-disminuímos?-¿Y-cuando-lo-aumentamos?-Una-nota-importante:-cómo-comprobaría-si-el-dataset-está-balanceado-o-desbalanceado?\" data-toc-modified-id=\"Modifique-el-parámetro-max_depth-en-clf-=-DecisionTreeClassifier(max_depth-=-...)-¿Qué-ocurre-con-la-precisión-sobre-el-test-dataset-cuando-lo-disminuímos?-¿Y-cuando-lo-aumentamos?-Una-nota-importante:-cómo-comprobaría-si-el-dataset-está-balanceado-o-desbalanceado?-1.4.3\"><span class=\"toc-item-num\">1.4.3 </span>Modifique el parámetro max_depth en <code>clf = DecisionTreeClassifier(max_depth = ...)</code> ¿Qué ocurre con la precisión sobre el test dataset cuando lo disminuímos? ¿Y cuando lo aumentamos? 
Una nota importante: cómo comprobaría si el dataset está balanceado o desbalanceado?</a></span></li><li><span><a href=\"#Pregunta-2-(4-puntos):\" data-toc-modified-id=\"Pregunta-2-(4-puntos):-1.4.4\"><span class=\"toc-item-num\">1.4.4 </span>Pregunta 2 (4 puntos):</a></span></li><li><span><a href=\"#Repita-esta-clasificación-con-sklearn.ensemble.RandomForestClassifier--¿Mejoran-los-resultados-de-precisión-en-el-test-dataset?-Represente-la-matriz-de-correlación.\" data-toc-modified-id=\"Repita-esta-clasificación-con-sklearn.ensemble.RandomForestClassifier--¿Mejoran-los-resultados-de-precisión-en-el-test-dataset?-Represente-la-matriz-de-correlación.-1.4.5\"><span class=\"toc-item-num\">1.4.5 </span>Repita esta clasificación con <code>sklearn.ensemble.RandomForestClassifier</code> ¿Mejoran los resultados de precisión en el test dataset? Represente la matriz de correlación.</a></span></li><li><span><a href=\"#Pregunta-3-(1-punto):-De-acuerdo-a-lo-indicado-en-el-siguiente-enlace.-¿Podría-justificar-los-hiperparámetros-elegidos-dadas-las-características-de-nuestro-dataset?\" data-toc-modified-id=\"Pregunta-3-(1-punto):-De-acuerdo-a-lo-indicado-en-el-siguiente-enlace.-¿Podría-justificar-los-hiperparámetros-elegidos-dadas-las-características-de-nuestro-dataset?-1.4.6\"><span class=\"toc-item-num\">1.4.6 </span>Pregunta 3 (1 punto): De acuerdo a lo indicado en el <a href=\"https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html\" target=\"_blank\">siguiente enlace</a>. ¿Podría justificar los hiperparámetros elegidos dadas las características de nuestro dataset?</a></span></li><li><span><a href=\"#Pregunta-4-(2-puntos-adicionales):\" data-toc-modified-id=\"Pregunta-4-(2-puntos-adicionales):-1.4.7\"><span class=\"toc-item-num\">1.4.7 </span>Pregunta 4 (2 puntos adicionales):</a></span></li><li><span><a href=\"#Este-artículo-es-muy-interesante-a-la-hora-de-averiguar-cómo-realizar-cross-validation-y-hyperparameter-tunning-de-un-modelo.\" data-toc-modified-id=\"Este-artículo-es-muy-interesante-a-la-hora-de-averiguar-cómo-realizar-cross-validation-y-hyperparameter-tunning-de-un-modelo.-1.4.8\"><span class=\"toc-item-num\">1.4.8 </span><a href=\"https://chrisalbon.com/machine_learning/model_evaluation/cross_validation_parameter_tuning_grid_search/\" target=\"_blank\">Este artículo</a> es muy interesante a la hora de averiguar cómo realizar <strong>cross validation</strong> y <strong>hyperparameter tunning</strong> de un modelo.</a></span></li><li><span><a href=\"#¿Quién-se-atreve-a-reproducir-los-pasos-indicados-en-este-artículo-para-encontrar-los-parámetros-óptimos-en-el-RandomForestClassifier()?\" data-toc-modified-id=\"¿Quién-se-atreve-a-reproducir-los-pasos-indicados-en-este-artículo-para-encontrar-los-parámetros-óptimos-en-el-RandomForestClassifier()?-1.4.9\"><span class=\"toc-item-num\">1.4.9 </span>¿Quién se atreve a reproducir los pasos indicados en este artículo para encontrar los parámetros óptimos en el <code>RandomForestClassifier()</code>?</a></span></li></ul></li></ul></li></ul></div>",
"_____no_output_____"
],
[
"# Actividad 3. Algoritmos de Machine Learning. ",
"_____no_output_____"
],
[
"## Finalidad de la práctica",
"_____no_output_____"
],
[
"Se investigarán algortimos de aprendizaje supervisado y no supervisado sobre datasets conocidos.\nAprenderemos la metodología de uso de los principales `estimadores` de `Scikit-Learn API`, entre los que se incluyen los siguientes pasos:\n\n1. Elección una clase de modelo importando la clase de estimador adecuada desde `Scikit-Learn`.\n2. Elección de hiperparámetros creando una instancia de esta clase con los valores deseados.\n3. Organización los datos en una matriz de características y un vector objetivo siguiendo la discusión anterior.\n4. Ajuste del modelo a sus datos llamando al método `fit ()` de la instancia del modelo.\n5. Aplicación el modelo a los nuevos datos:\n\n * Para el aprendizaje supervisado, a menudo predecimos etiquetas para datos desconocidos utilizando el método predict ().\n * Para el aprendizaje no supervisado, a menudo transformamos o inferimos las propiedades de los datos utilizando el método transform () o predict ().\n\nAhora veremos varios ejemplos simples de aplicación de métodos de aprendizaje supervisados y no supervisados.",
"_____no_output_____"
],
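[
"A hedged, minimal illustration of the five steps on synthetic data, using a simple estimator (`KNeighborsClassifier`) that is not otherwise part of this activity:\n\n```python\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsClassifier   # 1. choose a model class\n\nmodel = KNeighborsClassifier(n_neighbors=3)          # 2. choose hyperparameters\n\nrng = np.random.RandomState(0)\nX_demo = rng.rand(100, 2)                            # 3. feature matrix ...\ny_demo = (X_demo[:, 0] > 0.5).astype(int)            #    ... and target vector\n\nmodel.fit(X_demo, y_demo)                            # 4. fit the model\n\nprint(model.predict([[0.9, 0.1], [0.1, 0.9]]))       # 5. predict on new data\n```",
"_____no_output_____"
],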
[
"## Introduciendo Random Forests y Decision Trees",
"_____no_output_____"
],
[
"Los **Random Forests** son un ejemplo de un *ensemble learner* construido sobre árboles de decisión. Por esta razón, comenzaremos discutiendo los **árboles de decisión** o **Decision Trees**.\n\nLos árboles de decisión son formas muy intuitivas de clasificar o etiquetar objetos: simplemente hace una serie de preguntas diseñadas para enfocarse en la clasificación:",
"_____no_output_____"
],
[
"### Importamos las librerías necesarias",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nfrom IPython.core.display import HTML\nimport numpy as np\nimport pandas as pd\nsns.set()",
"_____no_output_____"
],
[
"import fig_code\nfig_code.plot_example_decision_tree()",
"_____no_output_____"
]
],
[
[
"La división binaria hace que esto sea extremadamente eficiente.\nComo siempre, sin embargo, el truco es * hacer las preguntas correctas *.\nAquí es donde entra el proceso algorítmico: en el entrenamiento de un clasificador de árbol de decisión, el algoritmo examina las características y decide qué preguntas (o \"divisiones\") contienen la mayor información.\n\n### Creando un Decision Tree\n\nAquí hay un ejemplo de un clasificador de árbol de decisión en `scikit-learn`. Comenzaremos por definir algunos datos etiquetados bidimensionales:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import make_blobs\n\nX, y = make_blobs(n_samples=500, centers=4,\n random_state=0, cluster_std=1.0)\nplt.scatter(X[:, 0], X[:, 1], c=y, s=10, cmap='rainbow');",
"_____no_output_____"
]
],
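[
[
"To make 'which splits contain the most information' a bit more concrete, here is a hedged sketch that scores one candidate axis-aligned split of the `X`, `y` blobs generated above by its decrease in Gini impurity. It is a simplified, illustrative version of the impurity-based criteria scikit-learn uses internally; the threshold 0.0 on feature 0 is an arbitrary example.\n\n```python\nimport numpy as np\n\ndef gini(labels):\n    if len(labels) == 0:\n        return 0.0\n    _, counts = np.unique(labels, return_counts=True)\n    p = counts / counts.sum()\n    return 1.0 - np.sum(p ** 2)\n\ndef split_gain(X, y, feature, threshold):\n    left = y[X[:, feature] < threshold]\n    right = y[X[:, feature] >= threshold]\n    weighted = (len(left) * gini(left) + len(right) * gini(right)) / len(y)\n    return gini(y) - weighted  # impurity decrease: higher = more informative split\n\nprint(split_gain(X, y, feature=0, threshold=0.0))  # uses X, y from the cell above\n```",
"_____no_output_____"
]
],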
[
[
"Hacemos llamada a algunas funciones de ayuda:",
"_____no_output_____"
]
],
[
[
"from fig_code import visualize_tree, plot_tree_interactive",
"_____no_output_____"
]
],
[
[
"Usamos IPython's ``interact`` (Disponible en IPython 2.0+, requiere un live kernel) para ver cómo el árbol parte los datos progresivamente:",
"_____no_output_____"
]
],
[
[
"plot_tree_interactive(X, y);",
"_____no_output_____"
]
],
[
[
"Tenga en cuenta que a cada aumento de profundidad, cada nodo se divide en dos, **excepto los nodos que contienen una sola clase**. El resultado es una clasificación **no paramétrica** muy rápida y puede ser extremadamente útil en la práctica.",
"_____no_output_____"
],
[
"### Decision Trees y Overfitting\n\nUn problema con los árboles de decisión es que es muy fácil crear árboles que **se ajustan en exceso** a los datos. Nos encontramos con **overfitting**. Es decir, ¡son lo suficientemente flexibles como para que puedan aprender la estructura del ruido en los datos en lugar de la señal! Por ejemplo, observe dos árboles construidos en dos subconjuntos de este conjunto de datos:",
"_____no_output_____"
]
],
[
[
"from sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier()\n\nplt.figure()\nvisualize_tree(clf, X[:200], y[:200], boundaries=False)\nplt.figure()\nvisualize_tree(clf, X[-200:], y[-200:], boundaries=False)",
"_____no_output_____"
]
],
[
[
"¡Los detalles de las clasificaciones son completamente diferentes! Eso es una indicación de **overfitting o sobreajuste**: cuando predice el valor para un nuevo punto, el resultado refleja más el ruido en el modelo que la señal.",
"_____no_output_____"
],
[
"## Ensembles of Estimators: Random Forests\n\nUna posible forma de abordar el **overfitting** es usar un **ensembles method**: este es un meta-estimador que esencialmente promedia los resultados de muchos estimadores individuales que sobre-ajustan los datos. Sorprendentemente, las estimaciones resultantes son mucho más sólidas y precisas que las estimaciones individuales que las componen.\n\nUno de los métodos de conjunto más comunes es el **Random Forest**, en el que el conjunto está formado por muchos árboles de decisión que de alguna manera están perturbados.\n\nHay volúmenes de teoría y precedentes sobre cómo aleatorizar estos árboles, pero como ejemplo, imaginemos que un conjunto de estimadores se ajusta a los subconjuntos de los datos. Podemos tener una idea de cómo se verían estos de la siguiente manera:",
"_____no_output_____"
]
],
[
[
"def fit_randomized_tree(random_state=0):\n X, y = make_blobs(n_samples=300, centers=4,\n random_state=0, cluster_std=2.0)\n \n clf = DecisionTreeClassifier(max_depth=15)\n \n rng = np.random.RandomState(random_state)\n i = np.arange(len(y))\n rng.shuffle(i)\n visualize_tree(clf, X[i[:250]], y[i[:250]], boundaries=False,\n xlim=(X[:, 0].min(), X[:, 0].max()),\n ylim=(X[:, 1].min(), X[:, 1].max()))\n \nfrom ipywidgets import interact\ninteract(fit_randomized_tree, random_state=(0, 100));",
"_____no_output_____"
]
],
[
[
"Vea cómo cambian los detalles del modelo en función de la muestra, mientras que las características más grandes siguen siendo las mismas.\n\nEl clasificador de bosque aleatorio hará algo similar a esto, pero usa una versión combinada de todos estos árboles para llegar a una respuesta final:",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100, random_state=0)\nvisualize_tree(clf, X, y, boundaries=False);",
"_____no_output_____"
]
],
[
[
"Al promediar **100 modelos perturbados al azar**, terminamos con un modelo general que se ajusta mucho mejor a nuestros datos.\n\n**(Nota: anteriormente, aleatorizamos el modelo mediante submuestreo...\nLos bosques aleatorios utilizan medios de aleatorización más sofisticados, sobre los cuales puede leer, por ejemplo, la [documentación de scikit-learn](http://scikit-learn.org/stable/modules/ensemble.html#forest))**\n",
"_____no_output_____"
],
[
"_____\n## Decision Trees y Random Forest para Classificación de Dígitos\n\nTomando el dataset de **MNIST hand-written digits** vamos a probar la eficacia de un Decision Tree Classifier.",
"_____no_output_____"
],
[
"Cargamos el dataset en memoria.",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets import load_digits\ndigits = load_digits()\ndigits.keys()",
"_____no_output_____"
],
[
"X = digits.data\ny = digits.target\nprint(X.shape)\nprint(y.shape)",
"(1797, 64)\n(1797,)\n"
]
],
[
[
"Para recordarnos lo que estamos viendo, visualizaremos los primeros puntos de datos:",
"_____no_output_____"
]
],
[
[
"# set up the figure\nfig = plt.figure(figsize=(6, 6)) # figure size in inches\nfig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n# plot the digits: each image is 8x8 pixels\nfor i in range(64):\n ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])\n ax.imshow(digits.images[i], cmap=plt.cm.binary, interpolation='nearest')\n \n # label the image with the target value\n ax.text(0, 7, str(digits.target[i]))",
"_____no_output_____"
]
],
[
[
"Podemos clasificar rápidamente los dígitos utilizando un árbol de decisión de la siguiente manera:",
"_____no_output_____"
],
[
"Hacemos una partición de los datos en set de entrenamiento y set de testeo con la función `train_test_split`: ",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\nXtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)",
"_____no_output_____"
]
],
[
[
"Definimos el `clasificador` y lo ajustamos a nuestros datos de entrenamiento.",
"_____no_output_____"
]
],
[
[
"clf = DecisionTreeClassifier(max_depth=5)\nclf.fit(Xtrain, ytrain)",
"_____no_output_____"
]
],
[
[
"Predecimos las etiquetas del test dataset, para posteriormente poder comprobar su validez contra las reales:",
"_____no_output_____"
]
],
[
[
"ypred = clf.predict(Xtest)",
"_____no_output_____"
]
],
[
[
"Podemos comprobar la exactitud de este clasificador:",
"_____no_output_____"
]
],
[
[
"print('Precisión sobre el test dataset: ', metrics.accuracy_score(ypred, ytest)*100, '%')",
"Precisión sobre el test dataset: 66.44444444444444 %\n"
]
],
[
[
"### Matriz de confusión",
"_____no_output_____"
],
[
"Visualicemos el comportamiento de nuestro árbol clasificador con la matriz de confusión.\n\nPodéis encontrar detalles sobre esta matriz en [este artículo](https://data-speaks.luca-d3.com/2018/01/ML-a-tu-alcance-matriz-confusion.html).",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import classification_report, confusion_matrix\ncm = confusion_matrix(ytest, ypred)\nprint(classification_report(ytest, ypred))\nprint(cm)",
" precision recall f1-score support\n\n 0 0.94 0.92 0.93 37\n 1 0.58 0.44 0.50 43\n 2 0.47 0.16 0.24 44\n 3 0.42 0.71 0.53 45\n 4 0.85 0.87 0.86 38\n 5 0.93 0.88 0.90 48\n 6 0.98 0.92 0.95 52\n 7 0.84 0.65 0.73 48\n 8 0.38 0.85 0.53 48\n 9 1.00 0.26 0.41 47\n\n accuracy 0.66 450\n macro avg 0.74 0.67 0.66 450\nweighted avg 0.74 0.66 0.66 450\n\n[[34 0 0 0 1 2 0 0 0 0]\n [ 0 19 2 5 0 0 0 1 16 0]\n [ 1 1 7 4 0 0 1 0 30 0]\n [ 0 4 2 32 0 0 0 1 6 0]\n [ 1 2 0 0 33 0 0 1 1 0]\n [ 0 2 1 2 1 42 0 0 0 0]\n [ 0 3 0 0 0 0 48 0 1 0]\n [ 0 1 2 1 3 0 0 31 10 0]\n [ 0 0 0 6 0 0 0 1 41 0]\n [ 0 1 1 26 1 1 0 2 3 12]]\n"
],
[
"sns.heatmap(cm, square=True, annot=True, cbar=False)\nplt.xlabel('predicted value')\nplt.ylabel('true value');",
"_____no_output_____"
]
],
[
[
"### Pregunta 1 (3 puntos): \n### Modifique el parámetro max_depth en `clf = DecisionTreeClassifier(max_depth = ...)` ¿Qué ocurre con la precisión sobre el test dataset cuando lo disminuímos? ¿Y cuando lo aumentamos? Una nota importante: cómo comprobaría si el dataset está balanceado o desbalanceado?",
"_____no_output_____"
]
],
[
[
"clf = DecisionTreeClassifier(max_depth=3, random_state=0)\nclf.fit(Xtrain, ytrain)\nprint(f'Precisión: {clf.score(Xtest, ytest)*100} %')",
"Precisión: 46.0 %\n"
]
],
[
[
"**Respuesta:** La precisión **disminuye** cuando se decrementa la profundidad del árbol. Esto parece lógico porque no tiene suficiente posibilidades de dividir el problema.",
"_____no_output_____"
]
],
[
[
"clf = DecisionTreeClassifier(max_depth=11, random_state=0)\nclf.fit(Xtrain, ytrain)\nprint(f'Precisión: {clf.score(Xtest, ytest)*100} %')",
"Precisión: 84.44444444444444 %\n"
]
],
[
[
"**Respuesta:** La precisión **aumenta** cuando se incrementa la profundidad del árbol, pero llega a un punto que no sigue creciendo, en este caso el valor máximo de precisión conseguido es para el parámetro *max_depth=11*. ",
"_____no_output_____"
]
],
[
[
"y_length = len(y)\nfor i in range(10):\n occurrences = np.count_nonzero(y==i)\n perc_occurrences = np.count_nonzero(y==i) / len(y) * 100\n print(f'{occurrences} ocurrencias de la clase {i} => {round(perc_occurrences, 2)}%')",
"178 ocurrencias de la clase 0 => 9.91%\n182 ocurrencias de la clase 1 => 10.13%\n177 ocurrencias de la clase 2 => 9.85%\n183 ocurrencias de la clase 3 => 10.18%\n181 ocurrencias de la clase 4 => 10.07%\n182 ocurrencias de la clase 5 => 10.13%\n181 ocurrencias de la clase 6 => 10.07%\n179 ocurrencias de la clase 7 => 9.96%\n174 ocurrencias de la clase 8 => 9.68%\n180 ocurrencias de la clase 9 => 10.02%\n"
]
],
[
[
"**Respuesta:** Se observa que el porcentaje de ocurrencia de las clases *se encuentran muy cercano al 10%*, que es el valor de referencia para considerarse que están completamente balanceados, por lo que **este dataset no sufre de desbalanceo**.",
"_____no_output_____"
],
[
"### Pregunta 2 (4 puntos):\n### Repita esta clasificación con ``sklearn.ensemble.RandomForestClassifier`` ¿Mejoran los resultados de precisión en el test dataset? Represente la matriz de confusión.",
"_____no_output_____"
]
],
[
[
"Pista: Hay que ejecutar los mismos comandos que antes pero con una única variación:",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier()",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(random_state=0)\nclf.fit(Xtrain, ytrain)\nprint(f'Precisión: {clf.score(Xtest, ytest)*100} %')",
"Precisión: 97.55555555555556 %\n"
]
],
[
[
"**Respuesta**: Se observa una **mejoría notable** en la precisión (97.55%) respecto al mejor resultado obtenido usando Decision Trees (84.67%).",
"_____no_output_____"
]
],
[
[
"ypred = clf.predict(Xtest)\ncm = confusion_matrix(ytest, ypred)\nprint(classification_report(ytest, ypred))\nprint(cm)",
" precision recall f1-score support\n\n 0 1.00 1.00 1.00 37\n 1 0.95 0.98 0.97 43\n 2 1.00 0.98 0.99 44\n 3 0.94 0.98 0.96 45\n 4 1.00 0.97 0.99 38\n 5 0.96 0.98 0.97 48\n 6 1.00 0.98 0.99 52\n 7 0.96 1.00 0.98 48\n 8 0.98 0.96 0.97 48\n 9 0.98 0.94 0.96 47\n\n accuracy 0.98 450\n macro avg 0.98 0.98 0.98 450\nweighted avg 0.98 0.98 0.98 450\n\n[[37 0 0 0 0 0 0 0 0 0]\n [ 0 42 0 0 0 1 0 0 0 0]\n [ 0 0 43 1 0 0 0 0 0 0]\n [ 0 0 0 44 0 0 0 0 1 0]\n [ 0 0 0 0 37 0 0 1 0 0]\n [ 0 0 0 0 0 47 0 0 0 1]\n [ 0 1 0 0 0 0 51 0 0 0]\n [ 0 0 0 0 0 0 0 48 0 0]\n [ 0 1 0 0 0 0 0 1 46 0]\n [ 0 0 0 2 0 1 0 0 0 44]]\n"
]
],
[
[
"**Respuesta**: Se puede observar en la matríz de confusión, el grado de confusión entre unos dígitos y otros *es muy bajo*, y esto coincide con el *alto grado de precisión* que se obtiene del modelo entrenado con **RandomForestClassifier**.",
"_____no_output_____"
],
[
"### Pregunta 3 (1 punto): De acuerdo a lo indicado en el [siguiente enlace](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html). ¿Podría justificar los hiperparámetros elegidos dadas las características de nuestro dataset?",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=83, max_depth=17, random_state=0)\nclf.fit(Xtrain, ytrain)\nprint(f'Precisión: {clf.score(Xtest, ytest)*100} %')",
"Precisión: 98.44444444444444 %\n"
]
],
[
[
"**Respuesta**: Se observado que los parámetros *n_estimators* y *max_depth* son los que influyen más en mejorar la precisión del modelo, ya que **mientras más estimadores se reduce la influencia del overfitting** que podría tener cada uno de ellos, sin embargo se observa que la tendencia de la mejora de la precisión se detiene (o incluso empeora) cuando superar la cantidad de 83 estimadores internos.\n\nOcurre algo similar con el parámetro *max_detph*, ya que **llega a un máximo del valor de la precisión en 17 niveles de profundidad**, y después empeora el resultado.",
"_____no_output_____"
],
[
"### Pregunta 4 (2 puntos adicionales):\n\n### [Este artículo](https://chrisalbon.com/machine_learning/model_evaluation/cross_validation_parameter_tuning_grid_search/) es muy interesante a la hora de averiguar cómo realizar **cross validation** y **hyperparameter tunning** de un modelo.\n\n### ¿Quién se atreve a reproducir los pasos indicados en este artículo para encontrar los parámetros óptimos en el `RandomForestClassifier()`?\n",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import GridSearchCV\nfrom math import ceil\n\n#Parámetros candidatos\nparameter_candidates = [\n {'n_estimators': [10, 50, 83, 100], 'max_depth': [10, 15, 17, 20], 'random_state': [0]}\n]\n\n# Crear clasificador que buscará la mejor combinación entre parámetros\nclf = GridSearchCV(estimator=RandomForestClassifier(), param_grid=parameter_candidates)\nclf.fit(Xtrain, ytrain)\n\n# Ver la mejor precisión y los mejores valores de cada parámetro\nprint(f'Mejor precisión: {clf.best_score_*100} %')\nprint('Mejor n_estimators:',clf.best_estimator_.n_estimators) \nprint('Mejor max_depth:',clf.best_estimator_.max_depth)\n\n# Presición del modelos con los mejores parámetros seleccionados\nprint(f'Precisión con dataset de prueba: {clf.score(Xtest, ytest)*100} %')",
"Mejor precisión: 97.17802560925237 %\nMejor n_estimators: 100\nMejor max_depth: 15\nPrecisión con dataset de prueba: 97.77777777777777 %\n"
]
],
[
[
"**Respuesta:** Después de usar GridSearchCV para encontrar los mejores parámetros pudimos obtener *n_estimators=100* y *max_depth=15* y vimos que la precisión respecto al dataset de pruebas (Xtest y ytest) es de **97.77%**. Aquí podemos observar que el algoritmo GridSearchCV **no pudo detectar una combinación mejor** con los parámetros *n_estimators=83* y *max_depth=17* que obtuvimos previamente de forma manual que generaron una precisión del **98.44%**, a pesar que dichos parámetros se incluyeron como candidatos, sin embargo seleccionó los más cercanos a estos.",
"_____no_output_____"
],
[
"<img src=\"images/doge.jpg\" width=\"500\" height=\"500\" align=\"center\"/>",
"_____no_output_____"
]
],
[
[
"¡Por favor, no olvide guardar el Jupyter Notebook antes de mandar la práctica!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"raw"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"raw",
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"raw"
]
] |
e7dfb063ee95f7d20dc179b760a15a7f4e079fd7 | 3,065 | ipynb | Jupyter Notebook | 01 Machine Learning/scikit_examples_jupyter/linear_model/plot_omp.ipynb | alphaolomi/colab | 19e4eb1bed56346dd18ba65638cda2d17a960d0c | [
"Apache-2.0"
] | null | null | null | 01 Machine Learning/scikit_examples_jupyter/linear_model/plot_omp.ipynb | alphaolomi/colab | 19e4eb1bed56346dd18ba65638cda2d17a960d0c | [
"Apache-2.0"
] | null | null | null | 01 Machine Learning/scikit_examples_jupyter/linear_model/plot_omp.ipynb | alphaolomi/colab | 19e4eb1bed56346dd18ba65638cda2d17a960d0c | [
"Apache-2.0"
] | null | null | null | 56.759259 | 1,932 | 0.622838 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# Orthogonal Matching Pursuit\n\n\nUsing orthogonal matching pursuit for recovering a sparse signal from a noisy\nmeasurement encoded with a dictionary\n\n",
"_____no_output_____"
]
],
[
[
"print(__doc__)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import OrthogonalMatchingPursuit\nfrom sklearn.linear_model import OrthogonalMatchingPursuitCV\nfrom sklearn.datasets import make_sparse_coded_signal\n\nn_components, n_features = 512, 100\nn_nonzero_coefs = 17\n\n# generate the data\n\n# y = Xw\n# |x|_0 = n_nonzero_coefs\n\ny, X, w = make_sparse_coded_signal(n_samples=1,\n n_components=n_components,\n n_features=n_features,\n n_nonzero_coefs=n_nonzero_coefs,\n random_state=0)\n\nidx, = w.nonzero()\n\n# distort the clean signal\ny_noisy = y + 0.05 * np.random.randn(len(y))\n\n# plot the sparse signal\nplt.figure(figsize=(7, 7))\nplt.subplot(4, 1, 1)\nplt.xlim(0, 512)\nplt.title(\"Sparse signal\")\nplt.stem(idx, w[idx])\n\n# plot the noise-free reconstruction\nomp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)\nomp.fit(X, y)\ncoef = omp.coef_\nidx_r, = coef.nonzero()\nplt.subplot(4, 1, 2)\nplt.xlim(0, 512)\nplt.title(\"Recovered signal from noise-free measurements\")\nplt.stem(idx_r, coef[idx_r])\n\n# plot the noisy reconstruction\nomp.fit(X, y_noisy)\ncoef = omp.coef_\nidx_r, = coef.nonzero()\nplt.subplot(4, 1, 3)\nplt.xlim(0, 512)\nplt.title(\"Recovered signal from noisy measurements\")\nplt.stem(idx_r, coef[idx_r])\n\n# plot the noisy reconstruction with number of non-zeros set by CV\nomp_cv = OrthogonalMatchingPursuitCV(cv=5)\nomp_cv.fit(X, y_noisy)\ncoef = omp_cv.coef_\nidx_r, = coef.nonzero()\nplt.subplot(4, 1, 4)\nplt.xlim(0, 512)\nplt.title(\"Recovered signal from noisy measurements with CV\")\nplt.stem(idx_r, coef[idx_r])\n\nplt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)\nplt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',\n fontsize=16)\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7dfb13e15a8c30fd905f0ed9db9f0f67d9b6e88 | 32,753 | ipynb | Jupyter Notebook | tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb | nicolasoyharcabal/tensorflow | 0d3b58cfe91c6b865a14701345d7a84ce949c0e3 | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb | mitcodelab/tensorflow | 8a1dcea0acb7fde84b43d7c922b2b1bffd6824d8 | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/contrib/autograph/examples/notebooks/workshop.ipynb | mitcodelab/tensorflow | 8a1dcea0acb7fde84b43d7c922b2b1bffd6824d8 | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | 28.984956 | 532 | 0.452875 | [
[
[
"!pip install -U -q tf-nightly",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom tensorflow.contrib import autograph\n\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"# 1. AutoGraph writes graph code for you\n\n[AutoGraph](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/README.md) helps you write complicated graph code using just plain Python -- behind the scenes, AutoGraph automatically transforms your code into the equivalent TF graph code. We support a large chunk of the Python language, which is growing. [Please see this document for what we currently support, and what we're working on](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/autograph/LIMITATIONS.md).\n\nHere's a quick example of how it works:\n\n",
"_____no_output_____"
]
],
[
[
"# Autograph can convert functions like this...\ndef g(x):\n if x > 0:\n x = x * x\n else:\n x = 0.0\n return x\n\n# ...into graph-building functions like this:\ndef tf_g(x):\n with tf.name_scope('g'):\n\n def if_true():\n with tf.name_scope('if_true'):\n x_1, = x,\n x_1 = x_1 * x_1\n return x_1,\n\n def if_false():\n with tf.name_scope('if_false'):\n x_1, = x,\n x_1 = 0.0\n return x_1,\n\n x = autograph_utils.run_cond(tf.greater(x, 0), if_true, if_false)\n return x",
"_____no_output_____"
],
[
"# You can run your plain-Python code in graph mode,\n# and get the same results out, but with all the benfits of graphs:\nprint('Original value: %2.2f' % g(9.0))\n\n# Generate a graph-version of g and call it:\ntf_g = autograph.to_graph(g)\n\nwith tf.Graph().as_default():\n # The result works like a regular op: takes tensors in, returns tensors.\n # You can inspect the graph using tf.get_default_graph().as_graph_def()\n g_ops = tf_g(tf.constant(9.0))\n with tf.Session() as sess:\n print('Autograph value: %2.2f\\n' % sess.run(g_ops))\n\n\n# You can view, debug and tweak the generated code:\nprint(autograph.to_code(g))",
"_____no_output_____"
]
],
[
[
"#### Automatically converting complex control flow\n\nAutoGraph can convert a large chunk of the Python language into equivalent graph-construction code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph.\nAutoGraph will automatically convert most Python control flow statements into their correct graph equivalent. \n \nWe support common statements like `while`, `for`, `if`, `break`, `return` and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand:\n",
"_____no_output_____"
]
],
[
[
"# Continue in a loop\ndef f(l):\n s = 0\n for c in l:\n if c % 2 > 0:\n continue\n s += c\n return s\n\nprint('Original value: %d' % f([10,12,15,20]))\n\ntf_f = autograph.to_graph(f)\nwith tf.Graph().as_default():\n with tf.Session():\n print('Graph value: %d\\n\\n' % tf_f(tf.constant([10,12,15,20])).eval())\n\nprint(autograph.to_code(f))",
"_____no_output_____"
]
],
[
[
"Try replacing the `continue` in the above code with `break` -- AutoGraph supports that as well! \n \nLet's try some other useful Python constructs, like `print` and `assert`. We automatically convert Python `assert` statements into the equivalent `tf.Assert` code. ",
"_____no_output_____"
]
],
[
[
"def f(x):\n assert x != 0, 'Do not pass zero!'\n return x * x\n\ntf_f = autograph.to_graph(f)\nwith tf.Graph().as_default():\n with tf.Session():\n try:\n print(tf_f(tf.constant(0)).eval())\n except tf.errors.InvalidArgumentError as e:\n print('Got error message:\\n%s' % e.message)",
"_____no_output_____"
]
],
[
[
"You can also use plain Python `print` functions in in-graph",
"_____no_output_____"
]
],
[
[
"def f(n):\n if n >= 0:\n while n < 5:\n n += 1\n print(n)\n return n\n\ntf_f = autograph.to_graph(f)\nwith tf.Graph().as_default():\n with tf.Session():\n tf_f(tf.constant(0)).eval()",
"_____no_output_____"
]
],
[
[
"Appending to lists in loops also works (we create a tensor list ops behind the scenes)",
"_____no_output_____"
]
],
[
[
"def f(n):\n z = []\n # We ask you to tell us the element dtype of the list\n autograph.set_element_type(z, tf.int32)\n for i in range(n):\n z.append(i)\n # when you're done with the list, stack it\n # (this is just like np.stack)\n return autograph.stack(z)\n\ntf_f = autograph.to_graph(f)\nwith tf.Graph().as_default():\n with tf.Session():\n print(tf_f(tf.constant(3)).eval())\n\nprint('\\n\\n'+autograph.to_code(f))",
"_____no_output_____"
],
[
"def fizzbuzz(num):\n if num % 3 == 0 and num % 5 == 0:\n print('FizzBuzz')\n elif num % 3 == 0:\n print('Fizz')\n elif num % 5 == 0:\n print('Buzz')\n else:\n print(num)\n return num",
"_____no_output_____"
],
[
"tf_g = autograph.to_graph(fizzbuzz)\n\nwith tf.Graph().as_default():\n # The result works like a regular op: takes tensors in, returns tensors.\n # You can inspect the graph using tf.get_default_graph().as_graph_def()\n g_ops = tf_g(tf.constant(15))\n with tf.Session() as sess:\n sess.run(g_ops) \n \n# You can view, debug and tweak the generated code:\nprint('\\n')\nprint(autograph.to_code(fizzbuzz))",
"_____no_output_____"
]
],
[
[
"# De-graphify Exercises\n",
"_____no_output_____"
],
[
"#### Easy print statements",
"_____no_output_____"
]
],
[
[
"# See what happens when you turn AutoGraph off.\n# Do you see the type or the value of x when you print it?\n\n# @autograph.convert()\ndef square_log(x):\n x = x * x\n print('Squared value of x =', x)\n return x\n\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(square_log(tf.constant(4))))",
"_____no_output_____"
]
],
[
[
"#### Convert the TensorFlow code into Python code for AutoGraph",
"_____no_output_____"
]
],
[
[
"def square_if_positive(x):\n x = tf.cond(tf.greater(x, 0), lambda: x * x, lambda: x)\n return x\n\nwith tf.Session() as sess:\n print(sess.run(square_if_positive(tf.constant(4))))",
"_____no_output_____"
],
[
"@autograph.convert()\ndef square_if_positive(x):\n\n pass # TODO: fill it in!\n\n\nwith tf.Session() as sess:\n print(sess.run(square_if_positive(tf.constant(4))))",
"_____no_output_____"
]
],
[
[
"#### Uncollapse to see answer",
"_____no_output_____"
]
],
[
[
"# Simple cond\[email protected]()\ndef square_if_positive(x):\n if x > 0:\n x = x * x\n return x\n\nwith tf.Graph().as_default(): \n with tf.Session() as sess:\n print(sess.run(square_if_positive(tf.constant(4))))",
"_____no_output_____"
]
],
[
[
"#### Nested If statement",
"_____no_output_____"
]
],
[
[
"def nearest_odd_square(x):\n\n def if_positive():\n x1 = x * x\n x1 = tf.cond(tf.equal(x1 % 2, 0), lambda: x1 + 1, lambda: x1)\n return x1,\n\n x = tf.cond(tf.greater(x, 0), if_positive, lambda: x)\n return x\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(nearest_odd_square(tf.constant(4))))",
"_____no_output_____"
],
[
"@autograph.convert()\ndef nearest_odd_square(x):\n\n pass # TODO: fill it in!\n\n\nwith tf.Session() as sess:\n print(sess.run(nearest_odd_square(tf.constant(4))))",
"_____no_output_____"
]
],
[
[
"#### Uncollapse to reveal answer",
"_____no_output_____"
]
],
[
[
"@autograph.convert()\ndef nearest_odd_square(x):\n if x > 0:\n x = x * x\n if x % 2 == 0:\n x = x + 1\n return x\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(nearest_odd_square(tf.constant(4))))",
"_____no_output_____"
]
],
[
[
"#### Convert a while loop",
"_____no_output_____"
]
],
[
[
"# Convert a while loop\ndef square_until_stop(x, y):\n x = tf.while_loop(lambda x: tf.less(x, y), lambda x: x * x, [x])\n return x\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))",
"_____no_output_____"
],
[
"@autograph.convert()\ndef square_until_stop(x, y):\n\n pass # TODO: fill it in!\n\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))",
"_____no_output_____"
]
],
[
[
"#### Uncollapse for the answer\n",
"_____no_output_____"
]
],
[
[
"@autograph.convert()\ndef square_until_stop(x, y):\n while x < y:\n x = x * x\n return x\n\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n print(sess.run(square_until_stop(tf.constant(4), tf.constant(100))))",
"_____no_output_____"
]
],
[
[
"#### Nested loop and conditional",
"_____no_output_____"
]
],
[
[
"@autograph.convert()\ndef argwhere_cumsum(x, threshold):\n current_sum = 0.0\n idx = 0\n\n for i in range(len(x)):\n idx = i\n if current_sum >= threshold:\n break\n current_sum += x[i]\n return idx\n\nn = 10\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n print(sess.run(idx))",
"_____no_output_____"
],
[
"@autograph.convert()\ndef argwhere_cumsum(x, threshold):\n\n pass # TODO: fill it in!\n\n\nn = 10\nwith tf.Graph().as_default():\n with tf.Session() as sess:\n idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n print(sess.run(idx))",
"_____no_output_____"
]
],
[
[
"#### Uncollapse to see answer",
"_____no_output_____"
]
],
[
[
"@autograph.convert()\ndef argwhere_cumsum(x, threshold):\n current_sum = 0.0\n idx = 0\n for i in range(len(x)):\n idx = i\n if current_sum >= threshold:\n break\n current_sum += x[i]\n return idx\n\nn = 10\nwith tf.Graph().as_default(): \n with tf.Session() as sess:\n idx = argwhere_cumsum(tf.ones(n), tf.constant(float(n / 2)))\n print(sess.run(idx))",
"_____no_output_____"
]
],
[
[
"# 3. Training MNIST in-graph\n\nWriting control flow in AutoGraph is easy, so running a training loop in a TensorFlow graph should be easy as well! \n\nHere, we show an example of training a simple Keras model on MNIST, where the entire training process -- loading batches, calculating gradients, updating parameters, calculating validation accuracy, and repeating until convergence -- is done in-graph.",
"_____no_output_____"
],
[
"#### Download data",
"_____no_output_____"
]
],
[
[
"import gzip\nimport os\nimport shutil\n\nfrom six.moves import urllib\n\n\ndef download(directory, filename):\n filepath = os.path.join(directory, filename)\n if tf.gfile.Exists(filepath):\n return filepath\n if not tf.gfile.Exists(directory):\n tf.gfile.MakeDirs(directory)\n url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'\n zipped_filepath = filepath + '.gz'\n print('Downloading %s to %s' % (url, zipped_filepath))\n urllib.request.urlretrieve(url, zipped_filepath)\n with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(zipped_filepath)\n return filepath\n\n\ndef dataset(directory, images_file, labels_file):\n images_file = download(directory, images_file)\n labels_file = download(directory, labels_file)\n\n def decode_image(image):\n # Normalize from [0, 255] to [0.0, 1.0]\n image = tf.decode_raw(image, tf.uint8)\n image = tf.cast(image, tf.float32)\n image = tf.reshape(image, [784])\n return image / 255.0\n\n def decode_label(label):\n label = tf.decode_raw(label, tf.uint8)\n label = tf.reshape(label, [])\n return tf.to_int32(label)\n\n images = tf.data.FixedLengthRecordDataset(\n images_file, 28 * 28, header_bytes=16).map(decode_image)\n labels = tf.data.FixedLengthRecordDataset(\n labels_file, 1, header_bytes=8).map(decode_label)\n return tf.data.Dataset.zip((images, labels))\n\n\ndef mnist_train(directory):\n return dataset(directory, 'train-images-idx3-ubyte',\n 'train-labels-idx1-ubyte')\n\ndef mnist_test(directory):\n return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')",
"_____no_output_____"
]
],
[
[
"#### Define the model",
"_____no_output_____"
]
],
[
[
"def mlp_model(input_shape):\n model = tf.keras.Sequential((\n tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape),\n tf.keras.layers.Dense(100, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')))\n model.build()\n return model\n\n\ndef predict(m, x, y):\n y_p = m(x)\n losses = tf.keras.losses.categorical_crossentropy(y, y_p)\n l = tf.reduce_mean(losses)\n accuracies = tf.keras.metrics.categorical_accuracy(y, y_p)\n accuracy = tf.reduce_mean(accuracies)\n return l, accuracy\n\n\ndef fit(m, x, y, opt):\n l, accuracy = predict(m, x, y)\n opt.minimize(l)\n return l, accuracy\n\n\ndef setup_mnist_data(is_training, hp, batch_size):\n if is_training:\n ds = mnist_train('/tmp/autograph_mnist_data')\n ds = ds.shuffle(batch_size * 10)\n else:\n ds = mnist_test('/tmp/autograph_mnist_data')\n ds = ds.repeat()\n ds = ds.batch(batch_size)\n return ds\n\n\ndef get_next_batch(ds):\n itr = ds.make_one_shot_iterator()\n image, label = itr.get_next()\n x = tf.to_float(tf.reshape(image, (-1, 28 * 28)))\n y = tf.one_hot(tf.squeeze(label), 10)\n return x, y",
"_____no_output_____"
]
],
[
[
"#### Define the training loop",
"_____no_output_____"
]
],
[
[
"def train(train_ds, test_ds, hp):\n m = mlp_model((28 * 28,))\n opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9)\n\n # We'd like to save our losses to a list. In order for AutoGraph\n # to convert these lists into their graph equivalent,\n # we need to specify the element type of the lists.\n train_losses = []\n test_losses = []\n train_accuracies = []\n test_accuracies = []\n autograph.set_element_type(train_losses, tf.float32)\n autograph.set_element_type(test_losses, tf.float32)\n autograph.set_element_type(train_accuracies, tf.float32)\n autograph.set_element_type(test_accuracies, tf.float32)\n\n # This entire training loop will be run in-graph.\n i = tf.constant(0)\n while i < hp.max_steps:\n train_x, train_y = get_next_batch(train_ds)\n test_x, test_y = get_next_batch(test_ds)\n\n step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt)\n step_test_loss, step_test_accuracy = predict(m, test_x, test_y)\n\n if i % (hp.max_steps // 10) == 0:\n print('Step', i, 'train loss:', step_train_loss, 'test loss:',\n step_test_loss, 'train accuracy:', step_train_accuracy,\n 'test accuracy:', step_test_accuracy)\n\n train_losses.append(step_train_loss)\n test_losses.append(step_test_loss)\n train_accuracies.append(step_train_accuracy)\n test_accuracies.append(step_test_accuracy)\n\n i += 1\n\n # We've recorded our loss values and accuracies\n # to a list in a graph with AutoGraph's help.\n # In order to return the values as a Tensor,\n # we need to stack them before returning them.\n return (\n autograph.stack(train_losses),\n autograph.stack(test_losses),\n autograph.stack(train_accuracies),\n autograph.stack(test_accuracies),\n )",
"_____no_output_____"
],
[
"with tf.Graph().as_default():\n hp = tf.contrib.training.HParams(\n learning_rate=0.05,\n max_steps=500,\n )\n train_ds = setup_mnist_data(True, hp, 50)\n test_ds = setup_mnist_data(False, hp, 1000)\n tf_train = autograph.to_graph(train)\n loss_tensors = tf_train(train_ds, test_ds, hp)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n (\n train_losses,\n test_losses,\n train_accuracies,\n test_accuracies\n ) = sess.run(loss_tensors)\n\n plt.title('MNIST train/test losses')\n plt.plot(train_losses, label='train loss')\n plt.plot(test_losses, label='test loss')\n plt.legend()\n plt.xlabel('Training step')\n plt.ylabel('Loss')\n plt.show()\n plt.title('MNIST train/test accuracies')\n plt.plot(train_accuracies, label='train accuracy')\n plt.plot(test_accuracies, label='test accuracy')\n plt.legend(loc='lower right')\n plt.xlabel('Training step')\n plt.ylabel('Accuracy')\n plt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7dfba73d4a79f83baec0e9f0903e92687e2cb62 | 22,668 | ipynb | Jupyter Notebook | 82-3-analyticsvidhya-independence-day-hackathon.ipynb | nitishkthakur/CompetitionNotebooks | 72ca3a4bea4a1ea359247c8cb361b8a5798ff4ec | [
"MIT"
] | 1 | 2020-08-28T06:38:40.000Z | 2020-08-28T06:38:40.000Z | 82-3-analyticsvidhya-independence-day-hackathon.ipynb | nitishkthakur/CompetitionNotebooks | 72ca3a4bea4a1ea359247c8cb361b8a5798ff4ec | [
"MIT"
] | null | null | null | 82-3-analyticsvidhya-independence-day-hackathon.ipynb | nitishkthakur/CompetitionNotebooks | 72ca3a4bea4a1ea359247c8cb361b8a5798ff4ec | [
"MIT"
] | null | null | null | 35.089783 | 160 | 0.527881 | [
[
[
"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the read-only \"../input/\" directory\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using \"Save & Run All\" \n# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport tensorflow as tf\nfrom sklearn import preprocessing, linear_model, ensemble, metrics, model_selection, svm, pipeline, naive_bayes\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nimport nltk\nimport spacy\nimport textblob\nfrom nltk import word_tokenize \nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.multioutput import MultiOutputClassifier",
"/kaggle/input/janatahack-independence-day-2020-ml-hackathon/sample_submission_UVKGLZE.csv\n/kaggle/input/janatahack-independence-day-2020-ml-hackathon/train.csv\n/kaggle/input/janatahack-independence-day-2020-ml-hackathon/test.csv\n"
],
[
"# Read Data\ntrain = pd.read_csv('/kaggle/input/janatahack-independence-day-2020-ml-hackathon/train.csv')\ntest = pd.read_csv('/kaggle/input/janatahack-independence-day-2020-ml-hackathon/test.csv')\n",
"_____no_output_____"
],
[
"train.head()\n",
"_____no_output_____"
],
[
"train_id = train['ID']\ntest_id = test['ID']\n\n# Create indices to split train and test on later\ntrain['train_ind'] = np.arange(train.shape[0])\ntest['train_ind'] = np.arange(train.shape[0], train.shape[0]+test.shape[0])\n\n# Merge Train and Test - This approach only works for competitions - not for model deployment in real projects.\ndata = pd.concat([train, test], axis = 0)",
"_____no_output_____"
],
[
"# Create class which performs Label Encoding - if required\nclass categorical_encoder:\n def __init__(self, columns, kind = 'label', fill = True):\n self.kind = kind\n self.columns = columns\n self.fill = fill\n \n def fit(self, X):\n self.dict = {}\n self.fill_value = {}\n \n for col in self.columns:\n label = preprocessing.LabelEncoder().fit(X[col])\n self.dict[col] = label\n \n # To fill\n if self.fill:\n self.fill_value[col] = X[col].mode()[0]\n X[col] = X[col].fillna(self.fill_value[col])\n \n print('Label Encoding Done for {} columns'.format(len(self.columns)))\n return self\n def transform(self, X):\n for col in self.columns:\n if self.fill:\n X[col] = X[col].fillna(self.fill_value[col])\n \n X.loc[:, col] = self.dict[col].transform(X[col])\n print('Transformation Done')\n return X",
"_____no_output_____"
],
[
"# Create Lemmatizer - if required\nclass LemmaTokenizer(object):\n def __init__(self):\n self.wnl = WordNetLemmatizer()\n def __call__(self, articles):\n return [self.wnl.lemmatize(t) for t in word_tokenize(articles)]\n",
"_____no_output_____"
],
[
"# Function to Create CountEncoded and tf-idf features\ndef add_text_features(text_column_name, data_file, max_features = 2000, txn = 'tf-idf', min_df = 1, max_df = 1.0,\n ngram_range = (1, 1), lowercase = True, sparse = False, tokenizer = None):\n if txn == 'count':\n # Use Count Vectorizer\n counts = CountVectorizer(max_features = max_features, min_df = min_df, \n max_df = max_df, ngram_range = ngram_range, lowercase = lowercase, tokenizer=tokenizer).fit(data_file[text_column_name])\n if txn == 'tf-idf':\n counts = pipeline.make_pipeline(CountVectorizer(max_features = max_features, min_df = min_df, \n max_df = max_df, ngram_range = ngram_range, lowercase = lowercase, tokenizer=tokenizer),\n TfidfTransformer()).fit(data_file[text_column_name])\n text_features = counts.transform(data_file[text_column_name])\n \n # Return for sparse output\n if sparse: return text_features, None\n \n # Create Mapping\n if txn == 'count':\n mapping = {val: key for key, val in counts.vocabulary_.items()}\n if txn == 'tf-idf':\n mapping = {val: key for key, val in counts['countvectorizer'].vocabulary_.items()}\n \n # Create DataFrame\n text_features_data = pd.DataFrame(text_features.toarray())\n text_features_data = text_features_data.rename(mapping, axis = 1)\n text_cols = text_features_data.columns.tolist()\n \n # Append to dataframe\n data_copy = pd.concat([data_file.reset_index(drop = True), text_features_data.reset_index(drop = True)], axis = 1)\n return data_copy, text_cols",
"_____no_output_____"
],
[
"data_copy, text_cols = add_text_features(text_column_name = 'ABSTRACT', \n data_file = data, max_features = 150000, min_df = 5, max_df = .5,\n ngram_range = (1, 3), lowercase = True, sparse = True)",
"_____no_output_____"
],
[
"# Split the data back to train and test\nX_train = data_copy[:train.shape[0], :]\ny_train = data[['Computer Science', 'Physics', 'Mathematics',\n 'Statistics', 'Quantitative Biology', 'Quantitative Finance']].iloc[:train.shape[0]]\n\nX_test = data_copy[train.shape[0]:, :]\ny_test = data[['Computer Science', 'Physics', 'Mathematics',\n 'Statistics', 'Quantitative Biology', 'Quantitative Finance']].iloc[train.shape[0]:]",
"_____no_output_____"
],
[
"X_train",
"_____no_output_____"
],
[
"# Train model - Logistic Regression is a good option for Text classification problems\n#model = linear_model.LogisticRegressionCV(penalty = 'l2', Cs = 10, max_iter = 5000).fit(X_train, y_train)\n#model = linear_model.RidgeClassifierCV().fit(X_train, y_train)\nfrom sklearn import naive_bayes\n\n#model = MultiOutputClassifier(estimator = naive_bayes.MultinomialNB(alpha=1.0, fit_prior=True, class_prior=None)).fit(X_train, y_train)\nmodel = MultiOutputClassifier(estimator = linear_model.LogisticRegressionCV(Cs = 10, cv = 5, n_jobs = -1, max_iter = 5000)).fit(X_train, y_train)",
"_____no_output_____"
],
[
"def get_preds_multioutput(predictions):\n return np.array([[val[1] for val in inner] for inner in predictions]).T\n\ndef convert_probs_to_labels(predictions, threshold = .5, labels = None):\n final = []\n for prediction in predictions:\n temp = (prediction > threshold)*1\n final.append(temp)\n \n return final\n\ndef predict_1(predictions, threshold=.5):\n preds = get_preds_multioutput(predictions)\n preds = convert_probs_to_labels(preds, threshold = threshold, labels = None)\n return np.array(preds)\n\n#predict_1(model.predict_proba(X_test))",
"_____no_output_____"
],
[
"sub = pd.DataFrame()\nsub['ID'] = test_id\n\npreds = predict_1(model.predict_proba(X_test))\nsub[['Computer Science', 'Physics', 'Mathematics',\n 'Statistics', 'Quantitative Biology', 'Quantitative Finance']] = model.predict(X_test).astype(int)\nsub.to_csv('sub.csv', index = None)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7dfd7c5fefd933c698f21b5fd45536157bf243f | 210,310 | ipynb | Jupyter Notebook | Find_the_best_moving_average.ipynb | BiffTannon/Test | 0c027de21c46c4d3f5184828b301c89e33105e99 | [
"MIT"
] | 226 | 2020-04-21T06:57:43.000Z | 2022-03-30T19:37:24.000Z | Find_the_best_moving_average.ipynb | BiffTannon/Test | 0c027de21c46c4d3f5184828b301c89e33105e99 | [
"MIT"
] | 1 | 2021-02-18T22:40:13.000Z | 2021-03-17T07:20:25.000Z | Find_the_best_moving_average.ipynb | BiffTannon/Test | 0c027de21c46c4d3f5184828b301c89e33105e99 | [
"MIT"
] | 176 | 2020-02-28T10:44:44.000Z | 2022-03-30T12:29:31.000Z | 512.95122 | 82,430 | 0.938196 | [
[
[
"<a href=\"https://colab.research.google.com/github/gianlucamalato/machinelearning/blob/master/Find_the_best_moving_average.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"!pip install yfinance",
"Requirement already satisfied: yfinance in /usr/local/lib/python3.6/dist-packages (0.1.54)\nRequirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.18.5)\nRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.6/dist-packages (from yfinance) (2.23.0)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.6/dist-packages (from yfinance) (0.0.9)\nRequirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.6/dist-packages (from yfinance) (1.0.5)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (1.24.3)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests>=2.20->yfinance) (2020.4.5.2)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2018.9)\nRequirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.6/dist-packages (from pandas>=0.24->yfinance) (2.8.1)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil>=2.6.1->pandas>=0.24->yfinance) (1.12.0)\n"
],
[
"import yfinance\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import ttest_ind\n\nimport datetime",
"_____no_output_____"
],
[
"plt.rcParams['figure.figsize'] = [10, 7]\n\nplt.rc('font', size=14)",
"_____no_output_____"
],
[
"np.random.seed(0)",
"_____no_output_____"
],
[
"y = np.arange(0,100,1) + np.random.normal(0,10,100)",
"_____no_output_____"
],
[
"sma = pd.Series(y).rolling(20).mean()",
"_____no_output_____"
],
[
"plt.plot(y,label=\"Time series\")\nplt.plot(sma,label=\"20-period SMA\")\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"n_forward = 40\nname = 'GLD'\nstart_date = \"2010-01-01\"\nend_date = \"2020-06-15\"\n",
"_____no_output_____"
],
[
"ticker = yfinance.Ticker(\"FB\")\ndata = ticker.history(interval=\"1d\",start='2010-01-01',end=end_date)\n",
"_____no_output_____"
],
[
"plt.plot(data['Close'],label='Facebook')\n\n\nplt.plot(data['Close'].rolling(20).mean(),label = \"20-periods SMA\")\nplt.plot(data['Close'].rolling(50).mean(),label = \"50-periods SMA\")\nplt.plot(data['Close'].rolling(200).mean(),label = \"200-periods SMA\")\n\nplt.legend()\nplt.xlim((datetime.date(2019,1,1),datetime.date(2020,6,15)))\nplt.ylim((100,250))\nplt.show()",
"_____no_output_____"
],
[
"\nticker = yfinance.Ticker(name)\ndata = ticker.history(interval=\"1d\",start=start_date,end=end_date)\n\ndata['Forward Close'] = data['Close'].shift(-n_forward)\n\ndata['Forward Return'] = (data['Forward Close'] - data['Close'])/data['Close']",
"_____no_output_____"
],
[
"result = []\ntrain_size = 0.6\n\nfor sma_length in range(20,500):\n \n data['SMA'] = data['Close'].rolling(sma_length).mean()\n data['input'] = [int(x) for x in data['Close'] > data['SMA']]\n \n df = data.dropna()\n\n training = df.head(int(train_size * df.shape[0]))\n test = df.tail(int((1 - train_size) * df.shape[0]))\n \n tr_returns = training[training['input'] == 1]['Forward Return']\n test_returns = test[test['input'] == 1]['Forward Return']\n\n mean_forward_return_training = tr_returns.mean()\n mean_forward_return_test = test_returns.mean()\n\n pvalue = ttest_ind(tr_returns,test_returns,equal_var=False)[1]\n \n result.append({\n 'sma_length':sma_length,\n 'training_forward_return': mean_forward_return_training,\n 'test_forward_return': mean_forward_return_test,\n 'p-value':pvalue\n })",
"_____no_output_____"
],
[
"result.sort(key = lambda x : -x['training_forward_return'])",
"_____no_output_____"
],
[
"result[0]",
"_____no_output_____"
],
[
"best_sma = result[0]['sma_length']\ndata['SMA'] = data['Close'].rolling(best_sma).mean()",
"_____no_output_____"
],
[
"plt.plot(data['Close'],label=name)\n\nplt.plot(data['SMA'],label = \"{} periods SMA\".format(best_sma))\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7dfe4e4f2ad81319d786ecd37a682b6e19dd0c1 | 7,375 | ipynb | Jupyter Notebook | CourseContent/03-Intro.to.Python.and.Basic.Statistics/Week1/Practice.Exercise/2.Lab - Numpy.ipynb | averma111/AIML-PGP | c9f61dadcfb2ea8cb2ff5412c0ab583f7e3807ca | [
"Apache-2.0"
] | null | null | null | CourseContent/03-Intro.to.Python.and.Basic.Statistics/Week1/Practice.Exercise/2.Lab - Numpy.ipynb | averma111/AIML-PGP | c9f61dadcfb2ea8cb2ff5412c0ab583f7e3807ca | [
"Apache-2.0"
] | null | null | null | CourseContent/03-Intro.to.Python.and.Basic.Statistics/Week1/Practice.Exercise/2.Lab - Numpy.ipynb | averma111/AIML-PGP | c9f61dadcfb2ea8cb2ff5412c0ab583f7e3807ca | [
"Apache-2.0"
] | null | null | null | 18.813776 | 259 | 0.444475 | [
[
[
"# Numpy",
"_____no_output_____"
],
[
"We have seen python basic data structures in our last section. They are great but lack specialized features for data analysis. Like, adding roows, columns, operating on 2d matrices aren't readily available. So, we will use *numpy* for such functions.\n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"Numpy operates on *nd* arrays. These are similar to lists but contains homogenous elements but easier to store 2-d data.",
"_____no_output_____"
]
],
[
[
"l1 = [1,2,3,4]\nnd1 = np.array(l1)\nprint(nd1)\n\nl2 = [5,6,7,8]\nnd2 = np.array([l1,l2])\nprint(nd2)",
"[1 2 3 4]\n[[1 2 3 4]\n [5 6 7 8]]\n"
]
],
[
[
"Some functions on np.array()",
"_____no_output_____"
]
],
[
[
"print(nd2.shape)\n\nprint(nd2.size)\n\nprint(nd2.dtype)",
"(2, 4)\n8\nint32\n"
]
],
[
[
"### Question 1\n\nCreate an identity 2d-array or matrix (with ones across the diagonal).\n",
"_____no_output_____"
]
],
[
[
"np.identity(2)\nnp.eye(2)",
"_____no_output_____"
]
],
[
[
"### Question 2\n\nCreate a 2d-array or matrix of order 3x3 with values = 9,8,7,6,5,4,3,2,1 arranged in the same order.\n",
"_____no_output_____"
]
],
[
[
"d=np.matrix([[9,8,7],[6,5,4],[3,2,1]])\nd",
"_____no_output_____"
],
[
"np.arange(9,0,-1).reshape(3,3)",
"_____no_output_____"
]
],
[
[
"### Question 3\n\nReverse both the rows and columns of the given matrix.\n",
"_____no_output_____"
]
],
[
[
"d.T",
"_____no_output_____"
]
],
[
[
"### Question 4\nAdd + 1 to all the elements in the given matrix.",
"_____no_output_____"
]
],
[
[
"d + 1",
"_____no_output_____"
]
],
[
[
"Similarly you can do operations like scalar substraction, division, multiplication (operating on each element in the matrix)",
"_____no_output_____"
],
[
"### Question 5\n\nFind the mean of all elements in the given matrix nd6.\nnd6 = [[ 1 4 9 121 144 169]\n [ 16 25 36 196 225 256]\n [ 49 64 81 289 324 361]]\n \n",
"_____no_output_____"
]
],
[
[
"nd6 = np.matrix([[ 1, 4, 9, 121, 144, 169], [ 16, 25, 36, 196, 225, 256], [ 49, 64, 81, 289, 324, 361]])",
"_____no_output_____"
],
[
"nd6.mean()",
"_____no_output_____"
]
],
[
[
"### Question 7\n\nFind the dot product of two given matrices.\n",
"_____no_output_____"
]
],
[
[
"mat1 = np.arange(9).reshape(3,3)\nmat2 = np.arange(10,19,1).reshape(3,3)",
"_____no_output_____"
],
[
"mat1.dot(mat2)",
"_____no_output_____"
],
[
"mat1 @ mat2",
"_____no_output_____"
],
[
"np.dot(mat1, mat2)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7e0067a8e513cece9e30f9033f35d4019dffb7a | 204 | ipynb | Jupyter Notebook | src/utils/DiffbotUtils.ipynb | ClimateMind/climatemind-nlp | 66c0b7819d3fd488a6e08489c29f372cd3ad61d6 | [
"Apache-2.0"
] | 1 | 2021-01-16T20:55:05.000Z | 2021-01-16T20:55:05.000Z | src/utils/DiffbotUtils.ipynb | ClimateMind/nlp | 457efc7e8474696577da54d7c917e6cf0ee3f6c4 | [
"Apache-2.0"
] | 11 | 2021-04-04T02:00:25.000Z | 2021-10-11T02:40:17.000Z | src/utils/DiffbotUtils.ipynb | ClimateMind/climatemind-nlp | 66c0b7819d3fd488a6e08489c29f372cd3ad61d6 | [
"Apache-2.0"
] | null | null | null | 12 | 27 | 0.460784 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7e00942ab2912b9d8ccec2fccb15da0e4f5008b | 31,129 | ipynb | Jupyter Notebook | notebooks/using_APIs_to_automate_the_process.ipynb | adrialuzllompart/festival-playlists | 53e24f8fddd9cf82af2f721d089d4511d1c11294 | [
"MIT"
] | null | null | null | notebooks/using_APIs_to_automate_the_process.ipynb | adrialuzllompart/festival-playlists | 53e24f8fddd9cf82af2f721d089d4511d1c11294 | [
"MIT"
] | null | null | null | notebooks/using_APIs_to_automate_the_process.ipynb | adrialuzllompart/festival-playlists | 53e24f8fddd9cf82af2f721d089d4511d1c11294 | [
"MIT"
] | 1 | 2020-12-06T03:46:56.000Z | 2020-12-06T03:46:56.000Z | 29.618459 | 259 | 0.465804 | [
[
[
"# Festival Playlists",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport requests\nimport json\nimport spotipy\nfrom IPython.display import display",
"_____no_output_____"
]
],
[
[
"1. Use the Songkick API to get all the bands playing the festival\n2. Use the Setlist.FM API to get the setlists\n3. Use the Spotify API to create the playlists and add all the songs",
"_____no_output_____"
],
[
"### Set API credentials",
"_____no_output_____"
]
],
[
[
"setlistfm_api_key = os.getenv('SETLISTFM_API_KEY')\nspotify_client_id = os.getenv('SPOTIFY_CLIENT_ID')\nspotify_client_secret = os.getenv('SPOTIFY_CLIENT_SECRET')",
"_____no_output_____"
]
],
[
[
"### Setlist FM",
"_____no_output_____"
],
[
"#### Plan of action\n1. Given a lineup (list of band names), get their Musicbrainz identifiers (`mbid`) via `https://api.setlist.fm/rest/1.0/search/artists`\n2. Retrieve the setlists for each artist using their `mbid` via `https://api.setlist.fm/rest/1.0/artist/{artist_mbid}/setlists\n`",
"_____no_output_____"
]
],
[
[
"lineup = pd.read_csv(\n '/Users/adrialuz/Desktop/weekender.txt', header=None, names=['band'], encoding=\"ISO-8859-1\"\n)['band'].values",
"_____no_output_____"
],
[
"len(lineup)",
"_____no_output_____"
],
[
"lineup",
"_____no_output_____"
],
[
"artists_url = 'https://api.setlist.fm/rest/1.0/search/artists'",
"_____no_output_____"
],
[
"lineup_mbids = []\nnot_found = []\n\nfor name in lineup:\n req = requests.get(artists_url,\n headers={'x-api-key': setlistfm_api_key, 'Accept': 'application/json'},\n params={'artistName': name, 'p': 1, 'sort': 'relevance'}\n )\n \n i = 0\n while (not req.ok) & (i <= 5):\n req = requests.get(artists_url,\n headers={'x-api-key': setlistfm_api_key, 'Accept': 'application/json'},\n params={'artistName': name, 'p': 1, 'sort': 'relevance'}\n )\n i += 1\n \n if req.ok:\n artist_response = req.json()['artist']\n num_artists = len(artist_response)\n if num_artists > 1:\n for i in range(num_artists):\n if artist_response[i]['name'].lower() == name.lower():\n mbid = artist_response[i]['mbid']\n lineup_mbids.append({'name': name, 'mbid': mbid})\n break\n elif num_artists == 1:\n mbid = artist_response[0]['mbid']\n lineup_mbids.append({'name': name, 'mbid': mbid})\n elif num_artists == 0:\n print(f'No results I think for {name}')\n else:\n print(f'WTF {name}?')\n \n else:\n print(f'Couldn\\'t find {name}')\n not_found.append(name)",
"Couldn't find Fresquito y mango\nCouldn't find Mainline magic circus\nCouldn't find Chica gang\nCouldn't find Paco moreno\nCouldn't find The thurston moore band\nCouldn't find Maria arnal i marcel bages\nCouldn't find Unai muguruza\nCouldn't find Renaldo i Clara\nCouldn't find El tablao flamenco del capullo de jerez\n"
],
[
"lineup_mbids",
"_____no_output_____"
],
[
"not_found",
"_____no_output_____"
],
[
"artist_setlist = []\n\nfor a in lineup_mbids:\n songs_played = []\n mbid = a['mbid']\n setlists_url = f'https://api.setlist.fm/rest/1.0/artist/{mbid}/setlists'\n \n req = requests.get(setlists_url,\n headers={'x-api-key': setlistfm_api_key, 'Accept': 'application/json'},\n params={'p': 1}\n )\n \n i = 0\n while (not req.ok) & (i <= 5):\n req = requests.get(setlists_url,\n headers={'x-api-key': setlistfm_api_key, 'Accept': 'application/json'},\n params={'p': 1}\n )\n i += 1\n \n if req.ok:\n \n setlist_response = req.json()['setlist']\n num_setlists = len(setlist_response)\n\n for i in range(num_setlists):\n setlist = setlist_response[i]['sets']['set']\n num_sections = len(setlist)\n total_songs = []\n for p in range(num_sections):\n total_songs += setlist[p]['song']\n num_songs = len(total_songs)\n\n for i in range(num_songs):\n song = total_songs[i]\n song_title = song['name']\n # if the song is a cover add the original artist to the song title\n if 'cover' in song:\n song_title += ' {}'.format(song['cover']['name'])\n songs_played.append(song_title)\n \n most_played_songs = list(pd.Series(songs_played).value_counts().head(15).index)\n\n artist_setlist.append({\n 'artist': a['name'],\n 'setlist': most_played_songs\n })\n else:\n not_found.append(a['name'])",
"/var/folders/5g/z2_s3_7d75qgyjp72n_8q0p40000gq/T/ipykernel_81509/1942038417.py:42: DeprecationWarning: The default dtype for empty Series will be 'object' instead of 'float64' in a future version. Specify a dtype explicitly to silence this warning.\n most_played_songs = list(pd.Series(songs_played).value_counts().head(15).index)\n"
],
[
"not_found",
"_____no_output_____"
],
[
"artist_setlist",
"_____no_output_____"
],
[
"setlist_lengths = []\nshort_or_empty_setlist = []\n\nfor i in range(len(artist_setlist)):\n n_songs = len(artist_setlist[i]['setlist'])\n setlist_lengths.append({\n 'artist': artist_setlist[i]['artist'],\n 'n_songs': n_songs\n })\n \n if n_songs < 5:\n short_or_empty_setlist.append(artist_setlist[i]['artist'])",
"_____no_output_____"
],
[
"len(short_or_empty_setlist)",
"_____no_output_____"
],
[
"short_or_empty_setlist",
"_____no_output_____"
]
],
[
[
"### Spotify",
"_____no_output_____"
]
],
[
[
"username = 'adrialuz'\nscope = 'playlist-modify-public'\n\ntoken = spotipy.util.prompt_for_user_token(username, scope, redirect_uri='http://localhost:9090')\n\nsp = spotipy.Spotify(auth=token)\n\nsp.trace = False",
"_____no_output_____"
],
[
"sp.search('artist:Dua Lipa', limit=1, type='artist', market='GB')['artists']['items'][0]['id']",
"_____no_output_____"
],
[
"sp.search(\n f'artist:Khaled', limit=2, type='artist', market='GB'\n )['artists']['items']",
"_____no_output_____"
],
[
"spotify_ids = []\nfor a in short_or_empty_setlist:\n search_result = sp.search(\n f'artist:{a}', limit=5, type='artist', market='GB'\n )['artists']['items']\n \n if search_result:\n for i in range(len(search_result)):\n name = search_result[i]['name']\n if name.lower() == a.lower():\n artist_id = search_result[i]['id']\n spotify_ids.append(artist_id)\n break\n else:\n pass\n else:\n print(f'Couldn\\'t find {a} on Spotify.')",
"_____no_output_____"
],
[
"spotify_ids",
"_____no_output_____"
],
[
"sp.artist('59xdAObFYuaKO2phzzz07H')['name']",
"_____no_output_____"
],
[
"popular_songs = []\n\nfor artist_id in spotify_ids:\n search_results = sp.artist_top_tracks(artist_id, country='GB')['tracks']\n\n top_songs = []\n if search_results:\n for i in range(len(search_results)):\n song_name = search_results[i]['name']\n top_songs.append(song_name)\n popular_songs.append({\n 'artist': sp.artist(artist_id)['name'],\n 'setlist': top_songs\n })\n else:\n print(artist_id, sp.artist(artist_id)['name'])",
"_____no_output_____"
],
[
"popular_songs",
"_____no_output_____"
]
],
[
[
"Get the URI codes for each track",
"_____no_output_____"
]
],
[
[
"uris = []\nmissing_songs = []\n\nfor a in (artist_setlist + popular_songs):\n artist = a['artist']\n setlist = a['setlist']\n for s in setlist:\n s = s.replace(',', '').replace('\\'', '').replace('\"', '').replace('.', '').replace(\n '?', '').replace(')', '').replace('(', '').replace('/', '').replace(\n '\\\\', '').replace('&', '').replace('-', '')\n items = sp.search(q=f'artist:{artist} track:{s}', limit=1)['tracks']['items']\n if items:\n uri = items[0]['id']\n uris.append(uri)\n else:\n items = sp.search(q=f'track:{s}', limit=1)['tracks']['items']\n if items:\n if items != [None]:\n uri = items[0]['id']\n uris.append(uri)\n else:\n missing_songs.append({\n 'artist': artist,\n 'song': s\n })",
"_____no_output_____"
],
[
"len(uris)",
"_____no_output_____"
],
[
"len(missing_songs)",
"_____no_output_____"
],
[
"missing_songs",
"_____no_output_____"
],
[
"divisor = int(np.floor(len(uris) / np.ceil(len(uris) / 100)))\ntimes = int(np.floor(len(uris) / divisor))",
"_____no_output_____"
],
[
"for i in range(times):\n subset = uris[divisor*i:divisor*(i+1)]\n sp.user_playlist_add_tracks(username, playlist_id='2nUkznVEo8EgQXw0UucbpS',\n tracks=subset)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e032733f9ce6a2cb812662dd3bb16a93800876 | 27,806 | ipynb | Jupyter Notebook | docs/notebooks/ExpectError.ipynb | bjrnmath/debuggingbook | 8b6cd36fc75a89464e9252e40e1d4edcb6a70559 | [
"MIT"
] | null | null | null | docs/notebooks/ExpectError.ipynb | bjrnmath/debuggingbook | 8b6cd36fc75a89464e9252e40e1d4edcb6a70559 | [
"MIT"
] | null | null | null | docs/notebooks/ExpectError.ipynb | bjrnmath/debuggingbook | 8b6cd36fc75a89464e9252e40e1d4edcb6a70559 | [
"MIT"
] | null | null | null | 27.945729 | 287 | 0.542832 | [
[
[
"# Error Handling\n\nThe code in this notebook helps with handling errors. Normally, an error in notebook code causes the execution of the code to stop; while an infinite loop in notebook code causes the notebook to run without end. This notebook provides two classes to help address these concerns.",
"_____no_output_____"
],
[
"**Prerequisites**\n\n* This notebook needs some understanding on advanced concepts in Python, notably \n * classes\n * the Python `with` statement\n * tracing\n * measuring time\n * exceptions",
"_____no_output_____"
],
[
"## Synopsis\n<!-- Automatically generated. Do not edit. -->\n\nTo [use the code provided in this chapter](Importing.ipynb), write\n\n```python\n>>> from debuggingbook.ExpectError import <identifier>\n```\n\nand then make use of the following features.\n\n\nThe `ExpectError` class allows you to catch and report exceptions, yet resume execution. This is useful in notebooks, as they would normally interrupt execution as soon as an exception is raised. Its typical usage is in conjunction with a `with` clause:\n\n```python\n>>> with ExpectError():\n>>> x = 1 / 0\nTraceback (most recent call last):\n File \"<ipython-input-1-264328004f25>\", line 2, in <module>\n x = 1 / 0\nZeroDivisionError: division by zero (expected)\n\n```\nThe `ExpectTimeout` class allows you to interrupt execution after the specified time. This is useful for interrupting code that might otherwise run forever.\n\n```python\n>>> with ExpectTimeout(5):\n>>> long_running_test()\nStart\n0 seconds have passed\n1 seconds have passed\n2 seconds have passed\n3 seconds have passed\n\nTraceback (most recent call last):\n File \"<ipython-input-1-7e5136e65261>\", line 2, in <module>\n long_running_test()\n File \"<ipython-input-1-c23727bcee7d>\", line 5, in long_running_test\n print(i, \"seconds have passed\")\n File \"<ipython-input-1-c23727bcee7d>\", line 5, in long_running_test\n print(i, \"seconds have passed\")\n File \"<ipython-input-1-a8ce35c8777b>\", line 25, in check_time\n raise TimeoutError\nTimeoutError (expected)\n\n```\nThe exception and the associated traceback are printed as error messages. If you do not want that, \nuse these keyword options:\n\n* `print_traceback` (default True) can be set to `False` to avoid the traceback being printed\n* `mute` (default False) can be set to `True` to completely avoid any output.\n\n",
"_____no_output_____"
],
[
"## Catching Errors\n\nThe class `ExpectError` allows to express that some code produces an exception. A typical usage looks as follows:\n\n```Python\nfrom ExpectError import ExpectError\n\nwith ExpectError():\n function_that_is_supposed_to_fail()\n```\n\nIf an exception occurs, it is printed on standard error; yet, execution continues.",
"_____no_output_____"
]
],
[
[
"import bookutils",
"_____no_output_____"
],
[
"import traceback\nimport sys",
"_____no_output_____"
],
[
"from types import FrameType, TracebackType",
"_____no_output_____"
],
[
"# ignore\nfrom typing import Union, Optional, Callable, Any",
"_____no_output_____"
],
[
"class ExpectError:\n \"\"\"Execute a code block expecting (and catching) an error.\"\"\"\n\n def __init__(self, exc_type: Optional[type] = None, \n print_traceback: bool = True, mute: bool = False):\n \"\"\"\n Constructor. Expect an exception of type `exc_type` (`None`: any exception).\n If `print_traceback` is set (default), print a traceback to stderr.\n If `mute` is set (default: False), do not print anything.\n \"\"\"\n self.print_traceback = print_traceback\n self.mute = mute\n self.expected_exc_type = exc_type\n\n def __enter__(self) -> Any:\n \"\"\"Begin of `with` block\"\"\"\n return self\n\n def __exit__(self, exc_type: type, \n exc_value: BaseException, tb: TracebackType) -> Optional[bool]:\n \"\"\"End of `with` block\"\"\"\n if exc_type is None:\n # No exception\n return\n\n if (self.expected_exc_type is not None\n and exc_type != self.expected_exc_type):\n raise # Unexpected exception\n\n # An exception occurred\n if self.print_traceback:\n lines = ''.join(\n traceback.format_exception(\n exc_type,\n exc_value,\n tb)).strip()\n else:\n lines = traceback.format_exception_only(\n exc_type, exc_value)[-1].strip()\n\n if not self.mute:\n print(lines, \"(expected)\", file=sys.stderr)\n return True # Ignore it",
"_____no_output_____"
]
],
[
[
"Here's an example:",
"_____no_output_____"
]
],
[
[
"def fail_test() -> None:\n # Trigger an exception\n x = 1 / 0",
"_____no_output_____"
],
[
"with ExpectError():\n fail_test()",
"Traceback (most recent call last):\n File \"<ipython-input-1-67c629a2a842>\", line 2, in <module>\n fail_test()\n File \"<ipython-input-1-3f58bc38b36b>\", line 3, in fail_test\n x = 1 / 0\nZeroDivisionError: division by zero (expected)\n"
],
[
"with ExpectError(print_traceback=False):\n fail_test()",
"ZeroDivisionError: division by zero (expected)\n"
]
],
[
[
"We can specify the type of the expected exception. This way, if something else happens, we will get notified.",
"_____no_output_____"
]
],
[
[
"with ExpectError(ZeroDivisionError):\n fail_test()",
"Traceback (most recent call last):\n File \"<ipython-input-1-a7a7ccccd794>\", line 2, in <module>\n fail_test()\n File \"<ipython-input-1-3f58bc38b36b>\", line 3, in fail_test\n x = 1 / 0\nZeroDivisionError: division by zero (expected)\n"
],
[
"with ExpectError():\n with ExpectError(ZeroDivisionError):\n some_nonexisting_function() # type: ignore",
"Traceback (most recent call last):\n File \"<ipython-input-1-e6c7dad1986d>\", line 3, in <module>\n some_nonexisting_function() # type: ignore\n File \"<ipython-input-1-e6c7dad1986d>\", line 3, in <module>\n some_nonexisting_function() # type: ignore\nNameError: name 'some_nonexisting_function' is not defined (expected)\n"
]
],
[
[
"## Catching Timeouts\n\nThe class `ExpectTimeout(seconds)` allows to express that some code may run for a long or infinite time; execution is thus interrupted after `seconds` seconds. A typical usage looks as follows:\n\n```Python\nfrom ExpectError import ExpectTimeout\n\nwith ExpectTimeout(2) as t:\n function_that_is_supposed_to_hang()\n```\n\nIf an exception occurs, it is printed on standard error (as with `ExpectError`); yet, execution continues.\n\nShould there be a need to cancel the timeout within the `with` block, `t.cancel()` will do the trick.\n\nThe implementation uses `sys.settrace()`, as this seems to be the most portable way to implement timeouts. It is not very efficient, though. Also, it only works on individual lines of Python code and will not interrupt a long-running system function.",
"_____no_output_____"
]
],
[
[
"import sys\nimport time",
"_____no_output_____"
],
[
"class ExpectTimeout:\n \"\"\"Execute a code block expecting (and catching) a timeout.\"\"\"\n\n def __init__(self, seconds: Union[int, float], \n print_traceback: bool = True, mute: bool = False):\n \"\"\"\n Constructor. Interrupe execution after `seconds` seconds.\n If `print_traceback` is set (default), print a traceback to stderr.\n If `mute` is set (default: False), do not print anything.\n \"\"\"\n\n self.seconds_before_timeout = seconds\n self.original_trace_function: Optional[Callable] = None\n self.end_time: Optional[float] = None\n self.print_traceback = print_traceback\n self.mute = mute\n\n def check_time(self, frame: FrameType, event: str, arg: Any) -> Callable:\n \"\"\"Tracing function\"\"\"\n if self.original_trace_function is not None:\n self.original_trace_function(frame, event, arg)\n\n current_time = time.time()\n if self.end_time and current_time >= self.end_time:\n raise TimeoutError\n\n return self.check_time\n\n def __enter__(self) -> Any:\n \"\"\"Begin of `with` block\"\"\"\n\n start_time = time.time()\n self.end_time = start_time + self.seconds_before_timeout\n\n self.original_trace_function = sys.gettrace()\n sys.settrace(self.check_time)\n return self\n\n def __exit__(self, exc_type: type, \n exc_value: BaseException, tb: TracebackType) -> Optional[bool]:\n \"\"\"End of `with` block\"\"\"\n\n self.cancel()\n\n if exc_type is None:\n return\n\n # An exception occurred\n if self.print_traceback:\n lines = ''.join(\n traceback.format_exception(\n exc_type,\n exc_value,\n tb)).strip()\n else:\n lines = traceback.format_exception_only(\n exc_type, exc_value)[-1].strip()\n\n if not self.mute:\n print(lines, \"(expected)\", file=sys.stderr)\n return True # Ignore it\n\n def cancel(self) -> None:\n sys.settrace(self.original_trace_function)",
"_____no_output_____"
]
],
[
[
"Here's an example:",
"_____no_output_____"
]
],
[
[
"def long_running_test() -> None:\n print(\"Start\")\n for i in range(10):\n time.sleep(1)\n print(i, \"seconds have passed\")\n print(\"End\")",
"_____no_output_____"
],
[
"with ExpectTimeout(5, print_traceback=False):\n long_running_test()",
"Start\n0 seconds have passed\n1 seconds have passed\n2 seconds have passed\n3 seconds have passed\n"
]
],
[
[
"Note that it is possible to nest multiple timeouts.",
"_____no_output_____"
]
],
[
[
"with ExpectTimeout(5):\n with ExpectTimeout(3):\n long_running_test()\n long_running_test()",
"Start\n0 seconds have passed\n1 seconds have passed\n"
]
],
[
[
"That's it, folks – enjoy!",
"_____no_output_____"
],
[
"## Synopsis\n\nThe `ExpectError` class allows you to catch and report exceptions, yet resume execution. This is useful in notebooks, as they would normally interrupt execution as soon as an exception is raised. Its typical usage is in conjunction with a `with` clause:",
"_____no_output_____"
]
],
[
[
"with ExpectError():\n x = 1 / 0",
"Traceback (most recent call last):\n File \"<ipython-input-1-264328004f25>\", line 2, in <module>\n x = 1 / 0\nZeroDivisionError: division by zero (expected)\n"
]
],
[
[
"The `ExpectTimeout` class allows you to interrupt execution after the specified time. This is useful for interrupting code that might otherwise run forever.",
"_____no_output_____"
]
],
[
[
"with ExpectTimeout(5):\n long_running_test()",
"Start\n0 seconds have passed\n1 seconds have passed\n2 seconds have passed\n3 seconds have passed\n"
]
],
[
[
"The exception and the associated traceback are printed as error messages. If you do not want that, \nuse these keyword options:\n\n* `print_traceback` (default True) can be set to `False` to avoid the traceback being printed\n* `mute` (default False) can be set to `True` to completely avoid any output.",
"_____no_output_____"
],
[
"## Lessons Learned\n\n* With the `ExpectError` class, it is very easy to handle errors without interrupting notebook execution.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7e0387c07b8ea5a01c67c82254b504d477e8a34 | 5,993 | ipynb | Jupyter Notebook | src/templates/v0.1.9/modules/export/json_to_txt_csv.ipynb | whatevery1says/we1s-templates | ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e | [
"MIT"
] | null | null | null | src/templates/v0.1.9/modules/export/json_to_txt_csv.ipynb | whatevery1says/we1s-templates | ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e | [
"MIT"
] | null | null | null | src/templates/v0.1.9/modules/export/json_to_txt_csv.ipynb | whatevery1says/we1s-templates | ce16ae4a39e3286ed7d9bf4a95bff001ac2d123e | [
"MIT"
] | null | null | null | 32.928571 | 320 | 0.59703 | [
[
[
"# Export JSON to TXT + CSV\n\nThe WE1S workflows use JSON format internally for manipulating data. However, you may wish to export JSON data from a project to plain text files with a CSV metadata file for use with other external tools.\n\nThis notebook uses JSON project data to export a collection of plain txt files — one per JSON document — containing only the document contents field or bag of words. Each file is named with the name of the JSON document and a `.txt` extension.\n\nIt also produces a `metadata.csv` file. This file contains a header and one row per document with the document filename plus required fields.\n\nOutput from this notebook can be imported using the import module by copying the `txt.zip` and `metadata.csv` from `project_data/txt` to `project_data/import`. However, it is generally not recommended to export and then reimport data, as you may lose metadata in the process.\n\n\n## Info\n\n__authors__ = 'Jeremy Douglass, Scott Kleinman' \n__copyright__ = 'copyright 2020, The WE1S Project' \n__license__ = 'MIT' \n__version__ = '2.6' \n__email__ = '[email protected]'\n",
"_____no_output_____"
],
[
"## Setup\n\nThis cell imports python modules and defines import file paths.",
"_____no_output_____"
]
],
[
[
"# Python imports\nfrom pathlib import Path\nfrom IPython.display import display, HTML\n\n# Get path to project_dir\ncurrent_dir = %pwd\nproject_dir = str(Path(current_dir).parent.parent)\njson_dir = project_dir + '/project_data/json'\nconfig_path = project_dir + '/config/config.py'\nexport_script_path = 'scripts/json_to_txt_csv.py'\n# Import the project configuration and classes\n%run {config_path}\n%run {export_script_path}\ndisplay(HTML('Ready!'))",
"_____no_output_____"
]
],
[
[
"## Configuration\n\nThe default configuration assumes:\n\n1. There are JSON files in `project_data/json`.\n2. Each JSON has the required fields `pub_date`, `title`, `author`.\n3. Each JSON file has either:\n - a `content` field, or\n - a `bag_of_words` field created using the `import` module tokenizer (see the \"Export Features Tables\" section below to export text from the `features` field).\n\nBy default, the notebook will export to `project_data/txt`.",
"_____no_output_____"
]
],
[
[
"limit = 10 # limit files exported -- 0 = unlimited.\n\ntxt_dir = project_dir + '/project_data/txt'\nmetafile = project_dir + '/project_data/txt/metadata.csv'\nzipfile = project_dir + '/project_data/txt/txt.zip'\n\n# The listed fields will be checked in order.\n# The first one encountered will be the export content.\n# Documents with no listed field will be excluded from export.\ntxt_content_fields = ['content', 'bag_of_words']\n\n# The listed fields will be copied from json to metadata.csv columns\ncsv_export_fields = ['pub_date', 'title', 'author']\n\n# Set to true to zip the exported text files and remove the originals \nzip_output = True\n\n# Delete any previous export contents in the `txt` directory, including `metadata` file and zip file\nclear_cache = True",
"_____no_output_____"
]
],
[
[
"## Export\n\nStart the export.",
"_____no_output_____"
]
],
[
[
"# Optionally, clear the cache\nif clear_cache:\n clear_txt(txt_dir, metafile=metafile, zipfile=zipfile)\n \n# Perform the export\njson_to_txt_csv(json_dir=json_dir,\n txt_dir=txt_dir,\n txt_content_fields=txt_content_fields,\n csv_export_fields=csv_export_fields,\n metafile=metafile,\n limit=limit)\n\n# Inspect results\nreport_results(txt_dir, metafile)\n\n# Optionally, zip the output\nif zip_output:\n zip_txt(txt_dir=txt_dir, zipfile=zipfile) ",
"_____no_output_____"
]
],
[
[
"## Export Features Tables\n\nIf your data contains features tables (lists of lists containing linguistic features), use the cell below to export features tables as CSV files for each document in your JSON folder. Set the `save_path` to a directory where you wish to save the CSV files. If you are using WE1S public data, this may apply to you.",
"_____no_output_____"
]
],
[
[
"# Configuration\nsave_path = ''\n\n# Run the export\nexport_features_tables(save_path, json_dir)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e03fb7d15f2618cf5cb65f797abf7e8f7a4650 | 732,758 | ipynb | Jupyter Notebook | fNIRS signal analysis/Randonstate/Extraction.ipynb | JulianLee310514065/Complete-Project | 450da85f73449c12296110346298356936caa4bf | [
"MIT"
] | null | null | null | fNIRS signal analysis/Randonstate/Extraction.ipynb | JulianLee310514065/Complete-Project | 450da85f73449c12296110346298356936caa4bf | [
"MIT"
] | null | null | null | fNIRS signal analysis/Randonstate/Extraction.ipynb | JulianLee310514065/Complete-Project | 450da85f73449c12296110346298356936caa4bf | [
"MIT"
] | null | null | null | 293.572917 | 164,046 | 0.913521 | [
[
[
"# import ipynb into other ipynb\n\n> 路過的神器,import ipynb form other ipynb\n\n[Source](https://stackoverflow.com/questions/20186344/importing-an-ipynb-file-from-another-ipynb-file)",
"_____no_output_____"
]
],
[
[
"# import ipynb.fs.full.try1 as try1\n# try1.good()",
"_____no_output_____"
]
],
[
[
"# 本 ipynb 的目標\n> 做 feature extracion 的 function\n\n\n\n# Referance\n\n1. 品妤學姊碩論\n2. 清彥學長碩論\n3. 杰勳學長碩論\n4. This paper (science report, 2019)\n```\nA Machine Learning Approach for\nthe Identification of a Biomarker of\nHuman Pain using fNIRS\n > Raul Fernandez Rojas1,9, Xu Huang1 & Keng-Liang Ou2,3,4,5,6,7,8\n```\n\n\n5. bbox --> annotation的bbox可以不用指定位置",
"_____no_output_____"
]
],
[
[
"import os\nimport glob\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n",
"_____no_output_____"
],
[
"# 老樣子,導到適合的資料夾\nprint(os.getcwd())\n# path = 'C:\\\\Users\\\\BOIL_PO\\\\Desktop\\\\VFT(2)\\\\VFT'\n# os.chdir(path)",
"c:\\Users\\BOIL_PO\\Desktop\\VFT_clean_version\n"
],
[
"all_csv = glob.glob('Filtered//*.csv')\nall_csv[:5]",
"_____no_output_____"
]
],
[
[
"# Time_Host 設成 index的原因:\n\n1. 可用loc切,即用index_name,可以準確地切30秒,不然用iloc還要算筆數\n舉例:\n\n`iloc` 取30秒,必須算 30秒有多少筆 `.iloc[:筆]`\n\n`loc` 取30秒,打`[:30]`他會自己取 < 30的 index",
"_____no_output_____"
]
],
[
[
"check_df = pd.read_csv(all_csv[5], index_col= 'Unnamed: 0').drop(columns= ['Time_Arduino', 'easingdata'])\n# print(check_df.dtypes)\n\ncheck_df = check_df.set_index('Time_Host')\ncheck_df.head()",
"_____no_output_____"
],
[
"# 讀了誰\ncols = check_df.columns\nprint(check_df.columns)",
"Index(['CH1_Oxy', 'CH1_Deoxy', 'CH2_Oxy', 'CH2_Deoxy', 'CH3_Oxy', 'CH3_Deoxy'], dtype='object')\n"
],
[
"# 畫圖確認\nstage1 = 30\nstage2 = 90\nstage3 = 160\ntext_size = 25\n\nplt.figure(figsize= (18, 14))\nfor i in range(int(len(check_df.columns)/2)):\n plt.subplot(3, 1, i+1)\n\n # 第一階段\n plt.plot(check_df.loc[:stage1].index, check_df.loc[:stage1][cols[2*i]], c= 'b', linewidth=3.0, label= 'Rest')\n plt.plot(check_df.loc[:stage1].index, check_df.loc[:stage1][cols[2*i+1]], c= 'r', linewidth=3.0, label= 'Rest')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(check_df.loc[stage1:stage2].index, check_df.loc[stage1:stage2][cols[2*i]], c= 'b', linewidth=3.0, label= 'Task')\n plt.plot(check_df.loc[stage1:stage2].index, check_df.loc[stage1:stage2][cols[2*i+1]], c= 'r', linewidth=3.0, label= 'Task')\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(check_df.loc[stage2:stage3].index, check_df.loc[stage2:stage3][cols[2*i]], c= 'b', linewidth=3.0, label= 'Recovery')\n plt.plot(check_df.loc[stage2:stage3].index, check_df.loc[stage2:stage3][cols[2*i+1]], c= 'r', linewidth=3.0, label= 'Recovery')\n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(cols[2*i] + \"+\" + cols[2*i+1], fontdict={'fontsize': 24})\n plt.tight_layout(pad= 3)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 濾波請用for\n\n> 一定要用for,不然是文組\n>\n> 用 for i in range(len(AA)) 還好,但若是後面沒用到`**位置**`資訊,都是 AA[i],那不是文組,但你寫的是C\n>\n> Python 的 for 是神\n>\n> for str 可以出字母,for list 可以出元素,for model 可以出layer,還有好用的list comprehension `[x**3 for i in range(10) if x%2 == 0]`",
"_____no_output_____"
],
[
"# Feature Extraction (From Lowpass filter)\n\n\n### 清彥\n\n1. 階段起始斜率 (8s) $\\checkmark$\n * Task\n * Recovery\n> \n2. 階段平均的差 $\\checkmark$\n * Task mean – Rest mean\n * Recovery mean – Rest mean\n * Task mean – Recovery mean \n \n>\n3. 階段峰值 $\\checkmark$\n * Task\n\n>\n4. 階段標準差 $\\checkmark$\n * 三個\n\n>\n### 品妤\n\n>\n5. 階段平均 $\\checkmark$\n * 三個\n\n>\n6. 階段起始斜率 的差 $\\checkmark$\n * Task - Recovery\n\n\n### 我 \n1. AUC\n\n\n---\n### 杰勳 bandpass\n1. Stage skewness\n\n\n2. Stage kurtosis",
"_____no_output_____"
]
],
[
[
"# 就重寫,沒意義\nexam_df = pd.read_csv(all_csv[0], index_col= 'Unnamed: 0').drop(columns= ['Time_Arduino', 'easingdata'])\n# print(exam_df.dtypes)\n\nexam_df = exam_df.set_index('Time_Host')\nexam_df.head()",
"_____no_output_____"
]
],
[
[
"## 階段起始斜率 2*6= 12\n\n0. 定義 階段開始前\"八秒\",單位 `?/S`\n\n1. return list\n\n2. 30~38 -> Task\n \n3. 90~98 -> Recovery\n\n\n----",
"_____no_output_____"
]
],
[
[
"def stage_begin_slope(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of slope\n # Tuple[1] : List of index\n #=======================\n\n slope_df = dataframe.loc[30:38]\n slope12 = []\n slope12_index = [col + \"_Task_begin_slope\" for col in slope_df.columns]\n for i in range(len(slope_df.columns)):\n a = (slope_df.iloc[-1, i] - slope_df.iloc[0, i])/8 #八秒\n slope12.append(a)\n \n slope_df34 = dataframe.loc[90:98]\n slope34 = []\n slope34_index = [col + \"_stage_Recovery_slope\" for col in slope_df34.columns]\n for i in range(len(slope_df.columns)):\n a = (slope_df34.iloc[-1, i] - slope_df34.iloc[0, i])/8 #八秒\n slope34.append(a)\n \n\n\n if plot == True:\n #-------plot\n plt.figure(figsize= figsize)\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n text_size = 25\n\n xp1 = np.arange(30, 38, 0.1)\n x1 = np.arange(0, 8, 0.1)\n y1 = x1*slope12[use_col] + slope_df.iloc[0, use_col]\n\n xp2 = np.arange(90, 98, 0.1)\n x2 = np.arange(0, 8, 0.1)\n y2 = x2*slope34[use_col] + slope_df34.iloc[0, use_col]\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.vlines(stage1 + 8, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n plt.plot(xp1, y1, linewidth=5.0, c= 'r')\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.vlines(stage2 + 8, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n plt.plot(xp2, y2, linewidth=5.0, c= 'r')\n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(dataframe.columns[use_col] + \"_stage_begin_slope\", fontdict={'fontsize': 24})\n\n plt.show()\n\n\n return slope12 + slope34, slope12_index + slope34_index",
"_____no_output_____"
],
[
"# 畫看看\nstage_begin_slope(exam_df, plot= True)",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# stage_begin_slope(exam_df, plot= True, use_col= i)",
"_____no_output_____"
]
],
[
[
"# 階段平均 3*6 = 18\n\n1. 0~30 -> Rest\n2. 30~90 -> Task\n3. 90~ 160 -> Recovery",
"_____no_output_____"
]
],
[
[
"def stage_mean(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of mean\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n\n Rest = []\n Task = []\n Recovery = []\n\n Rest_c = []\n Task_c = []\n Recovery_c = []\n\n for col in dataframe.columns:\n\n Rest.append(dataframe.loc[:stage1, col].mean()) #pandas 有 .mean() 可以用\n Rest_c.append(col + '_Rest_mean')\n\n Task.append(dataframe.loc[stage1:stage2, col].mean())\n Task_c.append(col + '_Task_mean')\n\n\n Recovery.append(dataframe.loc[stage2:stage3, col].mean())\n Recovery_c.append(col + '_Recovery_mean')\n\n\n\n if plot == True:\n #-------plot\n plt.figure(figsize= figsize)\n text_size = 25\n\n xp1 = np.arange(0, stage1, 0.1)\n y1 = np.full(xp1.shape, Rest[use_col])\n \n\n xp2 = np.arange(stage1, stage2, 0.1)\n y2 = np.full(xp2.shape, Task[use_col])\n\n xp3 = np.arange(stage2, stage3, 0.1)\n y3 = np.full(xp3.shape, Recovery[use_col])\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.plot(xp1, y1, linewidth=5.0, c= 'r')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n \n plt.plot(xp2, y2, linewidth=5.0, c= 'r')\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(xp3, y3, linewidth=5.0, c= 'r')\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n \n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(dataframe.columns[use_col] + \"_stage_mean\", fontdict={'fontsize': 24})\n\n plt.show()\n\n return Rest + Task + Recovery, Rest_c + Task_c + Recovery_c\n",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# stage_mean(exam_df, plot= True, use_col=i)",
"_____no_output_____"
]
],
[
[
"# 階段平均的差 -> 2*6 = 12\n * Task mean – Rest mean\n * Task mean – Recovery mean \n\n# 活化值 -> 1*6\n * Recovery mean – Rest mean",
"_____no_output_____"
]
],
[
[
"def stage_mean_diff(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of mean diff or activation\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n\n Task_Rest = []\n Recovery_Rest = []\n Task_recovery = []\n \n\n Task_Rest_c = []\n Recovery_Rest_c = []\n Task_recovery_c = []\n\n for col in dataframe.columns:\n # 階段平均差\n Task_Rest.append(dataframe.loc[stage1:stage2, col].mean() - dataframe.loc[:stage1, col].mean())\n Task_Rest_c.append(col + '_Task_m_Rest')\n\n Task_recovery.append(dataframe.loc[stage1:stage2, col].mean() - dataframe.loc[stage2:stage3, col].mean())\n Task_recovery_c.append(col + '_Task_m_Recovery')\n\n # 活化值\n Recovery_Rest.append(dataframe.loc[stage2:stage3, col].mean() - dataframe.loc[:stage1, col].mean())\n Recovery_Rest_c.append(col + '_Recovery_Rest_Activation')\n\n\n\n if plot == True:\n\n import matplotlib.patches as patches\n\n Rest = []\n Task = []\n Recovery = []\n\n Rest_c = []\n Task_c = []\n Recovery_c = []\n\n for col in dataframe.columns:\n\n Rest.append(dataframe.loc[:stage1, col].mean())\n Rest_c.append(col + '_Rest_mean')\n\n Task.append(dataframe.loc[stage1:stage2, col].mean())\n Task_c.append(col + '_Task_mean')\n\n\n Recovery.append(dataframe.loc[stage2:stage3, col].mean())\n Recovery_c.append(col + '_Recovery_mean')\n\n #-------plot\n plt.figure(figsize= figsize)\n text_size = 25\n\n xp1 = np.arange(0, stage1, 0.1)\n y1 = np.full(xp1.shape, Rest[use_col])\n \n\n xp2 = np.arange(stage1, stage2, 0.1)\n y2 = np.full(xp2.shape, Task[use_col])\n\n xp3 = np.arange(stage2, stage3, 0.1)\n y3 = np.full(xp3.shape, Recovery[use_col])\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.plot(xp1, y1, linewidth=3.0, c= 'r')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=1.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n plt.plot(xp2, y2, linewidth=3.0, c= 'r')\n\n plt.annotate(s='', xy=(stage1 + 2, Task[use_col] - 0.03), xytext=(stage1 + 2, Rest[use_col] +0.03), arrowprops=dict(arrowstyle='<->', mutation_scale=10, color= 'k', linewidth= 5))\n\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=1.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(xp3, y3, linewidth=3.0, c= 'r')\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n \n plt.annotate(s='', xy=(stage2 + 2, Recovery[use_col] - 0.03), xytext=(stage2 + 2, Task[use_col] +0.03),arrowprops=dict(arrowstyle='<->', mutation_scale=10, color= 'k', linewidth= 5))\n \n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 
'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(dataframe.columns[use_col] + \"_stage_mean_diff\", fontdict={'fontsize': 24})\n\n plt.show()\n \n \n return Task_Rest + Recovery_Rest + Task_recovery, Task_Rest_c + Recovery_Rest_c + Task_recovery_c",
"_____no_output_____"
]
],
[
[
"### 畫畫看 channel",
"_____no_output_____"
]
],
[
[
"stage_mean_diff(exam_df, plot= True, use_col= 4)",
"_____no_output_____"
]
],
[
[
"# 階段峰值 1*6 = 6\n * Task\n",
"_____no_output_____"
]
],
[
[
"def stage_acivation(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of 峰值\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n\n diffs = []\n diffs_name = []\n\n for cols in dataframe.columns:\n\n diff = dataframe.loc[stage1:stage2, cols].max() - dataframe.loc[stage1:stage2, cols].min()\n diffs.append(diff)\n diffs_name.append(cols + \"_stage_activation\")\n\n\n if plot == True:\n #-------plot\n plt.figure(figsize= figsize)\n text_size = 25\n\n\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n\n plt.hlines(dataframe.loc[stage1:stage2, dataframe.columns[use_col]].min(), stage1, stage2, linestyles= '-', colors= 'black', linewidth=5.0)\n plt.hlines(dataframe.loc[stage1:stage2, dataframe.columns[use_col]].max(), stage1, stage2, linestyles= '-', colors= 'black', linewidth=5.0)\n plt.annotate(s='', xy=( (stage1 + stage2)/2, dataframe[dataframe.columns[use_col]].loc[stage1:stage2].min()), xytext=( (stage1 + stage2)/2, dataframe[dataframe.columns[use_col]].loc[stage1:stage2].max()),arrowprops=dict(arrowstyle='<->', mutation_scale=10, color= 'k', linewidth= 5))\n\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n \n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(dataframe.columns[use_col] + \"_stage_acivation\", fontdict={'fontsize': 24})\n\n plt.show()\n\n return diffs, diffs_name\n ",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# stage_acivation(exam_df, plot= True, use_col= i)",
"_____no_output_____"
]
],
[
[
"# 階段標準差\n * 三個\n\n\n### 標準差不能歸一化 ",
"_____no_output_____"
]
],
[
[
"def stage_std(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of std\n # Tuple[1] : List of index\n #=======================\n\n \n stage1 = 30\n stage2 = 90\n stage3 = 160\n\n Rest_std = []\n Task_std = []\n Recovery_std = []\n\n Rest_std_c = []\n Task_std_c = []\n Recovery_std_c = []\n\n for col in dataframe.columns:\n\n Rest_std.append(dataframe.loc[:stage1, col].std()) # 簡單方便 .std\n Rest_std_c.append(col + '_Rest_std')\n\n Task_std.append(dataframe.loc[stage1:stage2, col].std())\n Task_std_c.append(col + '_Task_std')\n\n\n Recovery_std.append(dataframe.loc[stage2:stage3, col].std())\n Recovery_std_c.append(col + '_Recovery_std')\n\n\n\n if plot == True:\n\n\n Rest = []\n Task = []\n Recovery = []\n\n Rest_c = []\n Task_c = []\n Recovery_c = []\n\n for col in dataframe.columns:\n\n Rest.append(dataframe.loc[:stage1, col].mean())\n Rest_c.append(col + '_Rest_mean')\n\n Task.append(dataframe.loc[stage1:stage2, col].mean())\n Task_c.append(col + '_Task_mean')\n\n\n Recovery.append(dataframe.loc[stage2:stage3, col].mean())\n Recovery_c.append(col + '_Recovery_mean')\n #-------plot\n plt.figure(figsize= figsize)\n text_size = 25\n\n xp1 = np.arange(0, stage1, 0.1)\n y1 = np.full(xp1.shape, Rest[use_col])\n \n\n xp2 = np.arange(stage1, stage2, 0.1)\n y2 = np.full(xp2.shape, Task[use_col])\n\n xp3 = np.arange(stage2, stage3, 0.1)\n y3 = np.full(xp3.shape, Recovery[use_col])\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.plot(xp1, y1, linewidth=5.0, c= 'r')\n\n plt.errorbar((stage1)/2, Rest[use_col], Rest_std[use_col], linestyle='-', marker='^', elinewidth= 3, ecolor= 'k', capsize= 10)\n\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n plt.plot(xp2, y2, linewidth=5.0, c= 'r')\n\n plt.errorbar((stage1 + stage2)/2, Task[use_col], Task_std[use_col], linestyle='-', marker='^', elinewidth= 3, ecolor= 'k', capsize= 10)\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(xp3, y3, linewidth=5.0, c= 'r')\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n \n plt.errorbar((stage3 + stage2)/2, Recovery[use_col], Recovery_std[use_col], linestyle='-', marker='^', elinewidth= 3, ecolor= 'k', capsize= 10)\n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n plt.title(dataframe.columns[use_col] + \"_stage_std\", fontdict={'fontsize': 
24})\n\n plt.show()\n\n return Rest_std + Task_std + Recovery_std, Rest_std_c + Task_std_c + Recovery_std_c",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# stage_std(exam_df, plot= True, use_col= i)",
"_____no_output_____"
]
],
[
[
"# 階段起始斜率 的差\n * Task - Recovery",
"_____no_output_____"
]
],
[
[
"def stage_begin_slope_diff(dataframe):\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of slope diff\n # Tuple[1] : List of index\n #=======================\n\n slope_df = dataframe.loc[30:38]\n slope12 = []\n for i in range(len(slope_df.columns)):\n a = (slope_df.iloc[-1, i] - slope_df.iloc[0, i])/8 #八秒\n slope12.append(a)\n \n slope_df34 = dataframe.loc[90:98]\n slope34 = []\n for i in range(len(slope_df.columns)):\n a = (slope_df34.iloc[-1, i] - slope_df34.iloc[0, i])/8 #八秒\n slope34.append(a)\n\n\n colset = []\n for col in dataframe.columns:\n colset.append(col + \"_Task_Recovery_begin_slope_diff\")\n\n slope_diff = np.array(slope12) - np.array(slope34)\n\n return list(slope_diff), colset\n ",
"_____no_output_____"
],
[
"stage_begin_slope_diff(exam_df)",
"_____no_output_____"
]
],
[
[
"# Stage skewness -> use scipy\n * 三個階段\n\n> 資料分布靠左\"正\"\n>\n> 資料分布靠右\"負\"\n\n\n### [好用圖中圖](https://www.itread01.com/p/518289.html)",
"_____no_output_____"
]
],
[
[
"def stage_skew(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n from scipy.stats import skew\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of skew\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n text_size = 25\n\n rest_skew = []\n task_skew = []\n recovery_skew = []\n\n rest_skew_c = []\n task_skew_c = []\n recovery_skew_c = []\n\n for cols in dataframe.columns:\n\n rest_skew.append(skew(dataframe.loc[:stage1, cols]))\n rest_skew_c.append(cols + '_rest_skew')\n\n task_skew.append(skew(dataframe.loc[stage1:stage2, cols]))\n task_skew_c.append(cols + '_task_skew')\n\n recovery_skew.append(skew(dataframe.loc[stage2:stage3, cols]))\n recovery_skew_c.append(cols + '_recovery_skew')\n\n if plot == True:\n #-------plot\n plt.figure(figsize= figsize)\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n plt.axvspan(0, stage1, facecolor=sns.color_palette('Paired')[0], alpha=0.5)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n plt.axvspan(stage1, stage2, facecolor=sns.color_palette('Paired')[1], alpha=0.5)\n \n \n \n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n \n plt.title(dataframe.columns[use_col] + \"_stage_skew\", fontdict={'fontsize': 24})\n\n plt.axes([0.65, 0.2, 0.2, 0.2])\n sns.histplot(dataframe.loc[stage1:stage2, dataframe.columns[use_col]], bins= 30)\n plt.title(\"Task skew\", fontdict={'fontsize': 13})\n\n\n \n\n plt.show()\n\n\n return rest_skew + task_skew + recovery_skew, rest_skew_c + task_skew_c + recovery_skew_c\n",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# a = stage_skew(exam_df, plot= True, use_col= i)",
"_____no_output_____"
]
],
[
[
"# Stage kurtosis 峰度(尖度)\n\n * 三個",
"_____no_output_____"
]
],
[
[
"def stage_kurtosis(dataframe):\n\n from scipy.stats import kurtosis\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of kurtosis\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n text_size = 25\n\n rest_skew = []\n task_skew = []\n recovery_skew = []\n\n rest_skew_c = []\n task_skew_c = []\n recovery_skew_c = []\n\n for cols in dataframe.columns:\n\n rest_skew.append(kurtosis(dataframe.loc[:stage1, cols]))\n rest_skew_c.append(cols + '_rest_kurtosis')\n\n task_skew.append(kurtosis(dataframe.loc[stage1:stage2, cols]))\n task_skew_c.append(cols + '_task_kurtosis')\n\n recovery_skew.append(kurtosis(dataframe.loc[stage2:stage3, cols]))\n recovery_skew_c.append(cols + '_recovery_kurtosis')\n\n return rest_skew + task_skew + recovery_skew, rest_skew_c + task_skew_c + recovery_skew_c\n",
"_____no_output_____"
],
[
"stage_kurtosis(dataframe= exam_df)",
"_____no_output_____"
]
],
[
[
"# AUC -> use sklearn\n * 三個\n\n1. 看了很多,好比說scipy.integrate, numpy.trap\n2. 還是 sklearn的好用,(這邊其他的也可以試試不強制)",
"_____no_output_____"
]
],
[
[
"def stage_auc(dataframe, plot= False, figsize= (10, 6), use_col= 0):\n from sklearn.metrics import auc\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of auc\n # Tuple[1] : List of index\n #=======================\n\n stage1 = 30\n stage2 = 90\n stage3 = 160\n\n rest_auc = []\n Task_auc = []\n recovery_auc = []\n\n rest_auc_c = []\n Task_auc_c = []\n recovery_auc_c = []\n\n for cols in dataframe.columns: \n rest_auc.append(auc(dataframe.loc[:stage1, cols].index, dataframe.loc[:stage1, cols]))\n rest_auc_c.append(cols + '_rest_auc')\n\n Task_auc.append(auc(dataframe.loc[stage1:stage2, cols].index, dataframe.loc[stage1:stage2, cols]))\n Task_auc_c.append(cols + '_Task_auc')\n\n recovery_auc.append(auc(dataframe.loc[stage2:stage3, cols].index, dataframe.loc[stage2:stage3, cols]))\n recovery_auc_c.append(cols + '_recovery_auc')\n\n if plot == True:\n #-------plot\n plt.figure(figsize= figsize)\n \n plt.plot(dataframe.loc[:stage1].index, dataframe.loc[:stage1, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Rest')\n \n yy1 = dataframe.loc[0:stage1, dataframe.columns[use_col]]\n plt.fill_between(np.linspace(0, stage1, yy1.shape[0]), yy1, step=\"pre\", facecolor=sns.color_palette('Paired')[0], y2=-0.1)\n plt.vlines(stage1, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text(stage1/2, 1.2, \"rest\", size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第二階段\n plt.plot(dataframe.loc[stage1:stage2].index, dataframe.loc[stage1:stage2, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Task')\n yy2 = dataframe.loc[stage1:stage2, dataframe.columns[use_col]]\n plt.fill_between(np.linspace(stage1, stage2, yy2.shape[0]), yy2, step=\"pre\", facecolor=sns.color_palette('Paired')[1], y2=-0.1)\n \n plt.vlines(stage2, -0.1, 1.3, linestyles= '--', colors= 'black', linewidth=2.0)\n plt.text((stage2 + stage1)/2, 1.2, 'Task', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n\n # 第三階段\n plt.plot(dataframe.loc[stage2:stage3].index, dataframe.loc[stage2:stage3, dataframe.columns[use_col]], c= 'b', linewidth=2.0, label= 'Recovery')\n # plt.axvspan(stage2, stage3, facecolor=sns.color_palette('Paired')[2], alpha=0.75)\n plt.text((stage3 + stage2)/2, 1.2, 'Recovery', size= text_size, ha=\"center\", va= 'center', bbox=dict(boxstyle=\"round\",ec=(1., 0.5, 0.5),fc=(1., 0.8, 0.8),))\n \n yy3 = dataframe.loc[stage2:stage3, dataframe.columns[use_col]]\n plt.fill_between(np.linspace(stage2, stage3, yy3.shape[0]), yy3, step=\"pre\", facecolor=sns.color_palette('Paired')[2], y2=-0.1)\n\n plt.title(dataframe.columns[use_col] + \"_stage_auc\", fontdict={'fontsize': 24})\n\n plt.show()\n\n\n return rest_auc + Task_auc + recovery_auc, rest_auc_c + Task_auc_c + recovery_auc_c\n ",
"_____no_output_____"
]
],
[
[
"### 畫全部 channel",
"_____no_output_____"
]
],
[
[
"# for i in range(6):\n# stage_auc(exam_df, plot=True, use_col= i)",
"_____no_output_____"
]
],
[
[
"# FFT\n\n1. 取樣頻率要是頻率的兩倍\n\n",
"_____no_output_____"
],
[
"# 待釐清\n\n1. 要取 \"全部\"、\"還是三階段**各自**的\"、\"還是Task就好的\" fft\n > 目前是想說,既然訊號是連續三個階段一次做完的,生理訊號的頻率應該也會持續,所以取三個階段各自的沒意義,一次去取全部較好\n\n2. 平方 -> 是一個叫做 PSD 的東西\n`fft_ps = np.abs(fft_window)**2`\n\n### > referance\n\n[1. ML Fundamentals](https://ataspinar.com/2018/04/04/machine-learning-with-signal-processing-techniques/) -> use scipy\n\n[2. stackoverflow](https://stackoverflow.com/questions/45863400/python-fft-for-feature-extraction) -> use numpy\n",
"_____no_output_____"
],
[
"# 以 全時域(0~160s) 資料下去做 fft",
"_____no_output_____"
]
],
[
[
"# 第一行\ny = exam_df.iloc[:, 0].values",
"_____no_output_____"
]
],
[
[
"#### stack overflow -> numpy",
"_____no_output_____"
]
],
[
[
"# stack overflow\nimport numpy as np\n\nsample_rate = 24\nN = np.array(y).shape[-1]\n\n# 從 0 ~ 12Hz 取 N/個\nfft_window = np.fft.fft(y)\nfreq = np.fft.fftfreq(N, d=1/24)\n\n# 為啥要平方??\nfft_ps = np.abs(fft_window)**2",
"_____no_output_____"
],
[
"fft_window.shape, freq.shape, freq.max(), freq.min()",
"_____no_output_____"
],
[
"plt.plot(freq)",
"_____no_output_____"
]
],
[
[
"## 0.12Hz (cut down frequency)之後decay很快,看起來合理",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nfig = plt.figure(figsize=(14, 7))\n\n\nplt.plot(freq, 2.0/N *np.abs(fft_window), label= 'FFT')\n# plt.plot(freq, np.log10(fft_ps))\nplt.ylim(0, 0.08)\nplt.xlim(0.005, 0.4)\nplt.vlines(0.12, 0, 100, colors= 'r', linestyles= '--', label= 'Cut down Freq (low pass)', )\nplt.xlabel(\"Frequency\")\nplt.ylabel(\"Amplitude\")\nplt.annotate(\"0.12\", (0.110, 0.05), fontsize= 20, bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\nplt.title('FFt', fontsize= 20)\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### ML Fundamentals -> scipy",
"_____no_output_____"
]
],
[
[
"from scipy.fft import fft",
"_____no_output_____"
],
[
"def get_fft_values(y_values, T, N, f_s):\n f_values = np.linspace(0.0, 1.0/(2.0*T), N//2)\n fft_values_ = fft(y_values)\n\n # 歸一化嗎??\n fft_values = 2.0/N * np.abs(fft_values_[0:N//2])\n return f_values, fft_values\n \nf_s = 24\nT = 1/f_s\nN = np.array(y).shape[-1]\n \nf_values, fft_values = get_fft_values(y, T, N, f_s)\n \nplt.figure(figsize= (14, 7))\nplt.plot(f_values, fft_values, linestyle='-', color='blue')\nplt.xlabel('Frequency [Hz]', fontsize=16)\nplt.ylabel('Amplitude', fontsize=16)\nplt.title(\"Frequency domain of the signal\", fontsize=16)\nplt.vlines(0.12, 0, 0.085, colors= 'r', linestyles= '--', label= 'Cut down Freq (low pass)', )\nplt.annotate(\"0.12\", (0.110, 0.05), fontsize= 20, bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\nplt.ylim(0, 0.08)\nplt.xlim(0.005, 0.4)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# 找峰值 -> scipy \n\n> 下面網站有很多方法\n\n[1. 好用網站](https://www.delftstack.com/zh-tw/howto/python/find-peaks-in-python/)",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom scipy.signal import argrelextrema\n\npeaks = argrelextrema(fft_values, np.greater)\nprint(peaks)",
"(array([ 5, 7, 12, 14, 16, 18, 21, 24, 27], dtype=int64),)\n"
],
[
"f_values[5], fft_values[5]\n\nfor ind in peaks[0]:\n print(f_values[ind], fft_values[ind])\n\npeaks[0]",
"0.04155124653739613 0.018987577734909184\n0.05817174515235457 0.02397264138412049\n0.0997229916897507 0.029568646094166144\n0.11634349030470914 0.015344380096557166\n0.1329639889196676 0.00885660258367288\n0.14958448753462605 0.004318247202516568\n0.1745152354570637 0.0038312471787040753\n0.1994459833795014 0.002523022373123932\n0.22437673130193908 0.0021880557234638105\n"
],
[
"plt.figure(figsize= (14, 7))\nplt.plot(f_values, fft_values, linestyle='-', color='blue')\nplt.xlabel('Frequency [Hz]', fontsize=16)\nplt.ylabel('Amplitude', fontsize=16)\nplt.title(\"Frequency domain of the signal\", fontsize=16)\nplt.vlines(0.12, 0, 0.085, colors= 'r', linestyles= '--', label= 'Cut down Freq (low pass)', )\nplt.annotate(\"0.12\", (0.110, 0.05), fontsize= 20, bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\nfor ind in peaks[0]:\n plt.annotate(\"peak\", (f_values[ind]-0.005, fft_values[ind]), bbox=dict(boxstyle=\"Circle\", alpha= 0.4, ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\n\nplt.ylim(0, 0.08)\nplt.xlim(0.005, 0.4)\nplt.show()",
"_____no_output_____"
],
[
"# 留0.12 以下 ?\n\nsave_index = [x for x in peaks[0] if f_values[x] <= 0.12]\nprint(save_index)\n\n# 直接找前3名\n\n# np.argsort ??\nuse_ind = np.argsort(fft_values[peaks[0]])[-3:][::-1]\nreal_ind = peaks[0][use_ind]\nreal_ind\n\nwhole = list(zip(f_values[real_ind], fft_values[real_ind]))\nwhole",
"[5, 7, 12, 14]\n"
],
[
"plt.figure(figsize= (14, 7))\nplt.plot(f_values, fft_values, linestyle='-', color='blue')\nplt.xlabel('Frequency [Hz]', fontsize=16)\nplt.ylabel('Amplitude', fontsize=16)\nplt.title(\"Frequency domain of the signal\", fontsize=16)\nplt.vlines(0.12, 0, 0.085, colors= 'r', linestyles= '--', label= 'Cut down Freq (low pass)', )\nplt.annotate(\"0.12\", (0.110, 0.05), fontsize= 20, bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\nfor i, val in enumerate(whole):\n plt.annotate(f\"First {i+1} peak\", (val[0]+0.005, val[1]),size=10, bbox=dict(boxstyle=\"LArrow\", alpha= 0.5, ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\n\nplt.ylim(0, 0.08)\nplt.xlim(0.005, 0.4)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# FFT\n * 一張圖 3 個峰值,共有六種血氧(3channel * 含氧/缺氧)\n * 一個峰值會有兩數值,一個是 amp,一個是 峰值的頻率\n",
"_____no_output_____"
]
],
[
[
"def FFT(dataframe, f_s = 24, plot= False):\n from scipy.fft import fft\n import numpy as np\n from scipy.signal import argrelextrema\n\n #============================\n # Parameter: \n # dataframe: input dataframe\n # plot : whether to plot\n # figsize: plt.figure(figsize= figsize)\n # Return:\n # Tuple: \n # Tuple[0] : List of fft\n # Tuple[1] : List of index\n #=======================\n\n save_fft = []\n save_fft_index = []\n # column 0 fft\n\n for colss in dataframe.columns:\n y = dataframe.loc[:, colss].values\n\n def get_fft_values(y_values, T, N, f_s):\n f_values = np.linspace(0.0, 1.0/(2.0*T), N//2)\n fft_values_ = fft(y_values)\n\n # 歸一化嗎??\n fft_values = 2.0/N * np.abs(fft_values_[0:N//2])\n return f_values, fft_values\n \n f_s = f_s\n T = 1/f_s\n N = np.array(y).shape[-1]\n \n f_values, fft_values = get_fft_values(y, T, N, f_s)\n\n \n peaks = argrelextrema(fft_values, np.greater)\n # print(peaks)\n\n use_ind = np.argsort(fft_values[peaks[0]])[-3:][::-1]\n real_ind = peaks[0][use_ind]\n\n whole = list(zip(f_values[real_ind], fft_values[real_ind]))\n whole = list(np.array(whole).ravel())\n save_fft += whole\n\n save_fft_index += [f'{colss} First Freq', f'{colss} First Amp', f'{colss} Second Freq', f'{colss} Second Amp', f'{colss} Third Freq', f'{colss} Third Amp']\n\n\n\n if plot:\n\n plt.figure(figsize= (14, 7))\n plt.plot(f_values, fft_values, linestyle='-', color='blue')\n plt.xlabel('Frequency [Hz]', fontsize=16)\n plt.ylabel('Amplitude', fontsize=16)\n plt.title(f\"Frequency domain of the {colss} signal\", fontsize=16)\n plt.vlines(0.12, 0, 0.15, colors= 'r', linestyles= '--', label= 'Cut down Freq (low pass)', )\n plt.annotate(\"0.12\", (0.11, 0.1), fontsize= 20, bbox=dict(boxstyle=\"round\", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\n for ind in peaks[0]:\n plt.annotate(\"peak\", (f_values[ind]-0.005, fft_values[ind]), bbox=dict(boxstyle=\"Circle\", alpha= 0.4, ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))\n\n\n plt.ylim(0, 0.15)\n plt.xlim(0.005, 0.4)\n plt.show()\n\n return save_fft, save_fft_index\n",
"_____no_output_____"
],
[
"df = pd.read_csv(all_csv[5])\ndf = df.drop(columns= ['Unnamed: 0', 'Time_Arduino', 'easingdata'])\ndf = df.set_index('Time_Host')\nFFT(df, plot= True)",
"_____no_output_____"
]
],
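The `FFT` helper above returns a flat list of values together with a matching list of column labels: for every input column, the three largest spectral peaks as alternating frequency/amplitude entries. A minimal sketch of consuming that return value, assuming the `df` prepared in the cell above; nothing here is part of the original notebook:

```python
import pandas as pd

# Build a one-row feature table from the (values, labels) pair returned by FFT().
values, labels = FFT(df, f_s=24, plot=False)
features = pd.DataFrame([values], columns=labels)

# Labels follow the scheme "<column> First Freq", "<column> First Amp", ...,
# "<column> Third Amp", so each input column contributes 6 features.
print(features.shape)
```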
[
[
"# 顛倒ndarray -> 帥\n\n`fft_values[real_ind][::-1]`",
"_____no_output_____"
],
[
"# Numpy 神技",
"_____no_output_____"
]
],
[
[
"test= np.arange(1, 10)\ntest",
"_____no_output_____"
],
[
"test[::-1]",
"_____no_output_____"
],
[
"test[::-2]",
"_____no_output_____"
],
[
"test[::-3]",
"_____no_output_____"
],
[
"test[::1]",
"_____no_output_____"
],
[
"test[::2]",
"_____no_output_____"
],
[
"test[::3]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e043793065908aa4a50907ca51f6f9e62df57b | 8,591 | ipynb | Jupyter Notebook | docs/examples/devlop/devlop-scatter.ipynb | samwelborn/mpl-interactions | aa3b1bcc85332eda4d098e7ad16bea16615f9424 | [
"BSD-3-Clause"
] | 67 | 2020-08-09T16:41:32.000Z | 2022-03-31T20:46:20.000Z | docs/examples/devlop/devlop-scatter.ipynb | samwelborn/mpl-interactions | aa3b1bcc85332eda4d098e7ad16bea16615f9424 | [
"BSD-3-Clause"
] | 172 | 2020-08-04T00:31:19.000Z | 2022-03-17T19:19:03.000Z | docs/examples/devlop/devlop-scatter.ipynb | samwelborn/mpl-interactions | aa3b1bcc85332eda4d098e7ad16bea16615f9424 | [
"BSD-3-Clause"
] | 17 | 2020-08-06T17:26:01.000Z | 2022-01-04T23:46:01.000Z | 23.156334 | 105 | 0.474799 | [
[
[
"## Danger zone. This notebook is just here to be convenient for development",
"_____no_output_____"
]
],
[
[
"%matplotlib ipympl\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n%load_ext autoreload\n%autoreload 2\nfrom mpl_interactions import *",
"_____no_output_____"
],
[
"plt.close()\nfig, ax = plt.subplots()\nzoom_factory(ax)\nph = panhandler(fig)\nN = 50\nx = np.random.rand(N)\ny = np.random.rand(N)\ncolors = np.random.rand(N)\narea = (30 * np.random.rand(N)) ** 2 # 0 to 15 point radii\n\nscat = plt.scatter(x, y, s=area, c=colors, alpha=0.5, label=\"yikes\", cmap=\"viridis\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"x_new = np.random.randn(N + 1000)\ny_new = np.random.randn(N + 1000)\n\nnew = np.array([x_new, y_new]).T\nscat.set_offsets(new)",
"_____no_output_____"
],
[
"def f(mean):\n \"\"\"\n should be able to return either:\n x, y\n\n or arr where arr.shape = (N, 2 )\n I should check that\n \"\"\"\n print(mean)\n x = np.random.rand(N) * mean\n y = np.random.rand(N) * mean\n return x, y",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nzoom_factory(ax)\nph = panhandler(fig)\nN = 50\nx = np.random.rand(N)\ny = np.random.rand(N)\ncolors = np.random.rand(N)\narea = (30 * np.random.rand(N)) ** 2 # 0 to 15 point radii\n\nscat = plt.scatter(x, y, s=area, c=colors, alpha=0.5, label=\"yikes\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"slider = widgets.FloatSlider(min=-0.5, max=1.5, step=0.01)\nax.plot([-10, 10], [0, 10])\n\n\ndef update(change):\n # print(change)\n out = f(change[\"new\"])\n\n out = np.asanyarray(out)\n if out.shape[0] == 2 and out.shape[1] != 2:\n # check if transpose is necessary\n # but not way to check if shape is 2x2\n out = out.T\n # print(out.shape)\n scat.set_offsets(out)\n # ax.ignore_existing_data_limits = True\n ax.update_datalim(scat.get_datalim(ax.transData))\n ax.autoscale_view()\n\n fig.canvas.draw()\n\n\nslider.observe(update, names=[\"value\"])\nslider",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom IPython.display import display\nfrom ipywidgets import widgets\n\nx = np.arange(10)\n\nfig, ax = plt.subplots()\nscatter = ax.scatter(x, x, label=\"y = a*x+b\")\n\nax.legend()\n\nline = ax.plot([-10, 10], [0, 1])[0]\n\n\ndef update_plot(a, b):\n y = a * x + b\n scatter.set_offsets(np.c_[x, y])\n line.set_data(x - 3, y)\n\n ax.relim()\n ax.ignore_existing_data_limits = True\n ax.update_datalim(scatter.get_datalim(ax.transData))\n ax.autoscale_view()\n\n fig.canvas.draw_idle()\n\n\na = widgets.FloatSlider(min=0.5, max=4, value=1, description=\"a:\")\nb = widgets.FloatSlider(min=0, max=40, value=10, description=\"b:\")\n\nwidgets.interactive(update_plot, a=a, b=b)",
"_____no_output_____"
],
[
"N = 50\n\n\ndef f(mean):\n x = np.random.rand(N) + mean\n y = np.random.rand(N) + mean\n return x, y\n\n\ndef f2(mean):\n x = np.random.rand(N) - mean\n y = np.random.rand(N) - mean\n return x, y\n\n\nblarg = interactive_scatter([f, f2], mean=(0, 1, 100), c=[np.random.randn(N), np.random.randn(N)])",
"_____no_output_____"
],
[
"N = 50\n\n\ndef f(mean):\n x = np.random.rand(N) + mean - 0.5\n y = np.random.rand(N) + mean - 0.5\n return x, y\n\n\ndef c_func(x, y, mean):\n return x\n\n\ndef s_func(x, y, mean):\n return 40 / x\n\n\ndef ec_func(x, y, mean):\n if np.random.rand() > 0.5:\n return \"black\"\n else:\n return \"red\"\n\n\nblarg = interactive_scatter(f, mean=(0, 1, 100), c=c_func, s=s_func, alpha=0.9, edgecolors=ec_func)",
"_____no_output_____"
],
[
"def alpha_func(mean):\n return mean / 1\n\n\nblarg2 = interactive_scatter(\n (x, y), mean=(0, 1, 100), c=c_func, s=s_func, alpha=alpha_func, edgecolors=ec_func\n)",
"_____no_output_____"
],
[
"N = 500\n\n\ndef f(mean):\n x = (np.random.rand(N) - 0.5) + mean\n y = 10 * (np.random.rand(N) - 0.5) + mean\n return x, y\n\n\n(x, y) = f(0.5)\n\n\ndef threshold(x, y, mean):\n colors = np.zeros((len(x), 4))\n colors[:, -1] = 1\n deltas = np.abs(y - mean)\n idx = deltas < 0.01\n deltas /= deltas.max()\n colors[~idx, -1] = np.clip(0.8 - deltas[~idx], 0, 1)\n # print(colors)\n return colors\n\n\nblarg2 = interactive_scatter((x, y), mean=(0, 1, 100), c=threshold)",
"_____no_output_____"
],
[
"from inspect import signature\n\n\ndef someMethod(arg1, kwarg1=None):\n pass\n\n\nsig = signature(someMethod)",
"_____no_output_____"
],
[
"len(sig.parameters)",
"_____no_output_____"
],
[
"from matplotlib.colors import is_color_like\n\nis_color_like(threshold(x, y, 4)[0])",
"_____no_output_____"
],
[
"scats.setscat.cmap([[0], [1], [23]]).shape",
"_____no_output_____"
],
[
"scat.cmap??",
"_____no_output_____"
],
[
"from matplotlib import colors as mcolors",
"_____no_output_____"
],
[
"mcolors.to_rgba_array(\"red\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e04fde3d9d96058e71549fff040ecf33b03007 | 45,561 | ipynb | Jupyter Notebook | notebooks/ner_blog_post.ipynb | teresaborcuch/teresaborcuch.github.io | 7ffd610bef71505056e2274a5ad4366939aa4bc9 | [
"MIT"
] | null | null | null | notebooks/ner_blog_post.ipynb | teresaborcuch/teresaborcuch.github.io | 7ffd610bef71505056e2274a5ad4366939aa4bc9 | [
"MIT"
] | null | null | null | notebooks/ner_blog_post.ipynb | teresaborcuch/teresaborcuch.github.io | 7ffd610bef71505056e2274a5ad4366939aa4bc9 | [
"MIT"
] | null | null | null | 67.001471 | 23,514 | 0.725533 | [
[
[
"# EDA: Named Entity Recognition\n\nNamed entity recognition is the process of identifing particular elements from text, such as names, places, quantities, percentages, times/dates, etc. Identifying and quantifying what the general content types an article contains seems like a good predictor of what type of article it is. World news articles, for example, might mention more places than opinion articles, and business articles might have more percentages or dates than other sections. For each article, I'll count how many total mentions of people or places there are in the titles, as well as how many unique mentions for article bodies.\n\nThe Stanford NLP group has published three [Named-Entity Recognizers](http://nlp.stanford.edu/software/CRF-NER.shtml). The three class model recognizes locations, persons, and organizations, and at least for now, this is the one I'll be using. Although NER's are written in Java, there is the Pyner interface for Python, as well as an NLTK wrapper (which I'll be using).\n\nAlthough state-of-the-art taggers can achieve near-human levels of accuracy, this one does make a few mistakes. One obvious flaw is that if I feed the tagger unigram terms, two-part names such as \"Michael Jordan\" will count as (\"Michael\", \"PERSON\") and (\"Jordan\", \"PERSON\"). I can roughly correct for this by dividing my average name entity count by two if need be. Additionally, sometimes the tagger mis-tags certain people or places. For instance, it failed to recognize \"Cameroon\" as a location, but tagged the word \"Heartbreak\" in the article title \"A Personal Trainer for Heartbreak\" as a person. That being said, let's see what it can do on my news data.",
"_____no_output_____"
]
],
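Because the tagger is fed unigrams, multi-word names come back as separate `PERSON` tokens. One hedged way to work around this is to merge adjacent tokens that carry the same tag before counting; the sketch below is not part of the original analysis, and the example tag list is made up for illustration:

```python
def merge_adjacent(tagged):
    """Collapse runs of consecutive tokens sharing a non-'O' tag into
    single (phrase, tag) entities, e.g. ('Michael Jordan', 'PERSON')."""
    entities, current_tokens, current_tag = [], [], None
    for token, tag in tagged:
        if tag != 'O' and tag == current_tag:
            current_tokens.append(token)              # extend the running entity
        else:
            if current_tokens:
                entities.append((' '.join(current_tokens), current_tag))
            current_tokens = [token] if tag != 'O' else []
            current_tag = tag if tag != 'O' else None
    if current_tokens:
        entities.append((' '.join(current_tokens), current_tag))
    return entities

# Illustrative input only:
print(merge_adjacent([('Michael', 'PERSON'), ('Jordan', 'PERSON'),
                      ('visited', 'O'), ('Chicago', 'LOCATION')]))
# [('Michael Jordan', 'PERSON'), ('Chicago', 'LOCATION')]
```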
[
[
"import articledata\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport operator",
"_____no_output_____"
],
[
"data = pd.read_pickle('/Users/teresaborcuch/capstone_project/notebooks/pickled_data.pkl')",
"_____no_output_____"
]
],
[
[
"## Counting Named Entities\n\nHere is my count_entities function. The idea is to count the total mentions of a person or a place in an article's body or title and save them as columns in my existing data structure.",
"_____no_output_____"
]
],
[
[
"def count_entities(data = None, title = True):\n # set up tagger\n os.environ['CLASSPATH'] = \"/Users/teresaborcuch/stanford-ner-2013-11-12/stanford-ner.jar\"\n os.environ['STANFORD_MODELS'] = '/Users/teresaborcuch/stanford-ner-2013-11-12/classifiers'\n st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz')\n\n tagged_titles = []\n persons = []\n places = []\n\n if title:\n\n for x in data['title']:\n tokens = word_tokenize(x)\n tags = st.tag(tokens)\n tagged_titles.append(tags)\n\n for pair_list in tagged_titles:\n person_count = 0\n place_count = 0\n for pair in pair_list:\n if pair[1] == 'PERSON':\n person_count +=1\n elif pair[1] == 'LOCATION':\n place_count +=1\n else:\n continue\n persons.append(person_count)\n places.append(place_count)\n\n\n data['total_persons_title'] = persons\n data['total_places_title'] = places\n\n else:\n for x in data['body']:\n tokens = word_tokenize(x)\n tags = st.tag(tokens)\n tagged_titles.append(tags)\n\n for pair_list in tagged_titles:\n person_count = 0\n place_count = 0\n for pair in pair_list:\n if pair[1] == 'PERSON':\n person_count +=1\n elif pair[1] == 'LOCATION':\n place_count +=1\n else:\n continue\n persons.append(person_count)\n places.append(place_count)\n\n data['total_persons_body'] = persons\n data['total_places_body'] = places\n\n return data",
"_____no_output_____"
],
[
"# Count people and places in article titles and save as new columns\n# Warning - this is super slow! \ndata = articledata.count_entities(data = data, title = True)",
"_____no_output_____"
],
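Most of the running time above comes from calling the Java tagger once per title. NLTK's `StanfordNERTagger` also exposes `tag_sents`, which pushes many tokenized sentences through a single call; the sketch below reuses the tagger setup (`st`) and `data` frame from `count_entities`, and the speedup is an expectation rather than a measurement:

```python
from nltk import word_tokenize

# Tokenize every title first, then tag them all in one round-trip to the Java process.
tokenized_titles = [word_tokenize(t) for t in data['title']]
tagged_titles = st.tag_sents(tokenized_titles)

persons = [sum(1 for _, tag in tags if tag == 'PERSON') for tags in tagged_titles]
places = [sum(1 for _, tag in tags if tag == 'LOCATION') for tags in tagged_titles]
```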
[
"data.head(1)",
"_____no_output_____"
],
[
"# pickle the file to avoid having to re-run this for future analyses\ndata.to_pickle('/Users/teresaborcuch/capstone_project/notebooks/ss_entity_data.pkl')",
"_____no_output_____"
],
[
"sns.set_style(\"whitegrid\", {'axes.grid' : False})\nfig = plt.figure(figsize = (12, 5))\n\nax1 = fig.add_subplot(1,2,1)\nax1.hist(data['total_persons_title'])\nax1.set_xlabel(\"Total Person Count in Article Titles \")\nax1.set_ylim(0,2500)\nax1.set_xlim(0,6)\n\nax2 = fig.add_subplot(1,2,2)\nax2.hist(data['total_places_title'])\nax2.set_xlabel(\"Total Place Count in Article Titles\")\nax2.set_ylim(0, 2500)\nax2.set_xlim(0,6)\nplt.show()",
"_____no_output_____"
]
],
[
[
"These graphs indicate that person and place counts from article are both strongly right skewed. It might be more interesting to compare mean person and place counts among different sections.",
"_____no_output_____"
]
],
[
[
"data.pivot_table(\n index = ['condensed_section'], \n values = ['total_persons_title', 'total_places_title']).sort_values('total_persons_title', ascending = False)",
"_____no_output_____"
]
],
[
[
"From this pivot table, it seems there are a few distinctions to be made between different sections. Entertainment and sports contain more person mentions on average than any other sections, and world news contains more places in the title than other sections.",
"_____no_output_____"
],
[
"## Finding Common Named Entities\n\nNow, I'll try to see which people are places get the most mentions in each section. I've written an evaluate_entities function that creates a dictionary of counts for each unique person or place in a particular section or for a particular source.",
"_____no_output_____"
]
],
[
[
"def evaluate_entities(data = None, section = None, source = None):\n section_mask = (data['condensed_section'] == section)\n source_mask = (data['source'] == source)\n\n if section and source:\n masked_data = data[section_mask & source_mask]\n\n elif section:\n masked_data = data[section_mask]\n\n elif source:\n masked_data = data[source_mask]\n\n else:\n masked_data = data\n\n # set up tagger\n os.environ['CLASSPATH'] = \"/Users/teresaborcuch/stanford-ner-2013-11-12/stanford-ner.jar\"\n os.environ['STANFORD_MODELS'] = '/Users/teresaborcuch/stanford-ner-2013-11-12/classifiers'\n st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz')\n \n # dictionaries to hold counts of entities\n person_dict = {}\n place_dict = {}\n\n for x in masked_data['body']:\n tokens = word_tokenize(x)\n tags = st.tag(tokens)\n for pair in tags:\n if pair[1] == 'PERSON':\n if pair[0] not in person_dict.keys():\n person_dict[pair[0]] = 1\n else:\n person_dict[pair[0]] +=1\n elif pair[1] == 'LOCATION':\n if pair[0] not in place_dict.keys():\n place_dict[pair[0]] = 1\n else:\n place_dict[pair[0]] += 1\n\n return person_dict, place_dict",
"_____no_output_____"
]
],
[
[
"### Commonly Mentioned People in World News and Entertainment",
"_____no_output_____"
]
],
[
[
"world_persons, world_places = articledata.evaluate_entities(data = data, section = 'world', source = None)",
"_____no_output_____"
],
[
"# get top 20 people from world news article bodies\nsorted_wp = sorted(world_persons.items(), key=operator.itemgetter(1))\nsorted_wp.reverse()\nsorted_wp[:20]",
"_____no_output_____"
]
],
[
[
"Perhaps as expected, Trump is the most commonly mentioned person in world news, with 1,237 mentions in 467 articles, with Obama and Putin coming in second and third. It's interesting to note that most of these names are political figures, but since the tagger only receives unigrams, partial names and first names are mentioned as well.",
"_____no_output_____"
]
],
[
[
"entertainment_persons, entertainment_places = articledata.evaluate_entities(data = data, section = 'entertainment', source = None)",
"_____no_output_____"
],
[
"sorted_ep = sorted(entertainment_persons.items(), key=operator.itemgetter(1))\nsorted_ep.reverse()\nsorted_ep[:20]",
"_____no_output_____"
]
],
[
[
"Now, I'll compare the top 20 people mentioned in entertainment articles. Trump still takes the number one spot, but interestingly, he's followed by a string of first names. NLTK provides a corpus of male and female-tagged first names, so counting the number of informal mentions or even the ratio of men to women might be a useful feature for classifying articles.",
"_____no_output_____"
],
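A hedged sketch of the first-name idea mentioned above, using NLTK's `names` corpus to label single-token person mentions as male or female; the input list is a placeholder, not output from the articles:

```python
from nltk.corpus import names  # may require nltk.download('names') once

male_names = set(names.words('male.txt'))
female_names = set(names.words('female.txt'))

def gender_counts(first_names):
    """Return (male, female) counts for a list of first-name strings."""
    male = sum(1 for n in first_names if n in male_names)
    female = sum(1 for n in first_names if n in female_names)
    return male, female

# Illustrative input only:
print(gender_counts(['Donald', 'Hillary', 'Emma', 'James']))
```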
[
"### Commonly Mentioned Places in World News and Entertainment\n\nCompared to those from the world news section, the locations in the entertainment section are mostly in the United States: New York City (pieced together from \"New\", \"York\", and \"City\") seems to be the most common, but Los Angeles, Manhattan, and Chicago also appear. There are a few international destinations (fashionable ones like London and Paris and their respective countries), but nowhere near as many as in the world news section, where, after the U.S, Iran, China, and Russia take the top spots.",
"_____no_output_____"
]
],
[
[
"# get top 20 places from world news article bodies\nsorted_wp = sorted(world_places.items(), key=operator.itemgetter(1))\nsorted_wp.reverse()\nsorted_wp[:20]",
"_____no_output_____"
],
[
"# get top 20 places from entertainment article bodies\nsorted_ep = sorted(entertainment_places.items(), key=operator.itemgetter(1))\nsorted_ep.reverse()\nsorted_ep[:20]",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
]
] |
e7e05d34317c7e95f9b671a24db7df9abd302720 | 939,595 | ipynb | Jupyter Notebook | Examples/3_GeneratePseudoData.ipynb | DavidGomezC/BOMBSbeta | 6061c8f972cf8e59e1bd6ed1fa0b62902a831356 | [
"MIT"
] | null | null | null | Examples/3_GeneratePseudoData.ipynb | DavidGomezC/BOMBSbeta | 6061c8f972cf8e59e1bd6ed1fa0b62902a831356 | [
"MIT"
] | null | null | null | Examples/3_GeneratePseudoData.ipynb | DavidGomezC/BOMBSbeta | 6061c8f972cf8e59e1bd6ed1fa0b62902a831356 | [
"MIT"
] | null | null | null | 408.69726 | 163,446 | 0.895505 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7e0652893813e627aa91d48884ffa2596cf555e | 7,841 | ipynb | Jupyter Notebook | create_datasets.ipynb | RTANC/malaria-cnn | 430660d462e6ec210d541db988dfdf611cfe978d | [
"MIT"
] | null | null | null | create_datasets.ipynb | RTANC/malaria-cnn | 430660d462e6ec210d541db988dfdf611cfe978d | [
"MIT"
] | null | null | null | create_datasets.ipynb | RTANC/malaria-cnn | 430660d462e6ec210d541db988dfdf611cfe978d | [
"MIT"
] | null | null | null | 42.155914 | 140 | 0.669047 | [
[
[
"# import json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom skimage.filters import threshold_otsu\nfrom skimage.io import imread, imsave\nfrom skimage.color import rgb2gray\nfrom skimage.measure import label, regionprops\nfrom skimage.util import crop\nimport json\nimport os\nimport uuid\nfrom PIL import Image, ImageFile, ImageFont, ImageDraw, ImageEnhance\nImageFile.LOAD_TRUNCATED_IMAGES = True\nimport random\nimport math\nimport imageio\nimport imgaug as ia\nimport glob\nfrom imgaug import augmenters as iaa\nfrom imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage\nimport shutil\nimport math\nimport requests\nfrom io import BytesIO",
"_____no_output_____"
],
[
"files = glob.glob(\"negative0*/*\")\nclass_name = \"negative\"\ntest_counts = {\n \"negative\": 1,\n \"positive\": 1\n}\n\nfor file in files:\n shutil.copy(file,os.path.join(\"datasets/train/\" + class_name, class_name + \"-{:010d}.jpg\".format(test_counts[class_name])))\n test_counts[class_name] += 1\n\nfiles = glob.glob(\"positive0*/*\")\nclass_name = \"positive\"\nfor file in files:\n shutil.copy(file,os.path.join(\"datasets/train/\" + class_name, class_name + \"-{:010d}.jpg\".format(test_counts[class_name])))\n test_counts[class_name] += 1\n",
"_____no_output_____"
],
[
"files = glob.glob(\"datasets/train/negative/*.jpg\")\nrandom.shuffle(files)\nfor file in files[-20:]:\n newfile = file.replace(\"/train\",\"/val\")\n shutil.move(file,newfile)\n print(file + \"-->\" + newfile)",
"datasets/train/negative/negative-0000000080.jpg-->datasets/val/negative/negative-0000000080.jpg\ndatasets/train/negative/negative-0000000044.jpg-->datasets/val/negative/negative-0000000044.jpg\ndatasets/train/negative/negative-0000000118.jpg-->datasets/val/negative/negative-0000000118.jpg\ndatasets/train/negative/negative-0000000038.jpg-->datasets/val/negative/negative-0000000038.jpg\ndatasets/train/negative/negative-0000000024.jpg-->datasets/val/negative/negative-0000000024.jpg\ndatasets/train/negative/negative-0000000089.jpg-->datasets/val/negative/negative-0000000089.jpg\ndatasets/train/negative/negative-0000000051.jpg-->datasets/val/negative/negative-0000000051.jpg\ndatasets/train/negative/negative-0000000112.jpg-->datasets/val/negative/negative-0000000112.jpg\ndatasets/train/negative/negative-0000000075.jpg-->datasets/val/negative/negative-0000000075.jpg\ndatasets/train/negative/negative-0000000002.jpg-->datasets/val/negative/negative-0000000002.jpg\ndatasets/train/negative/negative-0000000072.jpg-->datasets/val/negative/negative-0000000072.jpg\ndatasets/train/negative/negative-0000000061.jpg-->datasets/val/negative/negative-0000000061.jpg\ndatasets/train/negative/negative-0000000209.jpg-->datasets/val/negative/negative-0000000209.jpg\ndatasets/train/negative/negative-0000000153.jpg-->datasets/val/negative/negative-0000000153.jpg\ndatasets/train/negative/negative-0000000018.jpg-->datasets/val/negative/negative-0000000018.jpg\ndatasets/train/negative/negative-0000000165.jpg-->datasets/val/negative/negative-0000000165.jpg\ndatasets/train/negative/negative-0000000004.jpg-->datasets/val/negative/negative-0000000004.jpg\ndatasets/train/negative/negative-0000000103.jpg-->datasets/val/negative/negative-0000000103.jpg\ndatasets/train/negative/negative-0000000019.jpg-->datasets/val/negative/negative-0000000019.jpg\ndatasets/train/negative/negative-0000000073.jpg-->datasets/val/negative/negative-0000000073.jpg\n"
],
[
"files = glob.glob(\"datasets/train/positive/*.jpg\")\nrandom.shuffle(files)\nfor file in files[-20:]:\n newfile = file.replace(\"/train\",\"/val\")\n shutil.move(file,newfile)\n print(file + \"-->\" + newfile)",
"datasets/train/positive/positive-0000000095.jpg-->datasets/val/positive/positive-0000000095.jpg\ndatasets/train/positive/positive-0000000072.jpg-->datasets/val/positive/positive-0000000072.jpg\ndatasets/train/positive/positive-0000000017.jpg-->datasets/val/positive/positive-0000000017.jpg\ndatasets/train/positive/positive-0000000001.jpg-->datasets/val/positive/positive-0000000001.jpg\ndatasets/train/positive/positive-0000000013.jpg-->datasets/val/positive/positive-0000000013.jpg\ndatasets/train/positive/positive-0000000120.jpg-->datasets/val/positive/positive-0000000120.jpg\ndatasets/train/positive/positive-0000000144.jpg-->datasets/val/positive/positive-0000000144.jpg\ndatasets/train/positive/positive-0000000155.jpg-->datasets/val/positive/positive-0000000155.jpg\ndatasets/train/positive/positive-0000000003.jpg-->datasets/val/positive/positive-0000000003.jpg\ndatasets/train/positive/positive-0000000207.jpg-->datasets/val/positive/positive-0000000207.jpg\ndatasets/train/positive/positive-0000000192.jpg-->datasets/val/positive/positive-0000000192.jpg\ndatasets/train/positive/positive-0000000022.jpg-->datasets/val/positive/positive-0000000022.jpg\ndatasets/train/positive/positive-0000000047.jpg-->datasets/val/positive/positive-0000000047.jpg\ndatasets/train/positive/positive-0000000076.jpg-->datasets/val/positive/positive-0000000076.jpg\ndatasets/train/positive/positive-0000000048.jpg-->datasets/val/positive/positive-0000000048.jpg\ndatasets/train/positive/positive-0000000097.jpg-->datasets/val/positive/positive-0000000097.jpg\ndatasets/train/positive/positive-0000000085.jpg-->datasets/val/positive/positive-0000000085.jpg\ndatasets/train/positive/positive-0000000208.jpg-->datasets/val/positive/positive-0000000208.jpg\ndatasets/train/positive/positive-0000000190.jpg-->datasets/val/positive/positive-0000000190.jpg\ndatasets/train/positive/positive-0000000119.jpg-->datasets/val/positive/positive-0000000119.jpg\n"
]
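The two cells above hold out a fixed 20 images per class for validation. A small sketch of the same move expressed as a fraction of each class, which stays balanced if the class sizes change; the 10% figure is only an example choice, not a value used in this notebook:

```python
import glob, math, random, shutil

val_fraction = 0.1  # example hold-out fraction

for class_name in ('negative', 'positive'):
    files = glob.glob('datasets/train/{}/*.jpg'.format(class_name))
    random.shuffle(files)
    n_val = math.ceil(len(files) * val_fraction)
    for file in files[:n_val]:
        shutil.move(file, file.replace('/train', '/val'))
```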
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7e0659d2ebad26402d285a785c2681bec5fbcb5 | 174,623 | ipynb | Jupyter Notebook | convert HMMER.ipynb | cactuskid/kmer | 616a77b5ab603fcefa5b4d5db675b47022ccfab0 | [
"MIT"
] | null | null | null | convert HMMER.ipynb | cactuskid/kmer | 616a77b5ab603fcefa5b4d5db675b47022ccfab0 | [
"MIT"
] | null | null | null | convert HMMER.ipynb | cactuskid/kmer | 616a77b5ab603fcefa5b4d5db675b47022ccfab0 | [
"MIT"
] | null | null | null | 58.894772 | 539 | 0.451762 | [
[
[
"import pandas as pd\nimport glob\n\nimport uniprot",
"_____no_output_____"
],
[
"meta = glob.glob( './scaffolds/CONTIGS-SCAFFOLDS-Sheet1-Nov2018/*fasta.csv')\ngeno = glob.glob( './orfscan/*fasta.csv')\n\n\n\ncomplete_geno = glob.glob('./proteomes/done/*.csv')\nfastas = glob.glob('./proteomes/done/*.fasta')\nprint(fastas)\norfsbg = []\nfor file in fastas:\n seqids , _ = uniprot.read_fasta(file)\n orfsbg += seqids\n \nheader = 'targetname accession queryid accession2 E-value score bias domain-E-value domain-score domain-bias exp reg clu ov env dom rep inc description'.split()\n\nprint(geno)\n\n",
"['./proteomes/done/Thermotogae bacterium isolate B96_G9 B96_Guay9_scaffold_13443.fasta', './proteomes/done/Candidatus Thorarchaeota archaeon isolate B59_G1 B59.fasta', './proteomes/done/Candidatus Bathyarchaeota archaeon isolate B64_G1.fasta', './proteomes/done/Haloferax spQ22.fasta', './proteomes/done/Natrinema altunense strain AJ2 N_altunense.fasta', './proteomes/done/HaloplanusnatansDSM .fasta', './proteomes/done/Candidatus Geothermarchaeota archaeon isolate B85_G16.fasta', './proteomes/done/Archaeon GW2011_AR20.fasta']\n['./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv', './orfscan/NZ_LOEP01000012.1[1..56511].flat.fasta.csv', './orfscan/QNAN01000020.1[1..7966].flat.fasta.csv', './orfscan/QMWF01000152.1[1..2919].flat.fasta.csv', './orfscan/QMYS01000134.1[1..4823].flat.fasta.csv', './orfscan/NZ_KE386573.1[2555621..2634980].flat.fasta.csv', './orfscan/CP010426.1[733481..806920].flat.fasta.csv']\n"
],
[
"dfs = []\nfor filename in geno:\n framedict = {}\n with open(filename , 'r') as infile:\n for i,line in enumerate(infile):\n if line[0] != '#':\n words = line.split()\n words[18] = ''.join([ w + ' ' for w in words[18:-1]])\n #print(words)\n framedict[i] = dict(zip( header , words[0:19]))\n df = pd.DataFrame.from_dict(framedict , orient='index')\n df['infile'] = filename\n #df.to_csv(filename + '.reformat.csv')\n dfs.append(df)\nglobaldf = pd.concat(dfs)\nglobaldf['domain'] = globaldf.accession.map(lambda x : str(int(x.split('.')[0].replace('PF',''))) )\n\n#globaldf = globaldf[globaldf['E-value']< .001]\nprint(globaldf)\ndfs=[]\ncomplete_geno = glob.glob('./proteomes/done/*reformat.csv')\nfor csv in complete_geno:\n df = pd.DataFrame.from_csv(csv)\n \n #df.to_csv(filename + '.reformat.csv')\n print(len(df))\n print(csv)\n \n dfs.append(df)\n\nbgdf = pd.concat(dfs , ignore_index= True)\nbgdf['domain'] = bgdf.accession.map(lambda x : str(int(x.split('.')[0].replace('PF',''))) )\n#globaldf = bgdf[bgdf['E-value']< .001]\nprint(bgdf)\n",
" targetname accession queryid accession2 E-value score bias \\\n3 YdjM PF04307.14 RKZ11176.1 - 1.1e-10 41.1 4.5 \n4 YwiC PF14256.6 RKZ11176.1 - 0.64 10.3 9.9 \n5 YdjM PF04307.14 RKZ11176.1 - 1.1e-10 41.1 4.5 \n6 YwiC PF14256.6 RKZ11176.1 - 0.64 10.3 9.9 \n7 Prok-JAB PF14464.6 RKZ11177.1 - 0.0016 18.2 0.1 \n8 RadC PF04002.15 RKZ11177.1 - 0.064 13.2 0.0 \n9 Prok-JAB PF14464.6 RKZ11177.1 - 0.0016 18.2 0.1 \n10 RadC PF04002.15 RKZ11177.1 - 0.064 13.2 0.0 \n11 KAR9 PF08580.10 RKZ11178.1 - 0.053 12.1 0.2 \n12 DUF948 PF06103.11 RKZ11178.1 - 0.14 12.4 0.0 \n13 TMEM251 PF15190.6 RKZ11178.1 - 0.23 11.4 1.0 \n14 KAR9 PF08580.10 RKZ11178.1 - 0.053 12.1 0.2 \n15 DUF948 PF06103.11 RKZ11178.1 - 0.14 12.4 0.0 \n16 TMEM251 PF15190.6 RKZ11178.1 - 0.23 11.4 1.0 \n17 PTS_EIIA_2 PF00359.22 RKZ11182.1 - 0.037 13.9 1.6 \n18 IMS PF00817.20 RKZ11182.1 - 0.04 13.9 0.1 \n19 PTS_EIIA_2 PF00359.22 RKZ11182.1 - 0.037 13.9 1.6 \n20 IMS PF00817.20 RKZ11182.1 - 0.04 13.9 0.1 \n21 DUF148 PF02520.17 RKZ11184.1 - 6.7e-05 23.0 2.7 \n22 Bacillus_HBL PF05791.11 RKZ11184.1 - 0.0013 18.6 1.1 \n23 MSP1_C PF07462.11 RKZ11184.1 - 0.0017 17.2 2.6 \n24 SKA2 PF16740.5 RKZ11184.1 - 0.0022 17.8 1.6 \n25 MobL PF18555.1 RKZ11184.1 - 0.0024 17.2 4.8 \n26 TRAF_BIRC3_bd PF16673.5 RKZ11184.1 - 0.0037 17.0 4.3 \n27 Fib_alpha PF08702.10 RKZ11184.1 - 0.0094 16.1 6.1 \n28 DUF4200 PF13863.6 RKZ11184.1 - 0.01 16.2 5.2 \n29 DUF3450 PF11932.8 RKZ11184.1 - 0.011 15.0 4.1 \n30 EAP30 PF04157.16 RKZ11184.1 - 0.013 14.8 0.1 \n31 Phage_tail_S PF05069.13 RKZ11184.1 - 0.019 14.8 0.2 \n32 Syntaxin_2 PF14523.6 RKZ11184.1 - 0.019 15.3 3.2 \n.. ... ... ... ... ... ... ... \n658 tRNA-synt_2d PF01409.20 AJF63140.1 - 0.11 11.9 0.2 \n659 tRNA-synt_2 PF00152.20 AJF63140.1 - 7.2e-76 255.3 0.0 \n660 tRNA_anti-codon PF01336.25 AJF63140.1 - 5.9e-13 48.6 0.1 \n661 RHH_3 PF12651.7 AJF63140.1 - 0.026 14.5 0.1 \n662 tRNA-synt_2d PF01409.20 AJF63140.1 - 0.11 11.9 0.2 \n663 ORC2 PF04084.14 AJF63141.1 - 0.022 13.7 0.2 \n664 DUF2072 PF09845.9 AJF63141.1 - 0.058 13.6 0.3 \n665 ORC2 PF04084.14 AJF63141.1 - 0.022 13.7 0.2 \n666 DUF2072 PF09845.9 AJF63141.1 - 0.058 13.6 0.3 \n667 Wzt_C PF14524.6 AJF63142.1 - 0.003 17.6 0.8 \n668 Invasin_D3 PF09134.10 AJF63142.1 - 0.017 15.4 0.4 \n669 Ribosomal_L9_N PF01281.19 AJF63142.1 - 0.075 12.6 0.1 \n670 Wzt_C PF14524.6 AJF63142.1 - 0.003 17.6 0.8 \n671 Invasin_D3 PF09134.10 AJF63142.1 - 0.017 15.4 0.4 \n672 Ribosomal_L9_N PF01281.19 AJF63142.1 - 0.075 12.6 0.1 \n673 MazE_antitoxin PF04014.18 AJF63144.1 - 2.2e-07 30.6 0.1 \n674 PhoU PF01895.19 AJF63144.1 - 0.0023 18.4 1.2 \n675 MazE_antitoxin PF04014.18 AJF63144.1 - 2.2e-07 30.6 0.1 \n676 PhoU PF01895.19 AJF63144.1 - 0.0023 18.4 1.2 \n677 DUF4969 PF16339.5 AJF63145.1 - 0.13 12.6 0.0 \n678 CCB1 PF12046.8 AJF63145.1 - 0.15 11.4 0.0 \n679 DUF4969 PF16339.5 AJF63145.1 - 0.13 12.6 0.0 \n680 CCB1 PF12046.8 AJF63145.1 - 0.15 11.4 0.0 \n681 Pkinase PF00069.25 AJF63146.1 - 1.4e-18 67.2 0.0 \n682 Pkinase_Tyr PF07714.17 AJF63146.1 - 2.1e-12 46.9 0.0 \n683 Glyco_hydro_88 PF07470.13 AJF63146.1 - 3.1e-09 36.4 5.9 \n684 C5-epim_C PF06662.13 AJF63146.1 - 0.00014 21.5 8.5 \n685 Glyco_hydro_127 PF07944.12 AJF63146.1 - 0.0004 19.0 0.2 \n686 Glyco_hydro_76 PF03663.14 AJF63146.1 - 0.00053 19.6 3.9 \n687 TAT_signal PF10518.9 AJF63146.1 - 0.048 13.5 0.1 \n\n domain-E-value domain-score domain-bias ... reg clu ov env dom rep inc \\\n3 1.4e-10 40.8 4.5 ... 1 0 0 1 1 1 1 \n4 0.51 10.6 7.2 ... 1 1 1 2 2 2 0 \n5 1.4e-10 40.8 4.5 ... 1 0 0 1 1 1 1 \n6 0.51 10.6 7.2 ... 1 1 1 2 2 2 0 \n7 0.0024 17.6 0.1 ... 
1 1 0 1 1 1 1 \n8 0.095 12.7 0.0 ... 1 1 0 1 1 1 0 \n9 0.0024 17.6 0.1 ... 1 1 0 1 1 1 1 \n10 0.095 12.7 0.0 ... 1 1 0 1 1 1 0 \n11 0.06 11.9 0.2 ... 1 0 0 1 1 1 0 \n12 0.15 12.3 0.0 ... 1 0 0 1 1 1 0 \n13 0.28 11.2 1.0 ... 1 0 0 1 1 1 0 \n14 0.06 11.9 0.2 ... 1 0 0 1 1 1 0 \n15 0.15 12.3 0.0 ... 1 0 0 1 1 1 0 \n16 0.28 11.2 1.0 ... 1 0 0 1 1 1 0 \n17 13 5.7 0.1 ... 1 1 2 3 3 3 0 \n18 0.074 13.0 0.0 ... 2 0 0 2 2 2 0 \n19 13 5.7 0.1 ... 1 1 2 3 3 3 0 \n20 0.074 13.0 0.0 ... 2 0 0 2 2 2 0 \n21 7.3e-05 22.8 2.7 ... 1 0 0 1 1 1 1 \n22 0.0027 17.5 0.5 ... 1 1 1 2 2 2 1 \n23 0.0018 17.1 2.6 ... 1 0 0 1 1 1 1 \n24 0.28 11.0 0.1 ... 1 1 1 2 2 2 1 \n25 0.0029 16.9 4.8 ... 1 0 0 1 1 1 1 \n26 0.099 12.4 0.3 ... 1 1 0 2 2 2 1 \n27 0.011 16.0 6.1 ... 1 0 0 1 1 1 1 \n28 0.013 15.9 5.2 ... 1 0 0 1 1 1 0 \n29 0.013 14.8 4.1 ... 1 0 0 1 1 1 0 \n30 0.015 14.6 0.1 ... 1 0 0 1 1 1 0 \n31 0.03 14.2 0.2 ... 1 1 0 1 1 1 0 \n32 0.11 12.9 3.1 ... 1 1 0 1 1 1 0 \n.. ... ... ... ... .. .. .. .. .. .. .. \n658 3.4 7.1 0.0 ... 3 0 0 3 3 3 0 \n659 9e-76 254.9 0.0 ... 1 0 0 1 1 1 1 \n660 1.3e-12 47.5 0.1 ... 1 0 0 1 1 1 1 \n661 0.1 12.6 0.1 ... 1 0 0 1 1 1 0 \n662 3.4 7.1 0.0 ... 3 0 0 3 3 3 0 \n663 0.023 13.6 0.2 ... 1 0 0 1 1 1 0 \n664 0.063 13.5 0.3 ... 1 0 0 1 1 1 0 \n665 0.023 13.6 0.2 ... 1 0 0 1 1 1 0 \n666 0.063 13.5 0.3 ... 1 0 0 1 1 1 0 \n667 0.0047 17.0 0.8 ... 1 0 0 1 1 1 1 \n668 0.025 14.9 0.4 ... 1 0 0 1 1 1 0 \n669 0.14 11.7 0.1 ... 1 0 0 1 1 1 0 \n670 0.0047 17.0 0.8 ... 1 0 0 1 1 1 1 \n671 0.025 14.9 0.4 ... 1 0 0 1 1 1 0 \n672 0.14 11.7 0.1 ... 1 0 0 1 1 1 0 \n673 5.2e-07 29.4 0.1 ... 1 0 0 1 1 1 1 \n674 0.05 14.1 0.1 ... 1 1 1 2 2 2 1 \n675 5.2e-07 29.4 0.1 ... 1 0 0 1 1 1 1 \n676 0.05 14.1 0.1 ... 1 1 1 2 2 2 1 \n677 0.15 12.4 0.0 ... 1 0 0 1 1 1 0 \n678 0.16 11.3 0.0 ... 1 0 0 1 1 1 0 \n679 0.15 12.4 0.0 ... 1 0 0 1 1 1 0 \n680 0.16 11.3 0.0 ... 1 0 0 1 1 1 0 \n681 2.8e-18 66.2 0.0 ... 1 0 0 1 1 1 1 \n682 6e-12 45.4 0.0 ... 2 0 0 2 2 2 1 \n683 1.4e-08 34.2 5.9 ... 1 1 0 1 1 1 1 \n684 0.06 12.8 0.1 ... 3 2 1 4 4 4 2 \n685 0.17 10.3 0.2 ... 2 0 0 2 2 2 2 \n686 0.0016 18.0 0.7 ... 2 0 0 2 2 2 1 \n687 0.048 13.5 0.1 ... 2 0 0 2 2 2 0 \n\n description \\\n3 LexA-binding, inner membrane-associated putative \n4 YwiC-like \n5 LexA-binding, inner membrane-associated putative \n6 YwiC-like \n7 Prokaryotic homologs of the JAB \n8 RadC-like JAB \n9 Prokaryotic homologs of the JAB \n10 RadC-like JAB \n11 Yeast cortical protein \n12 Bacterial protein of unknown function \n13 Transmembrane protein \n14 Yeast cortical protein \n15 Bacterial protein of unknown function \n16 Transmembrane protein \n17 Phosphoenolpyruvate-dependent sugar phosphotra... \n18 impB/mucB/samB \n19 Phosphoenolpyruvate-dependent sugar phosphotra... \n20 impB/mucB/samB \n21 Domain of unknown function \n22 Bacillus haemolytic enterotoxin \n23 Merozoite surface protein 1 (MSP1) \n24 Spindle and kinetochore-associated protein \n25 MobL \n26 TNF receptor-associated factor BIRC3 binding \n27 Fibrinogen alpha/beta chain \n28 Domain of unknown function \n29 Protein of unknown function \n30 EAP30/Vps36 \n31 Phage virion morphogenesis \n32 Syntaxin-like \n.. ... 
\n658 tRNA synthetases class II core domain \n659 tRNA synthetases class II (D, K and \n660 OB-fold nucleic acid binding \n661 Ribbon-helix-helix \n662 tRNA synthetases class II core domain \n663 Origin recognition complex subunit \n664 Zn-ribbon containing \n665 Origin recognition complex subunit \n666 Zn-ribbon containing \n667 Wzt C-terminal \n668 Invasin, domain \n669 Ribosomal protein L9, N-terminal \n670 Wzt C-terminal \n671 Invasin, domain \n672 Ribosomal protein L9, N-terminal \n673 Antidote-toxin recognition MazE, bacterial \n674 PhoU \n675 Antidote-toxin recognition MazE, bacterial \n676 PhoU \n677 Domain of unknown function \n678 Cofactor assembly of complex C subunit \n679 Domain of unknown function \n680 Cofactor assembly of complex C subunit \n681 Protein kinase \n682 Protein tyrosine \n683 Glycosyl Hydrolase Family \n684 D-glucuronyl C5-epimerase \n685 Beta-L-arabinofuranosidase, \n686 Glycosyl hydrolase family \n687 TAT (twin-arginine translocation) pathway signal \n\n infile domain \n3 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4307 \n4 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 14256 \n5 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4307 \n6 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 14256 \n7 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 14464 \n8 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4002 \n9 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 14464 \n10 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4002 \n11 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 8580 \n12 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 6103 \n13 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 15190 \n14 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 8580 \n15 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 6103 \n16 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 15190 \n17 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 359 \n18 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 817 \n19 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 359 \n20 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 817 \n21 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2520 \n22 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 5791 \n23 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 7462 \n24 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 16740 \n25 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 18555 \n26 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 16673 \n27 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 8702 \n28 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 13863 \n29 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 11932 \n30 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4157 \n31 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 5069 \n32 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 14523 \n.. ... ... \n658 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 \n659 ./orfscan/CP010426.1[733481..806920].flat.fast... 152 \n660 ./orfscan/CP010426.1[733481..806920].flat.fast... 1336 \n661 ./orfscan/CP010426.1[733481..806920].flat.fast... 12651 \n662 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 \n663 ./orfscan/CP010426.1[733481..806920].flat.fast... 4084 \n664 ./orfscan/CP010426.1[733481..806920].flat.fast... 9845 \n665 ./orfscan/CP010426.1[733481..806920].flat.fast... 4084 \n666 ./orfscan/CP010426.1[733481..806920].flat.fast... 9845 \n667 ./orfscan/CP010426.1[733481..806920].flat.fast... 14524 \n668 ./orfscan/CP010426.1[733481..806920].flat.fast... 
9134 \n669 ./orfscan/CP010426.1[733481..806920].flat.fast... 1281 \n670 ./orfscan/CP010426.1[733481..806920].flat.fast... 14524 \n671 ./orfscan/CP010426.1[733481..806920].flat.fast... 9134 \n672 ./orfscan/CP010426.1[733481..806920].flat.fast... 1281 \n673 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 \n674 ./orfscan/CP010426.1[733481..806920].flat.fast... 1895 \n675 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 \n676 ./orfscan/CP010426.1[733481..806920].flat.fast... 1895 \n677 ./orfscan/CP010426.1[733481..806920].flat.fast... 16339 \n678 ./orfscan/CP010426.1[733481..806920].flat.fast... 12046 \n679 ./orfscan/CP010426.1[733481..806920].flat.fast... 16339 \n680 ./orfscan/CP010426.1[733481..806920].flat.fast... 12046 \n681 ./orfscan/CP010426.1[733481..806920].flat.fast... 69 \n682 ./orfscan/CP010426.1[733481..806920].flat.fast... 7714 \n683 ./orfscan/CP010426.1[733481..806920].flat.fast... 7470 \n684 ./orfscan/CP010426.1[733481..806920].flat.fast... 6662 \n685 ./orfscan/CP010426.1[733481..806920].flat.fast... 7944 \n686 ./orfscan/CP010426.1[733481..806920].flat.fast... 3663 \n687 ./orfscan/CP010426.1[733481..806920].flat.fast... 10518 \n\n[2195 rows x 21 columns]\n14792\n./proteomes/done/HaloplanusnatansDSM .fasta.csv.reformat.csv\n7050\n./proteomes/done/Candidatus Bathyarchaeota archaeon isolate B64_G1.fasta.csv.reformat.csv\n32109\n./proteomes/done/Natrinema altunense strain AJ2 N_altunense.fasta.csv.reformat.csv\n"
],
[
"print(len(set(bgdf.queryid).intersection(set(globaldf.queryid))))",
"208\n"
],
[
"mapdict={}\nwith open('rdb_pfam_to_interpro.dat','r') as mapper:\n for line in mapper:\n words=line.split()\n mapdict[words[0]] = {'interpro':words[1]}\npfam2interpor=pd.DataFrame.from_dict(mapdict, orient='index')\ngodict={}\n\nwith open('rdb_interpro_and_go.dat', 'r', encoding = \"ISO-8859-1\" )as godat:\n for line in godat:\n \n if 'IPR' in line:\n dom = line.split()[1]\n \n if 'GO:' in line and 'IPR' in dom:\n if dom not in godict:\n godict[dom]= [] \n godict[dom] = {'goterms':''.join([ l.replace('\\\\','') +',' for l in line.split() if 'GO' in l ])[0:-1]}\npfam2interpor['pfam'] = pfam2interpor.index.map( lambda x: str(x))\nip2go = pd.DataFrame.from_dict(godict, orient='index')\ndef formatId(x):\n try :\n y = str(int(x.replace('IPR','')))\n except:\n y = None\n return y\n\nip2go['ipnum']= ip2go.index.map(formatId)\nip2go = ip2go.dropna(axis=0)\nprint(ip2go)\n\npfam2interpor = pfam2interpor.merge( ip2go, left_on= 'interpro' , right_on= 'ipnum' , how='left')\npfam2interpor = pfam2interpor.dropna(axis = 0)\n\nprint(pfam2interpor)",
" goterms ipnum\nIPR000003 GO:0005496,GO:0005634,GO:0006355 3\nIPR000005 GO:0003700,GO:0005622,GO:0006355 5\nIPR000006 GO:0005505 6\nIPR000009 GO:0008601,GO:0000159,GO:0007165 9\nIPR000011 GO:0004839,GO:0006512 11\nIPR000012 GO:0005554 12\nIPR000013 GO:0008237,GO:0005576,GO:0006508 13\nIPR000015 GO:0005215,GO:0016020,GO:0006810 15\nIPR000018 GO:0045028,GO:0016021,GO:0007186 18\nIPR000020 GO:0005576 20\nIPR000022 GO:0004075,GO:0009343 22\nIPR000023 GO:0006096 23\nIPR000024 GO:0004888,GO:0016020,GO:0007275 24\nIPR000025 GO:0008502,GO:0016021,GO:0007186 25\nIPR000026 GO:0004521 26\nIPR000028 GO:0004601,GO:0006804 28\nIPR000031 GO:0004638,GO:0009320,GO:0006189 31\nIPR000033 GO:0016020 33\nIPR000035 GO:0003905,GO:0006281 35\nIPR000036 GO:0008236,GO:0009279,GO:0006508 36\nIPR000037 GO:0003723,GO:0006412 37\nIPR000039 GO:0003735,GO:0005840,GO:0006412 39\nIPR000040 GO:0005524,GO:0005634,GO:0006355 40\nIPR000043 GO:0004013,GO:0006730 43\nIPR000044 GO:0005554 44\nIPR000045 GO:0008234,GO:0016020,GO:0006508 45\nIPR000046 GO:0004995,GO:0016021,GO:0007186 46\nIPR000047 GO:0003700,GO:0005634,GO:0006355 47\nIPR000051 GO:0008757 51\nIPR000053 GO:0006206 53\n... ... ...\nIPR006705 GO:0003824,GO:0016114 6705\nIPR006708 GO:0005777 6708\nIPR006710 GO:0004553 6710\nIPR006711 GO:0016563,GO:0005634,GO:0006350 6711\nIPR006712 GO:0016563,GO:0005634,GO:0006350 6712\nIPR006713 GO:0005515 6713\nIPR006714 GO:0030288,GO:0001539 6714\nIPR006716 GO:0000247,GO:0005783,GO:0006696 6716\nIPR006721 GO:0003936,GO:0005739,GO:0006754 6721\nIPR006722 GO:0005478,GO:0005622,GO:0006888 6722\nIPR006730 GO:0005634,GO:0007050 6730\nIPR006733 GO:0019031 6733\nIPR006741 GO:0016020 6741\nIPR006742 GO:0005179,GO:0005576,GO:0019953 6742\nIPR006746 GO:0006508 6746\nIPR006748 GO:0016773,GO:0019748 6748\nIPR006752 GO:0001539 6752\nIPR006754 GO:0003697 6754\nIPR006756 GO:0018662 6756\nIPR006759 GO:0016758,GO:0016020 6759\nIPR006761 GO:0005515,GO:0009790 6761\nIPR006762 GO:0003925,GO:0005737,GO:0008151 6762\nIPR006765 GO:0030639 6765\nIPR006774 GO:0003677,GO:0005634,GO:0006338 6774\nIPR006777 GO:0019028,GO:0016032 6777\nIPR006779 GO:0003677,GO:0005634 6779\nIPR006781 GO:0005576,GO:0042157 6781\nIPR006782 GO:0008083,GO:0016020,GO:0008151 6782\nIPR007116 GO:0006729 7116\nIPR007118 GO:0005576 7118\n\n[4020 rows x 2 columns]\n interpro pfam goterms ipnum\n0 2166 1 GO:0005524,GO:0006350 2166\n1 2969 10 GO:0008289,GO:0006810 2969\n2 1489 100 GO:0015070,GO:0005576,GO:0009405 1489\n6 3949 1003 GO:0005249,GO:0016020,GO:0006813 3949\n7 306 1005 GO:0008270 306\n11 559 1009 GO:0005524,GO:0009396 559\n13 1881 1010 GO:0005509 1881\n15 1595 1012 GO:0005554 1595\n17 2870 1015 GO:0004222,GO:0006508 2870\n22 3631 1020 GO:0015025,GO:0007166 3631\n23 827 1021 GO:0008009,GO:0005576,GO:0006955 827\n24 1508 1022 GO:0005234,GO:0016020,GO:0006811 1508\n25 3932 1023 GO:0016020,GO:0016049 3932\n26 5738 1028 GO:0003916,GO:0005694,GO:0006268 5738\n27 204 103 GO:0016499,GO:0016021,GO:0007186 204\n28 298 1031 GO:0004129,GO:0016020,GO:0006118 298\n29 2167 1032 GO:0005488,GO:0005743,GO:0006810 2167\n30 4102 1033 GO:0003950,GO:0005634,GO:0006471 4102\n31 5070 1034 GO:0019031 5070\n33 590 1036 GO:0004421,GO:0006084 590\n35 2137 1038 GO:0008800,GO:0017001 2137\n41 2418 1043 GO:0003700,GO:0005634,GO:0006355 2418\n44 1817 1046 GO:0005000,GO:0016021,GO:0007186 1817\n45 535 1047 GO:0005198,GO:0006928 535\n46 1749 1048 GO:0016519,GO:0016020,GO:0007186 1749\n48 4734 105 GO:0016021 4734\n49 3174 1050 GO:0003677,GO:0006355 3174\n50 271 1051 GO:0003735,GO:0005840,GO:0006412 271\n51 825 1052 
GO:0005554 825\n54 1600 1055 GO:0005180,GO:0005576 1600\n... ... ... ... ...\n4155 3295 939 GO:0005149,GO:0005576,GO:0006955 3295\n4156 4070 94 GO:0016494,GO:0016021,GO:0007186 4070\n4157 2502 940 GO:0008745,GO:0009253 2502\n4160 2322 943 GO:0005489,GO:0005746,GO:0006118 2322\n4162 4842 945 GO:0015377,GO:0016021,GO:0006821 4842\n4163 1981 946 GO:0008047,GO:0005576,GO:0016042 1981\n4165 1250 948 GO:0008270,GO:0005975 1250\n4166 3089 949 GO:0016787,GO:0006725 3089\n4167 3353 95 GO:0005351,GO:0016020,GO:0009401 3353\n4170 2975 952 GO:0005525,GO:0007186 2975\n4174 526 957 GO:0004872,GO:0005788 526\n4175 612 958 GO:0005554,GO:0016021 612\n4176 1000 959 GO:0004553,GO:0005975 1000\n4177 1355 96 GO:0004918,GO:0016021,GO:0006935 1355\n4178 1394 960 GO:0004221,GO:0006511 1394\n4182 2204 964 GO:0006573 2204\n4190 1945 972 GO:0005524,GO:0005634,GO:0006289 1945\n4191 2016 974 GO:0004601,GO:0006804 2016\n4193 6721 976 GO:0003936,GO:0005739,GO:0006754 6721\n4194 6722 977 GO:0005478,GO:0005622,GO:0006888 6722\n4198 2036 984 GO:0005554 2036\n4199 1851 985 GO:0005215,GO:0016020,GO:0006810 1851\n4200 2225 986 GO:0006694 2225\n4201 1682 987 GO:0005261,GO:0016020,GO:0006812 1682\n4202 1962 988 GO:0004066,GO:0006529 1962\n4204 4253 99 GO:0005554 4253\n4205 3430 990 GO:0006725 3430\n4210 4097 995 GO:0016462,GO:0005737 4097\n4212 4846 997 GO:0009306 4846\n4213 453 999 GO:0004107,GO:0009073 453\n\n[2411 rows x 4 columns]\n"
],
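With the merged `pfam2interpor` table built above, a raw Pfam accession can be taken to GO terms by stripping the version suffix and converting it to the same numeric-string key stored in the `pfam` column. A small hedged sketch of that lookup; the accession shown is just an example:

```python
def pfam_to_go(accession, mapping):
    """Map a Pfam accession such as 'PF00069.25' to its GO-term string, if mapped."""
    key = str(int(accession.split('.')[0].replace('PF', '')))  # same key format as the 'pfam' column
    hits = mapping[mapping['pfam'] == key]
    return hits['goterms'].iloc[0] if len(hits) else None

print(pfam_to_go('PF00069.25', pfam2interpor))
```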
[
"globaldf1 = globaldf.merge( pfam2interpor, left_on= 'domain' , right_on= 'pfam' , how='left')\nannotdf = globaldf1[ globaldf1.goterms.notna() ]\nbgdf1 = bgdf.merge( pfam2interpor, left_on= 'domain' , right_on= 'pfam' , how='left')\nbgdf1 = bgdf1[ bgdf1.goterms.notna() ]\n",
"_____no_output_____"
],
[
"godict={}\ngocounts = {}\n\nfor quid in annotdf.queryid.unique():\n sub = annotdf[annotdf.queryid == quid ]\n godict[quid] = set(sub.goterms.tolist() )\n for go in gos:\n if go not in gocounts:\n gocounts[go]= 1 \n else:\n gocounts[go] +=1\nprint(len(godict))\n\nbgdict={}\n\nfor quid in bgdf1.queryid.unique():\n sub = bgdf1[bgdf1.queryid == quid ]\n bgdict[quid] =set(sub.goterms.tolist())\n \nprint(len(bgdf1.queryid.unique()))\n\n",
"97\n12199\n"
],
[
"print(len(annotdf.queryid.unique()))",
"97\n"
],
[
"\n",
"_____no_output_____"
],
[
"from goatools.base import download_go_basic_obo\nfrom goatools.obo_parser import GODag\nobodag = GODag(\"./go-basic.obo\")\nfrom goatools.associations import read_ncbi_gene2go",
"./go-basic.obo: fmt(1.2) rel(2019-02-13) 47,395 GO Terms\n"
],
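The cell above imports `download_go_basic_obo` but loads a local `go-basic.obo` directly. When the file is not already on disk, the usual goatools pattern is to download it first and pass the returned path to `GODag`; a short sketch, assuming network access:

```python
from goatools.base import download_go_basic_obo
from goatools.obo_parser import GODag

obo_fname = download_go_basic_obo()  # fetches go-basic.obo into the working directory if missing
obodag = GODag(obo_fname)
```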
[
"\nfrom goatools.go_enrichment import GOEnrichmentStudy\n\ngoeaobj = GOEnrichmentStudy(\n bgdf1.queryid.unique(), # List of protein-coding genes\n totaldict, # geneid/GO associations\n obodag, # Ontologies\n propagate_counts = False,\n alpha = 0.5, # default significance cut-off\n methods = ['fdr_bh'])",
"fisher module not installed. Falling back on scipy.stats.fisher_exact\n100% 12,199 of 12,199 population items found in association\n"
],
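`GOEnrichmentStudy` bundles the population ids, the id-to-GO associations, and the ontology DAG, so each later `run_study` call tests a study set against that background with Benjamini-Hochberg correction. A hedged sketch of keeping only enriched, corrected hits and writing them out; the 0.05 cut-off is a conventional choice (stricter than the alpha=0.5 above), and `wr_tsv` is assumed to be available in this goatools version:

```python
results = goeaobj.run_study(list(annotdf.queryid.unique()))

# Keep enriched ('e') terms that survive FDR correction at a conventional 5% threshold.
significant = [r for r in results if r.enrichment == 'e' and r.p_fdr_bh < 0.05]

goeaobj.wr_tsv('go_enrichment_significant.tsv', significant)
```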
[
"goea_results_all = goeaobj.run_study(list(annotdf.queryid.unique()))\ngoea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.5]\n\nfor res in goea_results_sig:\n print(str(res.GO) + '\\t' + str(res.p_fdr_bh) )\n\n\nfor name in annotdf.infile.unique():\n print(name)\n \n subdf = annotdf[ annotdf.infile == name ]\n print(subdf)\n goea_results_all = goeaobj.run_study(list(subdf.queryid.unique()))\n goea_results_sig = [r for r in goea_results_all if r.p_fdr_bh < 0.5]\n for res in goea_results_sig:\n print(str(res.GO) + '\\t' + str(res.p_fdr_bh) )",
"100% 97 of 97 study items found in association\n100% 97 of 97 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 20 GO terms are associated with 30 of 97 study items\n 2 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\nGO:0042254\t0.00027137569938561097\nGO:0000752\t0.45548359698359303\n./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv\n targetname accession queryid accession2 E-value score bias \\\n5 RadC PF04002.15 RKZ11177.1 - 0.064 13.2 0.0 \n7 RadC PF04002.15 RKZ11177.1 - 0.064 13.2 0.0 \n14 PTS_EIIA_2 PF00359.22 RKZ11182.1 - 0.037 13.9 1.6 \n15 IMS PF00817.20 RKZ11182.1 - 0.04 13.9 0.1 \n16 PTS_EIIA_2 PF00359.22 RKZ11182.1 - 0.037 13.9 1.6 \n17 IMS PF00817.20 RKZ11182.1 - 0.04 13.9 0.1 \n18 DUF148 PF02520.17 RKZ11184.1 - 6.7e-05 23.0 2.7 \n27 EAP30 PF04157.16 RKZ11184.1 - 0.013 14.8 0.1 \n33 H-kinase_dim PF02895.14 RKZ11184.1 - 0.042 14.2 1.3 \n38 Apolipoprotein PF01442.18 RKZ11184.1 - 0.082 12.8 2.8 \n48 DUF148 PF02520.17 RKZ11184.1 - 6.7e-05 23.0 2.7 \n57 EAP30 PF04157.16 RKZ11184.1 - 0.013 14.8 0.1 \n63 H-kinase_dim PF02895.14 RKZ11184.1 - 0.042 14.2 1.3 \n68 Apolipoprotein PF01442.18 RKZ11184.1 - 0.082 12.8 2.8 \n88 Laminin_G_1 PF00054.23 RKZ11192.1 - 0.11 12.7 0.0 \n90 Laminin_G_1 PF00054.23 RKZ11192.1 - 0.11 12.7 0.0 \n92 Laminin_G_1 PF00054.23 RKZ11192.1 - 0.11 12.7 0.0 \n94 Laminin_G_1 PF00054.23 RKZ11192.1 - 0.11 12.7 0.0 \n100 OrfB_IS605 PF01385.19 RKZ11196.1 - 6.4e-26 90.9 0.1 \n107 OrfB_IS605 PF01385.19 RKZ11196.1 - 6.4e-26 90.9 0.1 \n118 IFN-gamma PF00714.17 RKZ11201.1 - 0.12 12.3 0.0 \n119 IFN-gamma PF00714.17 RKZ11201.1 - 0.12 12.3 0.0 \n130 Laminin_G_1 PF00054.23 RKZ11205.1 - 0.0048 17.1 0.1 \n138 Laminin_G_1 PF00054.23 RKZ11205.1 - 0.0048 17.1 0.1 \n164 PPC PF04151.15 RKZ11212.1 - 3.8 8.7 13.6 \n175 PPC PF04151.15 RKZ11212.1 - 3.8 8.7 13.6 \n177 NIF PF03031.18 RKZ11213.1 - 0.037 13.8 0.3 \n180 NIF PF03031.18 RKZ11213.1 - 0.037 13.8 0.3 \n186 YajC PF02699.15 RKZ11214.1 - 0.062 13.2 0.0 \n192 YajC PF02699.15 RKZ11214.1 - 0.062 13.2 0.0 \n\n domain-E-value domain-score domain-bias ... dom rep inc \\\n5 0.095 12.7 0.0 ... 1 1 0 \n7 0.095 12.7 0.0 ... 1 1 0 \n14 13 5.7 0.1 ... 3 3 0 \n15 0.074 13.0 0.0 ... 2 2 0 \n16 13 5.7 0.1 ... 3 3 0 \n17 0.074 13.0 0.0 ... 2 2 0 \n18 7.3e-05 22.8 2.7 ... 1 1 1 \n27 0.015 14.6 0.1 ... 1 1 0 \n33 0.59 10.6 1.3 ... 1 1 0 \n38 0.11 12.4 2.8 ... 1 1 0 \n48 7.3e-05 22.8 2.7 ... 1 1 1 \n57 0.015 14.6 0.1 ... 1 1 0 \n63 0.59 10.6 1.3 ... 1 1 0 \n68 0.11 12.4 2.8 ... 1 1 0 \n88 1.7 8.9 0.0 ... 2 2 0 \n90 1.7 8.9 0.0 ... 2 2 0 \n92 1.7 8.9 0.0 ... 2 2 0 \n94 1.7 8.9 0.0 ... 2 2 0 \n100 6.4e-26 90.9 0.1 ... 2 2 1 \n107 6.4e-26 90.9 0.1 ... 2 2 1 \n118 0.13 12.1 0.0 ... 1 1 0 \n119 0.13 12.1 0.0 ... 1 1 0 \n130 0.19 11.9 0.0 ... 2 2 1 \n138 0.19 11.9 0.0 ... 2 2 1 \n164 3.5 8.8 0.3 ... 5 5 0 \n175 3.5 8.8 0.3 ... 5 5 0 \n177 0.063 13.1 0.2 ... 1 1 0 \n180 0.063 13.1 0.2 ... 1 1 0 \n186 0.18 11.7 0.0 ... 1 1 0 \n192 0.18 11.7 0.0 ... 1 1 0 \n\n description \\\n5 RadC-like JAB \n7 RadC-like JAB \n14 Phosphoenolpyruvate-dependent sugar phosphotra... \n15 impB/mucB/samB \n16 Phosphoenolpyruvate-dependent sugar phosphotra... 
\n17 impB/mucB/samB \n18 Domain of unknown function \n27 EAP30/Vps36 \n33 Signal transducing histidine kinase, homodimeric \n38 Apolipoprotein A1/A4/E \n48 Domain of unknown function \n57 EAP30/Vps36 \n63 Signal transducing histidine kinase, homodimeric \n68 Apolipoprotein A1/A4/E \n88 Laminin G \n90 Laminin G \n92 Laminin G \n94 Laminin G \n100 Probable \n107 Probable \n118 Interferon \n119 Interferon \n130 Laminin G \n138 Laminin G \n164 Bacterial pre-peptidase C-terminal \n175 Bacterial pre-peptidase C-terminal \n177 NLI interacting factor-like \n180 NLI interacting factor-like \n186 Preprotein translocase \n192 Preprotein translocase \n\n infile domain interpro pfam \\\n5 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4002 2097 4002 \n7 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4002 2097 4002 \n14 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 359 5009 359 \n15 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 817 3969 817 \n16 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 359 5009 359 \n17 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 817 3969 817 \n18 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2520 678 2520 \n27 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4157 348 4157 \n33 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2895 2582 2895 \n38 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 1442 4156 1442 \n48 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2520 678 2520 \n57 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4157 348 4157 \n63 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2895 2582 2895 \n68 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 1442 4156 1442 \n88 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n90 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n92 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n94 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n100 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 1385 4209 1385 \n107 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 1385 4209 1385 \n118 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 714 3659 714 \n119 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 714 3659 714 \n130 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n138 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 54 3321 54 \n164 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4151 3154 4151 \n175 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 4151 3154 4151 \n177 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 3031 2545 3031 \n180 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 3031 2545 3031 \n186 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2699 4903 2699 \n192 ./orfscan/QNDS01000002.1[1..40684].flat.fasta.csv 2699 4903 2699 \n\n goterms ipnum \n5 GO:0003779,GO:0015629,GO:0007010 2097 \n7 GO:0003779,GO:0015629,GO:0007010 2097 \n14 GO:0004482,GO:0006370 5009 \n15 GO:0005249,GO:0008076,GO:0006813 3969 \n16 GO:0004482,GO:0006370 5009 \n17 GO:0005249,GO:0008076,GO:0006813 3969 \n18 GO:0003677,GO:0005718,GO:0007283 678 \n27 GO:0008320,GO:0016020,GO:0006886 348 \n33 GO:0016740,GO:0008152 2582 \n38 GO:0005215,GO:0016020,GO:0006810 4156 \n48 GO:0003677,GO:0005718,GO:0007283 678 \n57 GO:0008320,GO:0016020,GO:0006886 348 \n63 GO:0016740,GO:0008152 2582 \n68 GO:0005215,GO:0016020,GO:0006810 4156 \n88 GO:0016666,GO:0006807 3321 \n90 GO:0016666,GO:0006807 3321 \n92 GO:0016666,GO:0006807 3321 \n94 GO:0016666,GO:0006807 3321 \n100 GO:0008937,GO:0006118 4209 \n107 GO:0008937,GO:0006118 4209 \n118 GO:0007275 3659 \n119 GO:0007275 
3659 \n130 GO:0016666,GO:0006807 3321 \n138 GO:0016666,GO:0006807 3321 \n164 GO:0004519,GO:0006308 3154 \n175 GO:0004519,GO:0006308 3154 \n177 GO:0004871,GO:0005622,GO:0007165 2545 \n180 GO:0004871,GO:0005622,GO:0007165 2545 \n186 GO:0005199,GO:0030115 4903 \n192 GO:0005199,GO:0030115 4903 \n\n[30 rows x 25 columns]\n100% 10 of 10 study items found in association\n100% 10 of 10 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 1 GO terms are associated with 1 of 10 study items\n 0 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\n./orfscan/NZ_LOEP01000012.1[1..56511].flat.fasta.csv\n targetname accession queryid accession2 E-value score \\\n256 Phage_integrase PF00589.22 WP_058826353.1 - 5.1e-13 49.2 \n258 Phage_integrase PF00589.22 WP_058826353.1 - 5.1e-13 49.2 \n268 Fzo_mitofusin PF04799.13 WP_058826355.1 - 0.13 11.8 \n270 FUSC PF04632.12 WP_058826355.1 - 0.2 10.1 \n271 LPP PF04728.13 WP_058826355.1 - 0.22 11.9 \n280 V_ATPase_I PF01496.19 WP_058826355.1 - 1.3 6.9 \n296 Fzo_mitofusin PF04799.13 WP_058826355.1 - 0.13 11.8 \n298 FUSC PF04632.12 WP_058826355.1 - 0.2 10.1 \n299 LPP PF04728.13 WP_058826355.1 - 0.22 11.9 \n308 V_ATPase_I PF01496.19 WP_058826355.1 - 1.3 6.9 \n330 AAA PF00004.29 WP_058826363.1 - 1.3e-05 25.6 \n334 AAA PF00004.29 WP_058826363.1 - 1.3e-05 25.6 \n341 FUSC PF04632.12 WP_058826366.1 - 0.77 8.2 \n345 PMEI PF04043.15 WP_058826366.1 - 7.5 6.9 \n349 FUSC PF04632.12 WP_058826366.1 - 0.77 8.2 \n353 PMEI PF04043.15 WP_058826366.1 - 7.5 6.9 \n358 V_ATPase_I PF01496.19 WP_058826367.1 - 0.24 9.3 \n363 Presenilin PF01080.17 WP_058826367.1 - 4.9 5.8 \n369 V_ATPase_I PF01496.19 WP_058826367.1 - 0.24 9.3 \n374 Presenilin PF01080.17 WP_058826367.1 - 4.9 5.8 \n385 Cytochrom_C_2 PF01322.20 WP_058826371.1 - 0.026 15.4 \n390 DUF349 PF03993.12 WP_058826371.1 - 0.58 10.5 \n397 Cytochrom_C_2 PF01322.20 WP_058826371.1 - 0.026 15.4 \n402 DUF349 PF03993.12 WP_058826371.1 - 0.58 10.5 \n412 GRIP PF01465.20 WP_058826373.1 - 0.62 10.0 \n413 GRIP PF01465.20 WP_058826373.1 - 0.62 10.0 \n420 AAA PF00004.29 WP_058826374.1 - 0.0044 17.4 \n421 DEAD PF00270.29 WP_058826374.1 - 0.011 15.5 \n430 AAA PF00004.29 WP_058826374.1 - 0.0044 17.4 \n431 DEAD PF00270.29 WP_058826374.1 - 0.011 15.5 \n461 TrmB PF01978.19 WP_058826380.1 - 0.0016 18.3 \n465 UPF0122 PF04297.14 WP_058826380.1 - 0.027 14.7 \n470 HTH_5 PF01022.20 WP_058826380.1 - 0.099 12.5 \n472 TrmB PF01978.19 WP_058826380.1 - 0.0016 18.3 \n476 UPF0122 PF04297.14 WP_058826380.1 - 0.027 14.7 \n481 HTH_5 PF01022.20 WP_058826380.1 - 0.099 12.5 \n484 AAA PF00004.29 WP_058826382.1 - 1.4e-38 132.3 \n493 IstB_IS21 PF01695.17 WP_058826382.1 - 0.0018 18.0 \n506 Bac_DnaA PF00308.18 WP_058826382.1 - 0.05 13.4 \n508 ABC_tran PF00005.27 WP_058826382.1 - 0.079 13.5 \n515 ATPase_2 PF01637.18 WP_058826382.1 - 0.49 10.3 \n516 AAA PF00004.29 WP_058826382.1 - 1.4e-38 132.3 \n525 IstB_IS21 PF01695.17 WP_058826382.1 - 0.0018 18.0 \n538 Bac_DnaA PF00308.18 WP_058826382.1 - 0.05 13.4 \n540 ABC_tran PF00005.27 WP_058826382.1 - 0.079 13.5 \n547 ATPase_2 PF01637.18 WP_058826382.1 - 0.49 10.3 \n562 Transglut_core PF01841.19 WP_082677660.1 - 3.9e-09 36.9 \n\n bias domain-E-value domain-score domain-bias ... dom rep inc \\\n256 0.0 4.4e-12 46.1 0.0 ... 2 2 1 \n258 0.0 4.4e-12 46.1 0.0 ... 2 2 1 \n268 2.2 0.31 10.6 0.7 ... 2 2 0 \n270 1.3 0.32 9.5 1.3 ... 1 1 0 \n271 4.6 28 5.2 0.2 ... 2 2 0 \n280 8.6 3.2 5.6 8.6 ... 
1 1 0 \n296 2.2 0.31 10.6 0.7 ... 2 2 0 \n298 1.3 0.32 9.5 1.3 ... 1 1 0 \n299 4.6 28 5.2 0.2 ... 2 2 0 \n308 8.6 3.2 5.6 8.6 ... 1 1 0 \n330 0.1 0.014 15.8 0.0 ... 3 3 2 \n334 0.1 0.014 15.8 0.0 ... 3 3 2 \n341 10.1 0.78 8.2 8.7 ... 2 2 0 \n345 11.0 62 3.9 10.8 ... 1 1 0 \n349 10.1 0.78 8.2 8.7 ... 2 2 0 \n353 11.0 62 3.9 10.8 ... 1 1 0 \n358 1.3 0.28 9.1 1.3 ... 1 1 0 \n363 4.2 5.7 5.6 4.2 ... 1 1 0 \n369 1.3 0.28 9.1 1.3 ... 1 1 0 \n374 4.2 5.7 5.6 4.2 ... 1 1 0 \n385 1.1 0.041 14.8 1.1 ... 1 1 0 \n390 12.3 0.32 11.3 1.7 ... 2 2 0 \n397 1.1 0.041 14.8 1.1 ... 1 1 0 \n402 12.3 0.32 11.3 1.7 ... 2 2 0 \n412 2.0 0.69 9.9 0.1 ... 2 2 0 \n413 2.0 0.69 9.9 0.1 ... 2 2 0 \n420 0.0 0.017 15.5 0.0 ... 2 2 1 \n421 0.0 0.46 10.2 0.0 ... 2 2 0 \n430 0.0 0.017 15.5 0.0 ... 2 2 1 \n431 0.0 0.46 10.2 0.0 ... 2 2 0 \n461 0.0 0.019 14.8 0.0 ... 2 2 1 \n465 0.0 0.057 13.7 0.0 ... 1 1 0 \n470 0.2 0.32 10.9 0.1 ... 2 2 0 \n472 0.0 0.019 14.8 0.0 ... 2 2 1 \n476 0.0 0.057 13.7 0.0 ... 1 1 0 \n481 0.2 0.32 10.9 0.1 ... 2 2 0 \n484 0.0 3.2e-38 131.1 0.0 ... 1 1 1 \n493 0.0 0.0048 16.7 0.0 ... 1 1 1 \n506 0.1 0.38 10.6 0.1 ... 1 1 0 \n508 0.8 0.23 12.0 0.0 ... 2 2 0 \n515 5.6 13 5.6 0.1 ... 2 2 0 \n516 0.0 3.2e-38 131.1 0.0 ... 1 1 1 \n525 0.0 0.0048 16.7 0.0 ... 1 1 1 \n538 0.1 0.38 10.6 0.1 ... 1 1 0 \n540 0.8 0.23 12.0 0.0 ... 2 2 0 \n547 5.6 13 5.6 0.1 ... 2 2 0 \n562 0.0 5.4e-09 36.5 0.0 ... 1 1 1 \n\n description \\\n256 Phage integrase \n258 Phage integrase \n268 fzo-like conserved \n270 Fusaric acid resistance protein \n271 Lipoprotein \n280 V-type ATPase 116kDa subunit \n296 fzo-like conserved \n298 Fusaric acid resistance protein \n299 Lipoprotein \n308 V-type ATPase 116kDa subunit \n330 ATPase family associated with various cellular... \n334 ATPase family associated with various cellular... \n341 Fusaric acid resistance protein \n345 Plant invertase/pectin methylesterase \n349 Fusaric acid resistance protein \n353 Plant invertase/pectin methylesterase \n358 V-type ATPase 116kDa subunit \n363 \n369 V-type ATPase 116kDa subunit \n374 \n385 Cytochrome \n390 Domain of Unknown Function \n397 Cytochrome \n402 Domain of Unknown Function \n412 GRIP \n413 GRIP \n420 ATPase family associated with various cellular... \n421 DEAD/DEAH box \n430 ATPase family associated with various cellular... \n431 DEAD/DEAH box \n461 Sugar-specific transcriptional regulator \n465 Putative helix-turn-helix protein, YlxM / p13 \n470 Bacterial regulatory protein, arsR \n472 Sugar-specific transcriptional regulator \n476 Putative helix-turn-helix protein, YlxM / p13 \n481 Bacterial regulatory protein, arsR \n484 ATPase family associated with various cellular... \n493 IstB-like ATP binding \n506 Bacterial dnaA \n508 ABC \n515 ATPase domain predominantly from \n516 ATPase family associated with various cellular... \n525 IstB-like ATP binding \n538 Bacterial dnaA \n540 ABC \n547 ATPase domain predominantly from \n562 Transglutaminase-like \n\n infile domain interpro pfam \\\n256 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 589 5018 589 \n258 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 589 5018 589 \n268 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4799 4210 4799 \n270 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4632 386 4632 \n271 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4728 3106 4728 \n280 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1496 2163 1496 \n296 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4799 4210 4799 \n298 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 
4632 386 4632 \n299 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4728 3106 4728 \n308 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1496 2163 1496 \n330 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n334 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n341 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4632 386 4632 \n345 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4043 751 4043 \n349 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4632 386 4632 \n353 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4043 751 4043 \n358 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1496 2163 1496 \n363 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1080 5424 1080 \n369 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1496 2163 1496 \n374 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1080 5424 1080 \n385 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1322 2570 1322 \n390 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 3993 1233 3993 \n397 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1322 2570 1322 \n402 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 3993 1233 3993 \n412 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1465 2571 1465 \n413 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1465 2571 1465 \n420 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n421 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 270 5433 270 \n430 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n431 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 270 5433 270 \n461 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1978 602 1978 \n465 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4297 1904 4297 \n470 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1022 1508 1022 \n472 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1978 602 1978 \n476 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4297 1904 4297 \n481 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1022 1508 1022 \n484 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n493 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1695 1897 1695 \n506 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 308 6672 308 \n508 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 5 2229 5 \n515 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1637 2539 1637 \n516 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 4 5031 4 \n525 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1695 1897 1695 \n538 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 308 6672 308 \n540 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 5 2229 5 \n547 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 1637 2539 1637 \n562 ./orfscan/NZ_LOEP01000012.1[1..56511].flat.fas... 
1841 2784 1841 \n\n goterms ipnum \n256 GO:0004500,GO:0006584 5018 \n258 GO:0004500,GO:0006584 5018 \n268 GO:0003677 4210 \n270 GO:0007334 386 \n271 GO:0003677,GO:0005634,GO:0006355 3106 \n280 GO:0005180,GO:0005576 2163 \n296 GO:0003677 4210 \n298 GO:0007334 386 \n299 GO:0003677,GO:0005634,GO:0006355 3106 \n308 GO:0005180,GO:0005576 2163 \n330 GO:0016218,GO:0017000 5031 \n334 GO:0016218,GO:0017000 5031 \n341 GO:0007334 386 \n345 GO:0004725,GO:0005622,GO:0006470 751 \n349 GO:0007334 386 \n353 GO:0004725,GO:0005622,GO:0006470 751 \n358 GO:0005180,GO:0005576 2163 \n363 GO:0005249,GO:0016020,GO:0006813 5424 \n369 GO:0005180,GO:0005576 2163 \n374 GO:0005249,GO:0016020,GO:0006813 5424 \n385 GO:0004871,GO:0007165 2570 \n390 GO:0005554 1233 \n397 GO:0004871,GO:0007165 2570 \n402 GO:0005554 1233 \n412 GO:0006355 2571 \n413 GO:0006355 2571 \n420 GO:0016218,GO:0017000 5031 \n421 GO:0004890,GO:0016021,GO:0007214 5433 \n430 GO:0016218,GO:0017000 5031 \n431 GO:0004890,GO:0016021,GO:0007214 5433 \n461 GO:0004559,GO:0005975 602 \n465 GO:0005856,GO:0007160 1904 \n470 GO:0005234,GO:0016020,GO:0006811 1508 \n472 GO:0004559,GO:0005975 602 \n476 GO:0005856,GO:0007160 1904 \n481 GO:0005234,GO:0016020,GO:0006811 1508 \n484 GO:0016218,GO:0017000 5031 \n493 GO:0005215,GO:0016020,GO:0006810 1897 \n506 GO:0005554,GO:0016021 6672 \n508 GO:0016020 2229 \n515 GO:0016491,GO:0008152 2539 \n516 GO:0016218,GO:0017000 5031 \n525 GO:0005215,GO:0016020,GO:0006810 1897 \n538 GO:0005554,GO:0016021 6672 \n540 GO:0016020 2229 \n547 GO:0016491,GO:0008152 2539 \n562 GO:0003735,GO:0005840,GO:0006412 2784 \n\n[47 rows x 25 columns]\n100% 11 of 11 study items found in association\n100% 11 of 11 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 5 GO terms are associated with 5 of 11 study items\n 1 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\nGO:0000752\t0.11412667495736266\n./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv\n targetname accession queryid accession2 E-value score bias \\\n568 Transglut_core PF01841.19 RKX41254.1 - 1.7e-11 44.5 0.0 \n570 DUF600 PF04634.12 RKX41254.1 - 0.084 12.9 0.0 \n572 Transglut_core PF01841.19 RKX41254.1 - 1.7e-11 44.5 0.0 \n574 DUF600 PF04634.12 RKX41254.1 - 0.084 12.9 0.0 \n590 HTH_5 PF01022.20 RKX41259.1 - 0.065 13.1 0.0 \n595 HTH_5 PF01022.20 RKX41259.1 - 0.065 13.1 0.0 \n\n domain-E-value domain-score domain-bias ... dom rep inc \\\n568 2.8e-11 43.9 0.0 ... 1 1 1 \n570 0.13 12.3 0.0 ... 1 1 0 \n572 2.8e-11 43.9 0.0 ... 1 1 1 \n574 0.13 12.3 0.0 ... 1 1 0 \n590 0.13 12.2 0.0 ... 1 1 0 \n595 0.13 12.2 0.0 ... 
1 1 0 \n\n description \\\n568 Transglutaminase-like \n570 Protein of unknown function, \n572 Transglutaminase-like \n574 Protein of unknown function, \n590 Bacterial regulatory protein, arsR \n595 Bacterial regulatory protein, arsR \n\n infile domain interpro pfam \\\n568 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 1841 2784 1841 \n570 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 4634 1138 4634 \n572 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 1841 2784 1841 \n574 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 4634 1138 4634 \n590 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 1022 1508 1022 \n595 ./orfscan/QNAN01000020.1[1..7966].flat.fasta.csv 1022 1508 1022 \n\n goterms ipnum \n568 GO:0003735,GO:0005840,GO:0006412 2784 \n570 GO:0008270,GO:0005634,GO:0006355 1138 \n572 GO:0003735,GO:0005840,GO:0006412 2784 \n574 GO:0008270,GO:0005634,GO:0006355 1138 \n590 GO:0005234,GO:0016020,GO:0006811 1508 \n595 GO:0005234,GO:0016020,GO:0006811 1508 \n\n[6 rows x 25 columns]\n100% 2 of 2 study items found in association\n100% 2 of 2 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 0 GO terms are associated with 0 of 2 study items\n 0 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\n./orfscan/QMWF01000152.1[1..2919].flat.fasta.csv\n targetname accession queryid accession2 E-value score bias \\\n607 Big_1 PF02369.16 RLG58776.1 - 0.049 13.6 4.6 \n\n domain-E-value domain-score domain-bias ... dom rep inc \\\n607 0.092 12.8 0.0 ... 3 3 0 \n\n description \\\n607 Bacterial Ig-like domain (group \n\n infile domain interpro pfam \\\n607 ./orfscan/QMWF01000152.1[1..2919].flat.fasta.csv 2369 1163 2369 \n\n goterms ipnum \n607 GO:0008248,GO:0005732,GO:0006371 1163 \n\n[1 rows x 25 columns]\n100% 1 of 1 study items found in association\n100% 1 of 1 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 0 GO terms are associated with 0 of 1 study items\n 0 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\n./orfscan/QMYS01000134.1[1..4823].flat.fasta.csv\n targetname accession queryid accession2 E-value score bias \\\n609 NEAT PF05031.12 RLI53188.1 - 0.072 13.7 2.5 \n611 NEAT PF05031.12 RLI53188.1 - 0.072 13.7 2.5 \n\n domain-E-value domain-score domain-bias ... dom rep inc \\\n609 0.25 12.0 2.5 ... 1 1 0 \n611 0.25 12.0 2.5 ... 
1 1 0 \n\n description \\\n609 Iron Transport-associated \n611 Iron Transport-associated \n\n infile domain interpro pfam \\\n609 ./orfscan/QMYS01000134.1[1..4823].flat.fasta.csv 5031 1125 5031 \n611 ./orfscan/QMYS01000134.1[1..4823].flat.fasta.csv 5031 1125 5031 \n\n goterms ipnum \n609 GO:0005509 1125 \n611 GO:0005509 1125 \n\n[2 rows x 25 columns]\n100% 1 of 1 study items found in association\n100% 1 of 1 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 1 GO terms are associated with 1 of 1 study items\n 1 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\nGO:0005509\t0.36724321666088444\n./orfscan/NZ_KE386573.1[2555621..2634980].flat.fasta.csv\n targetname accession queryid accession2 E-value score \\\n642 Hormone_3 PF00159.18 WP_049937212.1 - 0.00048 20.1 \n653 Not3 PF04065.15 WP_049937212.1 - 0.02 14.4 \n665 Herpes_UL6 PF01763.16 WP_049937212.1 - 0.071 11.6 \n672 HSP70 PF00012.20 WP_049937212.1 - 0.12 10.5 \n673 DUF349 PF03993.12 WP_049937212.1 - 0.12 12.6 \n677 V_ATPase_I PF01496.19 WP_049937212.1 - 0.3 8.9 \n688 Hormone_3 PF00159.18 WP_049937212.1 - 0.00048 20.1 \n699 Not3 PF04065.15 WP_049937212.1 - 0.02 14.4 \n711 Herpes_UL6 PF01763.16 WP_049937212.1 - 0.071 11.6 \n718 HSP70 PF00012.20 WP_049937212.1 - 0.12 10.5 \n719 DUF349 PF03993.12 WP_049937212.1 - 0.12 12.6 \n723 V_ATPase_I PF01496.19 WP_049937212.1 - 0.3 8.9 \n736 V_ATPase_I PF01496.19 WP_049937213.1 - 0.0014 16.6 \n739 Hormone_3 PF00159.18 WP_049937213.1 - 0.0081 16.2 \n741 Herpes_UL6 PF01763.16 WP_049937213.1 - 0.0094 14.5 \n780 Prefoldin PF02996.17 WP_049937213.1 - 0.22 11.4 \n788 Fzo_mitofusin PF04799.13 WP_049937213.1 - 0.47 10.0 \n805 V_ATPase_I PF01496.19 WP_049937213.1 - 0.0014 16.6 \n808 Hormone_3 PF00159.18 WP_049937213.1 - 0.0081 16.2 \n810 Herpes_UL6 PF01763.16 WP_049937213.1 - 0.0094 14.5 \n849 Prefoldin PF02996.17 WP_049937213.1 - 0.22 11.4 \n857 Fzo_mitofusin PF04799.13 WP_049937213.1 - 0.47 10.0 \n874 TrmB PF01978.19 WP_084510071.1 - 9.1e-06 25.5 \n876 HxlR PF01638.17 WP_084510071.1 - 5.7e-05 22.9 \n882 Ribosomal_S19e PF01090.19 WP_084510071.1 - 0.0064 16.2 \n887 MarR PF01047.22 WP_084510071.1 - 0.029 14.3 \n888 HTH_5 PF01022.20 WP_084510071.1 - 0.035 14.0 \n897 TrmB PF01978.19 WP_084510071.1 - 9.1e-06 25.5 \n899 HxlR PF01638.17 WP_084510071.1 - 5.7e-05 22.9 \n905 Ribosomal_S19e PF01090.19 WP_084510071.1 - 0.0064 16.2 \n... ... ... ... ... ... ... 
\n1375 Phage_int_SAM_1 PF02899.17 WP_049937273.1 - 1.6e-05 25.1 \n1377 Dynamitin PF04912.14 WP_049937274.1 - 0.001 18.5 \n1379 RhoGAP PF00620.27 WP_049937274.1 - 0.015 15.2 \n1387 Dynamitin PF04912.14 WP_049937274.1 - 0.001 18.5 \n1389 RhoGAP PF00620.27 WP_049937274.1 - 0.015 15.2 \n1397 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1401 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1405 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1409 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1413 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1417 RHH_1 PF01402.21 WP_049937275.1 - 0.0024 17.8 \n1423 HTH_5 PF01022.20 WP_049937277.1 - 2e-06 27.6 \n1426 TrmB PF01978.19 WP_049937277.1 - 5.8e-05 22.9 \n1433 MarR PF01047.22 WP_049937277.1 - 0.016 15.1 \n1434 GntR PF00392.21 WP_049937277.1 - 0.02 14.5 \n1440 HTH_5 PF01022.20 WP_049937277.1 - 2e-06 27.6 \n1443 TrmB PF01978.19 WP_049937277.1 - 5.8e-05 22.9 \n1450 MarR PF01047.22 WP_049937277.1 - 0.016 15.1 \n1451 GntR PF00392.21 WP_049937277.1 - 0.02 14.5 \n1455 FAD_binding_3 PF01494.19 WP_049937278.1 - 0.0015 17.8 \n1456 FAD_binding_3 PF01494.19 WP_049937278.1 - 0.0015 17.8 \n1463 MarR PF01047.22 WP_084510083.1 - 2.5e-05 24.1 \n1464 HTH_5 PF01022.20 WP_084510083.1 - 3.9e-05 23.4 \n1465 TrmB PF01978.19 WP_084510083.1 - 0.00017 21.4 \n1467 GntR PF00392.21 WP_084510083.1 - 0.00098 18.7 \n1484 MarR PF01047.22 WP_084510083.1 - 2.5e-05 24.1 \n1485 HTH_5 PF01022.20 WP_084510083.1 - 3.9e-05 23.4 \n1486 TrmB PF01978.19 WP_084510083.1 - 0.00017 21.4 \n1488 GntR PF00392.21 WP_084510083.1 - 0.00098 18.7 \n1507 GAF PF01590.26 WP_049937280.1 - 8.7e-11 42.6 \n\n bias domain-E-value domain-score domain-bias ... dom rep inc \\\n642 0.1 0.015 15.4 0.1 ... 2 2 1 \n653 3.5 0.03 13.7 3.5 ... 1 1 0 \n665 0.3 0.08 11.5 0.3 ... 1 1 0 \n672 2.2 0.15 10.2 2.2 ... 1 1 0 \n673 2.8 0.17 12.2 2.8 ... 1 1 0 \n677 1.1 0.31 8.9 1.1 ... 1 1 0 \n688 0.1 0.015 15.4 0.1 ... 2 2 1 \n699 3.5 0.03 13.7 3.5 ... 1 1 0 \n711 0.3 0.08 11.5 0.3 ... 1 1 0 \n718 2.2 0.15 10.2 2.2 ... 1 1 0 \n719 2.8 0.17 12.2 2.8 ... 1 1 0 \n723 1.1 0.31 8.9 1.1 ... 1 1 0 \n736 1.7 0.0015 16.5 1.7 ... 1 1 1 \n739 0.0 0.016 15.3 0.0 ... 1 1 1 \n741 2.9 0.011 14.3 2.9 ... 1 1 1 \n780 9.3 0.95 9.4 4.5 ... 3 3 0 \n788 5.7 5.8 6.5 2.3 ... 2 2 0 \n805 1.7 0.0015 16.5 1.7 ... 1 1 1 \n808 0.0 0.016 15.3 0.0 ... 1 1 1 \n810 2.9 0.011 14.3 2.9 ... 1 1 1 \n849 9.3 0.95 9.4 4.5 ... 3 3 0 \n857 5.7 5.8 6.5 2.3 ... 2 2 0 \n874 0.1 1.4e-05 24.9 0.1 ... 1 1 1 \n876 0.0 7.5e-05 22.5 0.0 ... 1 1 1 \n882 0.0 0.0074 16.0 0.0 ... 1 1 1 \n887 0.0 0.045 13.7 0.0 ... 1 1 0 \n888 0.0 0.056 13.3 0.0 ... 1 1 0 \n897 0.1 1.4e-05 24.9 0.1 ... 1 1 1 \n899 0.0 7.5e-05 22.5 0.0 ... 1 1 1 \n905 0.0 0.0074 16.0 0.0 ... 1 1 1 \n... ... ... ... ... ... .. .. .. \n1375 0.1 4.9e-05 23.5 0.1 ... 1 1 1 \n1377 2.8 0.0012 18.3 2.8 ... 1 1 1 \n1379 0.1 0.021 14.7 0.1 ... 1 1 0 \n1387 2.8 0.0012 18.3 2.8 ... 1 1 1 \n1389 0.1 0.021 14.7 0.1 ... 1 1 0 \n1397 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1401 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1405 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1409 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1413 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1417 1.8 0.0082 16.0 1.4 ... 2 2 1 \n1423 0.0 3.2e-06 26.9 0.0 ... 1 1 1 \n1426 0.0 9.7e-05 22.2 0.0 ... 1 1 1 \n1433 0.0 0.032 14.1 0.0 ... 1 1 0 \n1434 0.7 0.033 13.8 0.2 ... 2 2 0 \n1440 0.0 3.2e-06 26.9 0.0 ... 1 1 1 \n1443 0.0 9.7e-05 22.2 0.0 ... 1 1 1 \n1450 0.0 0.032 14.1 0.0 ... 1 1 0 \n1451 0.7 0.033 13.8 0.2 ... 2 2 0 \n1455 0.4 0.086 12.1 0.0 ... 2 2 2 \n1456 0.4 0.086 12.1 0.0 ... 
2 2 2 \n1463 0.0 4.4e-05 23.3 0.0 ... 1 1 1 \n1464 0.0 5.7e-05 22.9 0.0 ... 1 1 1 \n1465 0.0 0.00029 20.7 0.0 ... 1 1 1 \n1467 0.1 0.0016 18.0 0.1 ... 1 1 1 \n1484 0.0 4.4e-05 23.3 0.0 ... 1 1 1 \n1485 0.0 5.7e-05 22.9 0.0 ... 1 1 1 \n1486 0.0 0.00029 20.7 0.0 ... 1 1 1 \n1488 0.1 0.0016 18.0 0.1 ... 1 1 1 \n1507 0.8 2.8e-05 24.8 0.0 ... 4 4 2 \n\n description \\\n642 Pancreatic hormone \n653 Not1 N-terminal domain, CCR4-Not complex \n665 Herpesvirus UL6 \n672 Hsp70 \n673 Domain of Unknown Function \n677 V-type ATPase 116kDa subunit \n688 Pancreatic hormone \n699 Not1 N-terminal domain, CCR4-Not complex \n711 Herpesvirus UL6 \n718 Hsp70 \n719 Domain of Unknown Function \n723 V-type ATPase 116kDa subunit \n736 V-type ATPase 116kDa subunit \n739 Pancreatic hormone \n741 Herpesvirus UL6 \n780 Prefoldin \n788 fzo-like conserved \n805 V-type ATPase 116kDa subunit \n808 Pancreatic hormone \n810 Herpesvirus UL6 \n849 Prefoldin \n857 fzo-like conserved \n874 Sugar-specific transcriptional regulator \n876 HxlR-like \n882 Ribosomal protein \n887 MarR \n888 Bacterial regulatory protein, arsR \n897 Sugar-specific transcriptional regulator \n899 HxlR-like \n905 Ribosomal protein \n... ... \n1375 Phage integrase, N-terminal SAM-like \n1377 \n1379 RhoGAP \n1387 \n1389 RhoGAP \n1397 Ribbon-helix-helix protein, copG \n1401 Ribbon-helix-helix protein, copG \n1405 Ribbon-helix-helix protein, copG \n1409 Ribbon-helix-helix protein, copG \n1413 Ribbon-helix-helix protein, copG \n1417 Ribbon-helix-helix protein, copG \n1423 Bacterial regulatory protein, arsR \n1426 Sugar-specific transcriptional regulator \n1433 MarR \n1434 Bacterial regulatory proteins, gntR \n1440 Bacterial regulatory protein, arsR \n1443 Sugar-specific transcriptional regulator \n1450 MarR \n1451 Bacterial regulatory proteins, gntR \n1455 FAD binding \n1456 FAD binding \n1463 MarR \n1464 Bacterial regulatory protein, arsR \n1465 Sugar-specific transcriptional regulator \n1467 Bacterial regulatory proteins, gntR \n1484 MarR \n1485 Bacterial regulatory protein, arsR \n1486 Sugar-specific transcriptional regulator \n1488 Bacterial regulatory proteins, gntR \n1507 GAF \n\n infile domain interpro pfam \\\n642 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 159 3251 159 \n653 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4065 304 4065 \n665 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1763 1289 1763 \n672 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 12 1872 12 \n673 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 3993 1233 3993 \n677 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1496 2163 1496 \n688 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 159 3251 159 \n699 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4065 304 4065 \n711 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1763 1289 1763 \n718 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 12 1872 12 \n719 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 3993 1233 3993 \n723 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1496 2163 1496 \n736 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1496 2163 1496 \n739 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 159 3251 159 \n741 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1763 1289 1763 \n780 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 2996 539 2996 \n788 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4799 4210 4799 \n805 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1496 2163 1496 \n808 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 
159 3251 159 \n810 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1763 1289 1763 \n849 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 2996 539 2996 \n857 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4799 4210 4799 \n874 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n876 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1638 2540 1638 \n882 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1090 1553 1090 \n887 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1047 535 1047 \n888 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1022 1508 1022 \n897 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n899 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1638 2540 1638 \n905 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1090 1553 1090 \n... ... ... ... ... \n1375 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 2899 775 2899 \n1377 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4912 4756 4912 \n1379 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 620 1160 620 \n1387 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 4912 4756 4912 \n1389 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 620 1160 620 \n1397 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1401 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1405 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1409 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1413 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1417 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1402 1563 1402 \n1423 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1022 1508 1022 \n1426 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n1433 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1047 535 1047 \n1434 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 392 296 392 \n1440 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1022 1508 1022 \n1443 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n1450 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1047 535 1047 \n1451 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 392 296 392 \n1455 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1494 4760 1494 \n1456 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1494 4760 1494 \n1463 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1047 535 1047 \n1464 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1022 1508 1022 \n1465 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n1467 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 392 296 392 \n1484 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1047 535 1047 \n1485 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1022 1508 1022 \n1486 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 1978 602 1978 \n1488 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 392 296 392 \n1507 ./orfscan/NZ_KE386573.1[2555621..2634980].flat... 
1590 6620 1590 \n\n goterms ipnum \n642 GO:0005505,GO:0006118 3251 \n653 GO:0004735,GO:0006561 304 \n665 GO:0003700,GO:0005634,GO:0006355 1289 \n672 GO:0004190,GO:0016020,GO:0006508 1872 \n673 GO:0005554 1233 \n677 GO:0005180,GO:0005576 2163 \n688 GO:0005505,GO:0006118 3251 \n699 GO:0004735,GO:0006561 304 \n711 GO:0003700,GO:0005634,GO:0006355 1289 \n718 GO:0004190,GO:0016020,GO:0006508 1872 \n719 GO:0005554 1233 \n723 GO:0005180,GO:0005576 2163 \n736 GO:0005180,GO:0005576 2163 \n739 GO:0005505,GO:0006118 3251 \n741 GO:0003700,GO:0005634,GO:0006355 1289 \n780 GO:0004928,GO:0016020 539 \n788 GO:0003677 4210 \n805 GO:0005180,GO:0005576 2163 \n808 GO:0005505,GO:0006118 3251 \n810 GO:0003700,GO:0005634,GO:0006355 1289 \n849 GO:0004928,GO:0016020 539 \n857 GO:0003677 4210 \n874 GO:0004559,GO:0005975 602 \n876 GO:0004197,GO:0006508 2540 \n882 GO:0008094,GO:0006310 1553 \n887 GO:0005198,GO:0006928 535 \n888 GO:0005234,GO:0016020,GO:0006811 1508 \n897 GO:0004559,GO:0005975 602 \n899 GO:0004197,GO:0006508 2540 \n905 GO:0008094,GO:0006310 1553 \n... ... ... \n1375 GO:0007342 775 \n1377 GO:0015359,GO:0016021,GO:0006865 4756 \n1379 GO:0008769,GO:0006508 1160 \n1387 GO:0015359,GO:0016021,GO:0006865 4756 \n1389 GO:0008769,GO:0006508 1160 \n1397 GO:0004185,GO:0006508 1563 \n1401 GO:0004185,GO:0006508 1563 \n1405 GO:0004185,GO:0006508 1563 \n1409 GO:0004185,GO:0006508 1563 \n1413 GO:0004185,GO:0006508 1563 \n1417 GO:0004185,GO:0006508 1563 \n1423 GO:0005234,GO:0016020,GO:0006811 1508 \n1426 GO:0004559,GO:0005975 602 \n1433 GO:0005198,GO:0006928 535 \n1434 GO:0005215,GO:0016020,GO:0006810 296 \n1440 GO:0005234,GO:0016020,GO:0006811 1508 \n1443 GO:0004559,GO:0005975 602 \n1450 GO:0005198,GO:0006928 535 \n1451 GO:0005215,GO:0016020,GO:0006810 296 \n1455 GO:0015359,GO:0016021,GO:0006865 4760 \n1456 GO:0015359,GO:0016021,GO:0006865 4760 \n1463 GO:0005198,GO:0006928 535 \n1464 GO:0005234,GO:0016020,GO:0006811 1508 \n1465 GO:0004559,GO:0005975 602 \n1467 GO:0005215,GO:0016020,GO:0006810 296 \n1484 GO:0005198,GO:0006928 535 \n1485 GO:0005234,GO:0016020,GO:0006811 1508 \n1486 GO:0004559,GO:0005975 602 \n1488 GO:0005215,GO:0016020,GO:0006810 296 \n1507 GO:0016706,GO:0019538 6620 \n\n[151 rows x 25 columns]\n100% 30 of 30 study items found in association\n100% 30 of 30 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 8 GO terms are associated with 8 of 30 study items\n 0 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\n./orfscan/CP010426.1[733481..806920].flat.fasta.csv\n targetname accession queryid accession2 E-value score \\\n1530 HigB-like_toxin PF05015.13 AJF63049.1 - 0.011 16.1 \n1535 HigB-like_toxin PF05015.13 AJF63049.1 - 0.011 16.1 \n1547 HigB-like_toxin PF05015.13 AJF63052.1 - 0.0018 18.6 \n1553 HigB-like_toxin PF05015.13 AJF63052.1 - 0.0018 18.6 \n1560 RNA_pol_Rpb1_5 PF04998.17 AJF63054.1 - 6.6e-40 137.2 \n1563 RNA_pol_Rpb1_5 PF04998.17 AJF63054.1 - 6.6e-40 137.2 \n1570 RNA_pol_Rpb1_5 PF04998.17 AJF63055.1 - 7.3e-24 84.6 \n1571 Dynamin_M PF01031.20 AJF63055.1 - 0.019 14.2 \n1576 RNA_pol_Rpb1_5 PF04998.17 AJF63055.1 - 7.3e-24 84.6 \n1577 Dynamin_M PF01031.20 AJF63055.1 - 0.019 14.2 \n1578 RNA_pol_Rpb2_6 PF00562.28 AJF63056.1 - 1.2e-119 399.9 \n1579 RNA_pol_Rpb2_7 PF04560.20 AJF63056.1 - 2e-25 89.0 \n1581 RNA_pol_Rpb2_5 PF04567.17 AJF63056.1 - 4.9e-11 43.0 \n1587 RNA_pol_Rpb2_6 PF00562.28 AJF63056.1 - 1.2e-119 399.9 \n1588 RNA_pol_Rpb2_7 
PF04560.20 AJF63056.1 - 2e-25 89.0 \n1590 RNA_pol_Rpb2_5 PF04567.17 AJF63056.1 - 4.9e-11 43.0 \n1596 RNA_pol_Rpb2_1 PF04563.15 AJF63057.1 - 2.2e-47 161.2 \n1597 RNA_pol_Rpb2_3 PF04565.16 AJF63057.1 - 2.3e-24 85.4 \n1599 RNA_pol_Rpb2_1 PF04563.15 AJF63057.1 - 2.2e-47 161.2 \n1600 RNA_pol_Rpb2_3 PF04565.16 AJF63057.1 - 2.3e-24 85.4 \n1602 RNA_pol_Rpb5_C PF01191.19 AJF63058.1 - 1.2e-29 102.0 \n1607 RNA_pol_Rpb5_C PF01191.19 AJF63058.1 - 1.2e-29 102.0 \n1612 RNA_pol_Rpb5_C PF01191.19 AJF63058.1 - 1.2e-29 102.0 \n1617 RNA_pol_Rpb5_C PF01191.19 AJF63058.1 - 1.2e-29 102.0 \n1648 MazE_antitoxin PF04014.18 AJF63060.1 - 6.5e-07 29.1 \n1649 MazE_antitoxin PF04014.18 AJF63060.1 - 6.5e-07 29.1 \n1662 DDE_Tnp_IS66 PF03050.14 AJF63063.1 - 0.0015 18.0 \n1664 rve PF00665.26 AJF63063.1 - 0.0041 17.3 \n1670 DDE_Tnp_IS66 PF03050.14 AJF63063.1 - 0.0015 18.0 \n1672 rve PF00665.26 AJF63063.1 - 0.0041 17.3 \n... ... ... ... ... ... ... \n2107 SirB PF04247.12 AJF63132.1 - 0.025 14.7 \n2111 tRNA-synt_2d PF01409.20 AJF63133.1 - 0.07 12.6 \n2116 tRNA-synt_2d PF01409.20 AJF63133.1 - 0.07 12.6 \n2122 NTP_transf_2 PF01909.23 AJF63135.1 - 1.4e-09 38.1 \n2124 MarR PF01047.22 AJF63135.1 - 1.1e-05 25.2 \n2125 GntR PF00392.21 AJF63135.1 - 0.00063 19.3 \n2129 HigB-like_toxin PF05015.13 AJF63135.1 - 0.0056 17.1 \n2131 TrmB PF01978.19 AJF63135.1 - 0.035 14.0 \n2132 HTH_5 PF01022.20 AJF63135.1 - 0.051 13.4 \n2136 NTP_transf_2 PF01909.23 AJF63135.1 - 1.4e-09 38.1 \n2138 MarR PF01047.22 AJF63135.1 - 1.1e-05 25.2 \n2139 GntR PF00392.21 AJF63135.1 - 0.00063 19.3 \n2143 HigB-like_toxin PF05015.13 AJF63135.1 - 0.0056 17.1 \n2145 TrmB PF01978.19 AJF63135.1 - 0.035 14.0 \n2146 HTH_5 PF01022.20 AJF63135.1 - 0.051 13.4 \n2150 RadC PF04002.15 AJF63136.1 - 1.4e-42 144.5 \n2151 SWIRM PF04433.17 AJF63136.1 - 0.0012 19.1 \n2153 RadC PF04002.15 AJF63136.1 - 1.4e-42 144.5 \n2154 SWIRM PF04433.17 AJF63136.1 - 0.0012 19.1 \n2157 NTP_transf_2 PF01909.23 AJF63139.1 - 4.7e-09 36.4 \n2160 NTP_transf_2 PF01909.23 AJF63139.1 - 4.7e-09 36.4 \n2165 tRNA-synt_2d PF01409.20 AJF63140.1 - 0.11 11.9 \n2169 tRNA-synt_2d PF01409.20 AJF63140.1 - 0.11 11.9 \n2176 Ribosomal_L9_N PF01281.19 AJF63142.1 - 0.075 12.6 \n2179 Ribosomal_L9_N PF01281.19 AJF63142.1 - 0.075 12.6 \n2180 MazE_antitoxin PF04014.18 AJF63144.1 - 2.2e-07 30.6 \n2181 PhoU PF01895.19 AJF63144.1 - 0.0023 18.4 \n2182 MazE_antitoxin PF04014.18 AJF63144.1 - 2.2e-07 30.6 \n2183 PhoU PF01895.19 AJF63144.1 - 0.0023 18.4 \n2193 Glyco_hydro_76 PF03663.14 AJF63146.1 - 0.00053 19.6 \n\n bias domain-E-value domain-score domain-bias ... dom rep inc \\\n1530 1.1 0.015 15.7 1.1 ... 1 1 0 \n1535 1.1 0.015 15.7 1.1 ... 1 1 0 \n1547 1.5 0.0024 18.2 1.5 ... 1 1 1 \n1553 1.5 0.0024 18.2 1.5 ... 1 1 1 \n1560 10.3 1.2e-32 113.5 3.1 ... 2 2 2 \n1563 10.3 1.2e-32 113.5 3.1 ... 2 2 2 \n1570 0.1 1.4e-23 83.7 0.1 ... 1 1 1 \n1571 0.0 0.035 13.3 0.0 ... 1 1 0 \n1576 0.1 1.4e-23 83.7 0.1 ... 1 1 1 \n1577 0.0 0.035 13.3 0.0 ... 1 1 0 \n1578 0.1 1.6e-119 399.5 0.1 ... 1 1 1 \n1579 0.0 4.1e-25 88.0 0.0 ... 1 1 1 \n1581 1.6 1.5e-10 41.5 1.6 ... 1 1 1 \n1587 0.1 1.6e-119 399.5 0.1 ... 1 1 1 \n1588 0.0 4.1e-25 88.0 0.0 ... 1 1 1 \n1590 1.6 1.5e-10 41.5 1.6 ... 1 1 1 \n1596 0.9 3.7e-47 160.5 0.9 ... 1 1 1 \n1597 0.0 4.2e-24 84.5 0.0 ... 1 1 1 \n1599 0.9 3.7e-47 160.5 0.9 ... 1 1 1 \n1600 0.0 4.2e-24 84.5 0.0 ... 1 1 1 \n1602 0.8 1.3e-29 101.9 0.8 ... 1 1 1 \n1607 0.8 1.3e-29 101.9 0.8 ... 1 1 1 \n1612 0.8 1.3e-29 101.9 0.8 ... 1 1 1 \n1617 0.8 1.3e-29 101.9 0.8 ... 1 1 1 \n1648 0.1 1.4e-06 28.0 0.1 ... 
2 2 1 \n1649 0.1 1.4e-06 28.0 0.1 ... 2 2 1 \n1662 0.0 0.0025 17.3 0.0 ... 1 1 1 \n1664 0.0 0.0087 16.2 0.0 ... 1 1 1 \n1670 0.0 0.0025 17.3 0.0 ... 1 1 1 \n1672 0.0 0.0087 16.2 0.0 ... 1 1 1 \n... ... ... ... ... ... .. .. .. \n2107 0.2 0.027 14.6 0.2 ... 1 1 0 \n2111 0.2 3.3 7.1 0.0 ... 3 3 0 \n2116 0.2 3.3 7.1 0.0 ... 3 3 0 \n2122 0.4 3.3e-09 36.9 0.1 ... 2 2 1 \n2124 1.5 2.9e-05 23.9 0.9 ... 2 2 1 \n2125 0.3 0.0014 18.3 0.3 ... 1 1 1 \n2129 0.9 0.0083 16.5 0.2 ... 2 2 1 \n2131 0.1 0.074 13.0 0.1 ... 1 1 0 \n2132 0.1 0.13 12.1 0.1 ... 2 2 0 \n2136 0.4 3.3e-09 36.9 0.1 ... 2 2 1 \n2138 1.5 2.9e-05 23.9 0.9 ... 2 2 1 \n2139 0.3 0.0014 18.3 0.3 ... 1 1 1 \n2143 0.9 0.0083 16.5 0.2 ... 2 2 1 \n2145 0.1 0.074 13.0 0.1 ... 1 1 0 \n2146 0.1 0.13 12.1 0.1 ... 2 2 0 \n2150 0.1 2.1e-42 144.0 0.1 ... 1 1 1 \n2151 0.7 0.051 13.9 0.1 ... 3 3 1 \n2153 0.1 2.1e-42 144.0 0.1 ... 1 1 1 \n2154 0.7 0.051 13.9 0.1 ... 3 3 1 \n2157 1.5 1.2e-08 35.1 0.7 ... 2 2 1 \n2160 1.5 1.2e-08 35.1 0.7 ... 2 2 1 \n2165 0.2 3.4 7.1 0.0 ... 3 3 0 \n2169 0.2 3.4 7.1 0.0 ... 3 3 0 \n2176 0.1 0.14 11.7 0.1 ... 1 1 0 \n2179 0.1 0.14 11.7 0.1 ... 1 1 0 \n2180 0.1 5.2e-07 29.4 0.1 ... 1 1 1 \n2181 1.2 0.05 14.1 0.1 ... 2 2 1 \n2182 0.1 5.2e-07 29.4 0.1 ... 1 1 1 \n2183 1.2 0.05 14.1 0.1 ... 2 2 1 \n2193 3.9 0.0016 18.0 0.7 ... 2 2 1 \n\n description \\\n1530 RelE-like toxin of type II toxin-antitoxin sys... \n1535 RelE-like toxin of type II toxin-antitoxin sys... \n1547 RelE-like toxin of type II toxin-antitoxin sys... \n1553 RelE-like toxin of type II toxin-antitoxin sys... \n1560 RNA polymerase Rpb1, domain \n1563 RNA polymerase Rpb1, domain \n1570 RNA polymerase Rpb1, domain \n1571 Dynamin central \n1576 RNA polymerase Rpb1, domain \n1577 Dynamin central \n1578 RNA polymerase Rpb2, domain \n1579 RNA polymerase Rpb2, domain \n1581 RNA polymerase Rpb2, domain \n1587 RNA polymerase Rpb2, domain \n1588 RNA polymerase Rpb2, domain \n1590 RNA polymerase Rpb2, domain \n1596 RNA polymerase beta \n1597 RNA polymerase Rpb2, domain \n1599 RNA polymerase beta \n1600 RNA polymerase Rpb2, domain \n1602 RNA polymerase Rpb5, C-terminal \n1607 RNA polymerase Rpb5, C-terminal \n1612 RNA polymerase Rpb5, C-terminal \n1617 RNA polymerase Rpb5, C-terminal \n1648 Antidote-toxin recognition MazE, bacterial \n1649 Antidote-toxin recognition MazE, bacterial \n1662 Transposase IS66 \n1664 Integrase core \n1670 Transposase IS66 \n1672 Integrase core \n... ... \n2107 Invasion gene expression up-regulator, \n2111 tRNA synthetases class II core domain \n2116 tRNA synthetases class II core domain \n2122 Nucleotidyltransferase \n2124 MarR \n2125 Bacterial regulatory proteins, gntR \n2129 RelE-like toxin of type II toxin-antitoxin sys... \n2131 Sugar-specific transcriptional regulator \n2132 Bacterial regulatory protein, arsR \n2136 Nucleotidyltransferase \n2138 MarR \n2139 Bacterial regulatory proteins, gntR \n2143 RelE-like toxin of type II toxin-antitoxin sys... 
\n2145 Sugar-specific transcriptional regulator \n2146 Bacterial regulatory protein, arsR \n2150 RadC-like JAB \n2151 SWIRM \n2153 RadC-like JAB \n2154 SWIRM \n2157 Nucleotidyltransferase \n2160 Nucleotidyltransferase \n2165 tRNA synthetases class II core domain \n2169 tRNA synthetases class II core domain \n2176 Ribosomal protein L9, N-terminal \n2179 Ribosomal protein L9, N-terminal \n2180 Antidote-toxin recognition MazE, bacterial \n2181 PhoU \n2182 Antidote-toxin recognition MazE, bacterial \n2183 PhoU \n2193 Glycosyl hydrolase family \n\n infile domain interpro pfam \\\n1530 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n1535 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n1547 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n1553 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n1560 ./orfscan/CP010426.1[733481..806920].flat.fast... 4998 3473 4998 \n1563 ./orfscan/CP010426.1[733481..806920].flat.fast... 4998 3473 4998 \n1570 ./orfscan/CP010426.1[733481..806920].flat.fast... 4998 3473 4998 \n1571 ./orfscan/CP010426.1[733481..806920].flat.fast... 1031 298 1031 \n1576 ./orfscan/CP010426.1[733481..806920].flat.fast... 4998 3473 4998 \n1577 ./orfscan/CP010426.1[733481..806920].flat.fast... 1031 298 1031 \n1578 ./orfscan/CP010426.1[733481..806920].flat.fast... 562 2196 562 \n1579 ./orfscan/CP010426.1[733481..806920].flat.fast... 4560 628 4560 \n1581 ./orfscan/CP010426.1[733481..806920].flat.fast... 4567 4731 4567 \n1587 ./orfscan/CP010426.1[733481..806920].flat.fast... 562 2196 562 \n1588 ./orfscan/CP010426.1[733481..806920].flat.fast... 4560 628 4560 \n1590 ./orfscan/CP010426.1[733481..806920].flat.fast... 4567 4731 4567 \n1596 ./orfscan/CP010426.1[733481..806920].flat.fast... 4563 3093 4563 \n1597 ./orfscan/CP010426.1[733481..806920].flat.fast... 4565 2040 4565 \n1599 ./orfscan/CP010426.1[733481..806920].flat.fast... 4563 3093 4563 \n1600 ./orfscan/CP010426.1[733481..806920].flat.fast... 4565 2040 4565 \n1602 ./orfscan/CP010426.1[733481..806920].flat.fast... 1191 186 1191 \n1607 ./orfscan/CP010426.1[733481..806920].flat.fast... 1191 186 1191 \n1612 ./orfscan/CP010426.1[733481..806920].flat.fast... 1191 186 1191 \n1617 ./orfscan/CP010426.1[733481..806920].flat.fast... 1191 186 1191 \n1648 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 3753 4014 \n1649 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 3753 4014 \n1662 ./orfscan/CP010426.1[733481..806920].flat.fast... 3050 1028 3050 \n1664 ./orfscan/CP010426.1[733481..806920].flat.fast... 665 266 665 \n1670 ./orfscan/CP010426.1[733481..806920].flat.fast... 3050 1028 3050 \n1672 ./orfscan/CP010426.1[733481..806920].flat.fast... 665 266 665 \n... ... ... ... ... \n2107 ./orfscan/CP010426.1[733481..806920].flat.fast... 4247 1347 4247 \n2111 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 4823 1409 \n2116 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 4823 1409 \n2122 ./orfscan/CP010426.1[733481..806920].flat.fast... 1909 2530 1909 \n2124 ./orfscan/CP010426.1[733481..806920].flat.fast... 1047 535 1047 \n2125 ./orfscan/CP010426.1[733481..806920].flat.fast... 392 296 392 \n2129 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n2131 ./orfscan/CP010426.1[733481..806920].flat.fast... 1978 602 1978 \n2132 ./orfscan/CP010426.1[733481..806920].flat.fast... 1022 1508 1022 \n2136 ./orfscan/CP010426.1[733481..806920].flat.fast... 1909 2530 1909 \n2138 ./orfscan/CP010426.1[733481..806920].flat.fast... 
1047 535 1047 \n2139 ./orfscan/CP010426.1[733481..806920].flat.fast... 392 296 392 \n2143 ./orfscan/CP010426.1[733481..806920].flat.fast... 5015 2676 5015 \n2145 ./orfscan/CP010426.1[733481..806920].flat.fast... 1978 602 1978 \n2146 ./orfscan/CP010426.1[733481..806920].flat.fast... 1022 1508 1022 \n2150 ./orfscan/CP010426.1[733481..806920].flat.fast... 4002 2097 4002 \n2151 ./orfscan/CP010426.1[733481..806920].flat.fast... 4433 2967 4433 \n2153 ./orfscan/CP010426.1[733481..806920].flat.fast... 4002 2097 4002 \n2154 ./orfscan/CP010426.1[733481..806920].flat.fast... 4433 2967 4433 \n2157 ./orfscan/CP010426.1[733481..806920].flat.fast... 1909 2530 1909 \n2160 ./orfscan/CP010426.1[733481..806920].flat.fast... 1909 2530 1909 \n2165 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 4823 1409 \n2169 ./orfscan/CP010426.1[733481..806920].flat.fast... 1409 4823 1409 \n2176 ./orfscan/CP010426.1[733481..806920].flat.fast... 1281 2149 1281 \n2179 ./orfscan/CP010426.1[733481..806920].flat.fast... 1281 2149 1281 \n2180 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 3753 4014 \n2181 ./orfscan/CP010426.1[733481..806920].flat.fast... 1895 3687 1895 \n2182 ./orfscan/CP010426.1[733481..806920].flat.fast... 4014 3753 4014 \n2183 ./orfscan/CP010426.1[733481..806920].flat.fast... 1895 3687 1895 \n2193 ./orfscan/CP010426.1[733481..806920].flat.fast... 3663 2592 3663 \n\n goterms ipnum \n1530 GO:0007046 2676 \n1535 GO:0007046 2676 \n1547 GO:0007046 2676 \n1553 GO:0007046 2676 \n1560 GO:0008987,GO:0009435 3473 \n1563 GO:0008987,GO:0009435 3473 \n1570 GO:0008987,GO:0009435 3473 \n1571 GO:0004129,GO:0016020,GO:0006118 298 \n1576 GO:0008987,GO:0009435 3473 \n1577 GO:0004129,GO:0016020,GO:0006118 298 \n1578 GO:0003796,GO:0016998 2196 \n1579 GO:0005000,GO:0016021,GO:0007186 628 \n1581 GO:0005975 4731 \n1587 GO:0003796,GO:0016998 2196 \n1588 GO:0005000,GO:0016021,GO:0007186 628 \n1590 GO:0005975 4731 \n1596 GO:0016329,GO:0006915 3093 \n1597 GO:0008648,GO:0007268 2040 \n1599 GO:0016329,GO:0006915 3093 \n1600 GO:0008648,GO:0007268 2040 \n1602 GO:0008083,GO:0005576,GO:0006955 186 \n1607 GO:0008083,GO:0005576,GO:0006955 186 \n1612 GO:0008083,GO:0005576,GO:0006955 186 \n1617 GO:0008083,GO:0005576,GO:0006955 186 \n1648 GO:0008855,GO:0009318,GO:0006308 3753 \n1649 GO:0008855,GO:0009318,GO:0006308 3753 \n1662 GO:0004621,GO:0005576 1028 \n1664 GO:0003735,GO:0005840,GO:0006412 266 \n1670 GO:0004621,GO:0005576 1028 \n1672 GO:0003735,GO:0005840,GO:0006412 266 \n... ... ... 
\n2107 GO:0005529,GO:0005975 1347 \n2111 GO:0016986,GO:0005634,GO:0006352 4823 \n2116 GO:0016986,GO:0005634,GO:0006352 4823 \n2122 GO:0045735 2530 \n2124 GO:0005198,GO:0006928 535 \n2125 GO:0005215,GO:0016020,GO:0006810 296 \n2129 GO:0007046 2676 \n2131 GO:0004559,GO:0005975 602 \n2132 GO:0005234,GO:0016020,GO:0006811 1508 \n2136 GO:0045735 2530 \n2138 GO:0005198,GO:0006928 535 \n2139 GO:0005215,GO:0016020,GO:0006810 296 \n2143 GO:0007046 2676 \n2145 GO:0004559,GO:0005975 602 \n2146 GO:0005234,GO:0016020,GO:0006811 1508 \n2150 GO:0003779,GO:0015629,GO:0007010 2097 \n2151 GO:0005198,GO:0005874,GO:0007018 2967 \n2153 GO:0003779,GO:0015629,GO:0007010 2097 \n2154 GO:0005198,GO:0005874,GO:0007018 2967 \n2157 GO:0045735 2530 \n2160 GO:0045735 2530 \n2165 GO:0016986,GO:0005634,GO:0006352 4823 \n2169 GO:0016986,GO:0005634,GO:0006352 4823 \n2176 GO:0005215,GO:0016020,GO:0006810 2149 \n2179 GO:0005215,GO:0016020,GO:0006810 2149 \n2180 GO:0008855,GO:0009318,GO:0006308 3753 \n2181 GO:0009538,GO:0015979 3687 \n2182 GO:0008855,GO:0009318,GO:0006308 3753 \n2183 GO:0009538,GO:0015979 3687 \n2193 GO:0007155 2592 \n\n[125 rows x 25 columns]\n100% 42 of 42 study items found in association\n100% 42 of 42 study items found in population(12199)\nCalculating 140 uncorrected p-values using fisher_scipy_stats\n 140 GO terms are associated with 3,592 of 12,199 population items\n 12 GO terms are associated with 15 of 42 study items\n 1 GO terms found significant (< 0.5=alpha) after multitest correction: statsmodels fdr_bh\nGO:0042254\t2.1201786161459427e-05\n"
],
[
"for go in gocounts:\n print(go + '\\t' + str(gocounts[go]) )",
"GO:0004412\t5\nGO:0008652\t5\n\t422\nGO:0006355\t21\nGO:0016740\t12\nGO:0008152\t26\nGO:0005554\t23\nGO:0004890\t15\nGO:0004997\t5\nGO:0007214\t15\nGO:0016021\t55\nGO:0007186\t30\nGO:0003676\t5\nGO:0004645\t1\nGO:0005975\t33\nGO:0003677\t22\nGO:0006304\t2\nGO:0017000\t25\nGO:0008270\t3\nGO:0006810\t29\nGO:0005622\t9\nGO:0016218\t25\nGO:0016491\t17\nGO:0016020\t104\nGO:0005215\t29\nGO:0007600\t6\nGO:0006779\t1\nGO:0003779\t4\nGO:0008883\t1\nGO:0003735\t25\nGO:0005840\t22\nGO:0006412\t25\nGO:0016534\t1\nGO:0005634\t23\nGO:0016533\t1\nGO:0008324\t5\nGO:0006812\t11\nGO:0006350\t6\nGO:0003968\t4\nGO:0005576\t20\nGO:0005524\t27\nGO:0006265\t8\nGO:0004500\t10\nGO:0006584\t10\nGO:0006099\t12\nGO:0008964\t12\nGO:0005505\t5\nGO:0004958\t2\nGO:0005186\t5\nGO:0006953\t1\nGO:0003794\t1\nGO:0006813\t12\nGO:0005249\t11\nGO:0008026\t1\nGO:0008565\t4\nGO:0015628\t1\nGO:0015627\t1\nGO:0005180\t9\nGO:0007334\t6\nGO:0006118\t22\nGO:0004556\t5\nGO:0006814\t1\nGO:0004974\t1\nGO:0008508\t1\nGO:0009249\t6\nGO:0005198\t15\nGO:0004559\t17\nGO:0005234\t16\nGO:0006811\t19\nGO:0006928\t12\nGO:0016855\t1\nGO:0006520\t1\nGO:0007342\t4\nGO:0005126\t2\nGO:0006955\t3\nGO:0006064\t2\nGO:0008880\t2\nGO:0008076\t5\nGO:0008937\t8\nGO:0008898\t1\nGO:0005737\t5\nGO:0006004\t1\nGO:0008736\t1\nGO:0007018\t3\nGO:0005874\t3\nGO:0016003\t3\nGO:0006508\t24\nGO:0004197\t13\nGO:0009236\t2\nGO:0005746\t1\nGO:0005489\t1\nGO:0016993\t2\nGO:0003774\t1\nGO:0009288\t1\nGO:0006935\t3\nGO:0004945\t3\nGO:0006031\t2\nGO:0004100\t2\nGO:0009058\t4\nGO:0006281\t3\nGO:0016788\t3\nGO:0004977\t1\nGO:0004563\t2\nGO:0009117\t1\nGO:0016787\t1\nGO:0005509\t5\nGO:0006487\t1\nGO:0006260\t4\nGO:0003887\t2\nGO:0005179\t3\nGO:0006310\t9\nGO:0004909\t2\nGO:0003934\t1\nGO:0005923\t1\nGO:0015049\t1\nGO:0015947\t1\nGO:0005732\t4\nGO:0006371\t4\nGO:0008248\t4\nGO:0016758\t2\nGO:0006367\t3\nGO:0003702\t3\nGO:0005667\t2\nGO:0003797\t2\nGO:0006961\t2\nGO:0003700\t9\nGO:0004553\t4\nGO:0001619\t1\nGO:0008410\t1\nGO:0004519\t3\nGO:0006308\t5\nGO:0007165\t4\nGO:0004747\t1\nGO:0006014\t1\nGO:0001584\t7\nGO:0005267\t1\nGO:0003723\t7\nGO:0030259\t1\nGO:0016881\t1\nGO:0004725\t3\nGO:0006470\t4\nGO:0009273\t1\nGO:0008289\t4\nGO:0005549\t1\nGO:0007608\t1\nGO:0030639\t2\nGO:0005351\t3\nGO:0009401\t2\nGO:0015098\t3\nGO:0015689\t3\nGO:0007602\t3\nGO:0006464\t2\nGO:0008474\t2\nGO:0005764\t2\nGO:0015662\t5\nGO:0005856\t4\nGO:0007160\t3\nGO:0016526\t4\nGO:0004802\t1\nGO:0004759\t1\nGO:0008307\t1\nGO:0007517\t1\nGO:0006730\t1\nGO:0008236\t2\nGO:0009279\t2\nGO:0005875\t1\nGO:0006886\t10\nGO:0007275\t2\nGO:0007631\t1\nGO:0019904\t1\nGO:0008761\t5\nGO:0009103\t5\nGO:0045005\t1\nGO:0008715\t1\nGO:0008654\t1\nGO:0019953\t1\nGO:0006823\t1\nGO:0015076\t1\nGO:0008234\t1\nGO:0008033\t1\nGO:0006571\t1\nGO:0004665\t1\nGO:0008643\t1\nGO:0015934\t3\nGO:0030163\t1\nGO:0016755\t1\nGO:0005718\t3\nGO:0007001\t2\nGO:0004949\t2\nGO:0007166\t1\nGO:0004888\t1\nGO:0005496\t1\nGO:0004928\t3\nGO:0004799\t1\nGO:0006231\t1\nGO:0008299\t1\nGO:0004452\t1\nGO:0003824\t3\nGO:0009307\t1\nGO:0009036\t1\nGO:0006352\t3\nGO:0016986\t3\nGO:0030036\t1\nGO:0009497\t3\nGO:0009405\t2\nGO:0005216\t2\nGO:0015485\t1\nGO:0005093\t1\nGO:0015031\t2\nGO:0003904\t1\nGO:0004190\t4\nGO:0004918\t2\nGO:0006207\t1\nGO:0009274\t2\nGO:0009252\t2\nGO:0004590\t1\nGO:0016916\t2\nGO:0008151\t3\nGO:0008528\t1\nGO:0004985\t1\nGO:0008810\t2\nGO:0008483\t1\nGO:0005673\t1\nGO:0004722\t1\nGO:0008287\t1\nGO:0005788\t1\nGO:0004872\t1\nGO:0003793\t2\nGO:0015070\t3\nGO:0006270\t1\nGO:0005727\t1\nGO:0005177\t1\nGO:0007155\t2\nGO:0000145\t1\nGO:0006887\t1
\nGO:0016624\t1\nGO:0006865\t3\nGO:0005515\t1\nGO:0015359\t3\nGO:0006120\t2\nGO:0008137\t2\nGO:0005748\t2\nGO:0005125\t2\nGO:0045735\t3\nGO:0009244\t1\nGO:0008415\t1\nGO:0009165\t2\nGO:0004146\t2\nGO:0016151\t1\nGO:0006869\t1\nGO:0009435\t3\nGO:0004514\t1\nGO:0007596\t1\nGO:0004540\t2\nGO:0006803\t1\nGO:0004364\t1\nGO:0004930\t3\nGO:0016494\t1\nGO:0009255\t3\nGO:0004456\t3\nGO:0016706\t2\nGO:0019538\t2\nGO:0004991\t1\nGO:0009378\t1\nGO:0006952\t1\nGO:0004751\t1\nGO:0009052\t1\nGO:0003918\t1\nGO:0006268\t1\nGO:0005694\t1\nGO:0006859\t1\nGO:0015542\t1\nGO:0003899\t1\nGO:0005199\t2\nGO:0030115\t2\nGO:0008083\t2\nGO:0000103\t1\nGO:0016772\t1\nGO:0008374\t1\nGO:0003746\t1\nGO:0008203\t1\nGO:0006414\t1\nGO:0008803\t1\nGO:0006725\t1\nGO:0008246\t1\nGO:0003743\t1\nGO:0006413\t1\nGO:0005432\t1\nGO:0006816\t1\nGO:0005523\t1\nGO:0015979\t2\nGO:0009374\t1\nGO:0016493\t1\nGO:0015629\t2\nGO:0007010\t2\nGO:0006370\t1\nGO:0004482\t1\nGO:0007283\t1\nGO:0008320\t1\nGO:0016666\t2\nGO:0006807\t2\nGO:0004871\t3\nGO:0004735\t1\nGO:0006561\t1\nGO:0008094\t2\nGO:0006804\t1\nGO:0004601\t1\nGO:0004536\t1\nGO:0004982\t1\nGO:0008769\t1\nGO:0004185\t1\nGO:0007046\t3\nGO:0008987\t2\nGO:0004129\t1\nGO:0016998\t1\nGO:0005000\t1\nGO:0003796\t1\nGO:0006915\t1\nGO:0007268\t1\nGO:0016329\t1\nGO:0008648\t1\nGO:0008855\t2\nGO:0009318\t2\nGO:0004621\t1\nGO:0008725\t1\nGO:0004866\t1\nGO:0004635\t1\nGO:0000105\t1\nGO:0003767\t1\nGO:0008375\t1\nGO:0016597\t1\nGO:0009002\t1\nGO:0005529\t2\nGO:0003713\t1\nGO:0004107\t1\nGO:0009073\t1\nGO:0009538\t1\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e0773179473aaf63b0e0c27746d99f6094c293 | 258,862 | ipynb | Jupyter Notebook | Red Wine Quality Analysis.ipynb | RagsX137/Red-Wine-Quality-Analysis | 09c8e38a50e97c6beb3c5578929a18d3d9bcc07d | [
"Apache-2.0"
] | null | null | null | Red Wine Quality Analysis.ipynb | RagsX137/Red-Wine-Quality-Analysis | 09c8e38a50e97c6beb3c5578929a18d3d9bcc07d | [
"Apache-2.0"
] | null | null | null | Red Wine Quality Analysis.ipynb | RagsX137/Red-Wine-Quality-Analysis | 09c8e38a50e97c6beb3c5578929a18d3d9bcc07d | [
"Apache-2.0"
] | 1 | 2019-01-06T16:32:09.000Z | 2019-01-06T16:32:09.000Z | 167.115558 | 23,446 | 0.86815 | [
[
[
"# Analysis of Red Wine Quality",
"_____no_output_____"
],
[
"# Index\n## 1. Reading the data and importing the libraries\n## 2. EDA\n## 3. Correlation Matrix \n## 4. Modeling \n - Linear Model\n - Weighted KNN\n - Random Forest\n - Conditional Inference Random Forest\n - Decision Tree Model\n## 5. Modeling Results Table\n## 6. Conclusion",
"_____no_output_____"
],
[
"## 1. Reading the data and importing the libraries",
"_____no_output_____"
]
],
[
[
"library(tidyverse)\nlibrary(grid)\nlibrary(gridExtra)\nlibrary(e1071)\nlibrary(caret)",
"_____no_output_____"
],
[
"df1 <- read.csv(\"C:/Users/kausha2/Documents/Data Analytics/DataSets/winequality/winequality/winequality-red.csv\", sep = \";\")",
"_____no_output_____"
],
[
"head(df1)",
"_____no_output_____"
],
[
"summary(df1$quality)",
"_____no_output_____"
]
],
[
[
"### Creating a new variable --> WineAttribute : Good (1) or bad (0) for binary classification",
"_____no_output_____"
]
],
[
[
"df1$wine_attribute <- ifelse(df1$quality > 5, 1, 0 )",
"_____no_output_____"
],
[
"head(df1)",
"_____no_output_____"
]
],
[
[
"## 2. EDA",
"_____no_output_____"
],
[
"#### How is the wine quality distributed?",
"_____no_output_____"
]
],
[
[
"qplot(df1$quality, geom=\"histogram\", binwidth = 1)",
"_____no_output_____"
]
],
[
[
"- The dataset is dominated by values 5 and 6. There are less wines with a quality of 4 and 7 whereas there are hardly any wines that have values less 3 and 8\n- there are two options : either split the quality variable into 3 parts by quantiles : top 20, middle 60 and bottom 20 or split based on the mean i.e. Good wines are those which have values >5 and bad wines are those with values less or equal to 5",
"_____no_output_____"
],
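    [
    "A quick sketch of the quantile-based alternative (added for illustration; the analysis below keeps the mean-based split already stored in `wine_attribute`):\n\n```r\n# bottom ~20% / middle ~60% / top ~20% of quality; unique() guards against\n# coinciding cut points on the discrete 3-8 quality scale\nq_breaks <- unique(quantile(df1$quality, probs = c(0, 0.2, 0.8, 1)))\ndf1$quality_band <- cut(df1$quality, breaks = q_breaks, include.lowest = TRUE, labels = FALSE)\ntable(df1$quality_band, df1$quality)\n```",
    "_____no_output_____"
    ],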
[
"#### Looking at the different histograms to check the shape of the distributions",
"_____no_output_____"
]
],
[
[
"p1 <- qplot(df1$pH, geom=\"histogram\", binwidth = 0.05) \np2 <- qplot(df1$alcohol, geom=\"histogram\",binwidth = 0.099) \np3 <- qplot(df1$volatile.acidity, geom=\"histogram\",binwidth = 0.05) \np4 <- qplot(df1$citric.acid, geom=\"histogram\",binwidth = 0.05)\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
]
],
[
[
"- We see that pH looks normally distributed \n- Volatile acidity, Alcohol and citric acid have a positive skew shape but dont seem to follow a particular distribution",
"_____no_output_____"
]
],
[
[
"p1 <- qplot(df1$residual.sugar, geom=\"histogram\", binwidth = 0.1) \np2 <- qplot(df1$chlorides, geom=\"histogram\",binwidth = 0.01) \np3 <- qplot(df1$density, geom=\"histogram\",binwidth = 0.001) \np4 <- qplot(df1$free.sulfur.dioxide, geom=\"histogram\",binwidth = 1)\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
]
],
[
[
"- Density seems to follow a normal distribution. \n- Residual sugar and chlorides seem to follow a normal distribution initially but flatten out later\n- Free sulfur dioxide content seems to have a positive skew shaped distribution",
"_____no_output_____"
]
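    ,
    [
    "The skew is easy to quantify: `e1071` (loaded at the start) provides `skewness()`. A quick numeric check of the variables plotted above (added as an illustrative aside):\n\n```r\nskew_vars <- c(\"pH\", \"alcohol\", \"volatile.acidity\", \"citric.acid\",\n               \"residual.sugar\", \"chlorides\", \"density\", \"free.sulfur.dioxide\")\nround(sapply(df1[, skew_vars], e1071::skewness), 2)\n```",
    "_____no_output_____"
    ]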
],
[
[
"p1 <- qplot(df1$pH, geom=\"density\") \np2 <- qplot(df1$alcohol, geom=\"density\") \np3 <- qplot(df1$volatile.acidity, geom=\"density\") \np4 <- qplot(df1$citric.acid, geom=\"density\")\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
],
[
"p1 <- qplot(df1$residual.sugar, geom=\"density\") \np2 <- qplot(df1$chlorides, geom=\"density\") \np3 <- qplot(df1$density, geom=\"density\") \np4 <- qplot(df1$free.sulfur.dioxide, geom=\"density\")\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
]
],
[
[
"- The kernel density plots seem to agree with the histograms and our conclusions",
"_____no_output_____"
]
],
[
[
"p1 <- ggplot(df1, aes(x=\"pH\", y=pH)) + stat_boxplot(geom ='errorbar') + geom_boxplot()\np2 <- ggplot(df1, aes(x=\"alcohol\", y=alcohol)) + stat_boxplot(geom ='errorbar') + geom_boxplot()\np3 <- ggplot(df1, aes(x=\"volatile.acidity\", y=volatile.acidity)) + stat_boxplot(geom ='errorbar') + geom_boxplot()\np4 <- ggplot(df1, aes(x=\"citric.acid\", y=citric.acid)) + stat_boxplot(geom ='errorbar') + geom_boxplot()\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
]
],
[
[
"- pH and acidity seem to have a lot of outliers.\n- The pH of an acidic substance is usally below 5 but for wines it seems to concentrate in the area between 2.7 and 4.0.\n- The alcohol content is between 8.4 to 15 but there seem to be many outliers. The Age of the wine also affects its alcohol content. This could explain the outliers but since we don't have an age variable there is no way to check it.",
"_____no_output_____"
],
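    [
    "A rough way to quantify that impression (added sketch, using the standard 1.5*IQR boxplot rule):\n\n```r\niqr_outliers <- function(x) {\n  lo <- quantile(x, 0.25) - 1.5 * IQR(x)\n  hi <- quantile(x, 0.75) + 1.5 * IQR(x)\n  sum(x < lo | x > hi)\n}\nsapply(df1[, c(\"pH\", \"alcohol\", \"volatile.acidity\", \"citric.acid\")], iqr_outliers)\n```",
    "_____no_output_____"
    ],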
[
"## 3. Correlation Matrix\n### Checking the Correlation between variables\n (sourced from : http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization)",
"_____no_output_____"
]
],
[
[
"#data(attitude)\ndf2 <- df1\ndf2$wine_attribute <- NULL\n\nlibrary(ggplot2)\nlibrary(reshape2)\n\n\n#(cor(df1) ) # correlation matrix\ncormat <- cor(df2)\nmelted_cormat <- melt(cor(df2))\n\n#ggplot(data = melted_cormat, aes(x=Var1, y=Var2, fill=value)) + \n # geom_tile()\n\n# Get lower triangle of the correlation matrix\n get_lower_tri<-function(cormat){\n cormat[upper.tri(cormat)] <- NA\n return(cormat)\n }\n # Get upper triangle of the correlation matrix\n get_upper_tri <- function(cormat){\n cormat[lower.tri(cormat)]<- NA\n return(cormat)\n }\n\nupper_tri <- get_upper_tri(cormat)\n#upper_tri\n\n# Melt the correlation matrix\nlibrary(reshape2)\nmelted_cormat <- melt(upper_tri, na.rm = TRUE)\n# Heatmap\n\n\nreorder_cormat <- function(cormat){\n# Use correlation between variables as distance\ndd <- as.dist((1-cormat)/2)\nhc <- hclust(dd)\ncormat <-cormat[hc$order, hc$order]\n}\n\n\n# Reorder the correlation matrix\ncormat <- reorder_cormat(cormat)\nupper_tri <- get_upper_tri(cormat)\n# Melt the correlation matrix\nmelted_cormat <- melt(upper_tri, na.rm = TRUE)\n# Create a ggheatmap\nggheatmap <- ggplot(melted_cormat, aes(Var2, Var1, fill = value))+\n geom_tile(color = \"white\")+\n scale_fill_gradient2(low = \"blue\", high = \"red\", mid = \"white\", \n midpoint = 0, limit = c(-1,1), space = \"Lab\", \n name=\"Pearson\\nCorrelation\") +\n theme_minimal()+ # minimal theme\n theme(axis.text.x = element_text(angle = 45, vjust = 1, \n size = 12, hjust = 1))+\n coord_fixed()\n# Print the heatmap\n#print(ggheatmap)\n\nggheatmap + \ngeom_text(aes(Var2, Var1, label = round(value,2) ), color = \"black\", size = 3) +\ntheme(\n axis.title.x = element_blank(),\n axis.title.y = element_blank(),\n panel.grid.major = element_blank(),\n panel.border = element_blank(),\n panel.background = element_blank(),\n axis.ticks = element_blank(),\n legend.justification = c(1, 0),\n legend.position = c(0.6, 0.7),\n legend.direction = \"horizontal\")+\n guides(fill = guide_colorbar(barwidth = 7, barheight = 1,\n title.position = \"top\", title.hjust = 0.5))",
"Warning message:\n\"package 'reshape2' was built under R version 3.3.2\"\nAttaching package: 'reshape2'\n\nThe following object is masked from 'package:tidyr':\n\n smiths\n\n"
]
],
[
[
"- The values in Red are positively correlated while those in Blue are negatively correlated. The density of the color determines the strength of correlation.\n- Quality has a negative correlation with volatile acidity, and total sulfur dioxide content. While it has a positive correlation with alcohol content and citric acid.\n- It can be seen that pH and fixed acidity have a strong negative correlation, \n- Residual sugar and sulphates have a very slight positive correlation\n- Free sulfur dioxide and total sulfur dioxide are strongly positively correlated ( as expected ). But the fixed acidity and volatile acidity are negatively correlated. Interesting fact that could be used for modeling..",
"_____no_output_____"
]
],
[
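    [
    "# Added sketch: back up the reading of the heatmap by ranking the variables by their\n# Pearson correlation with quality (df2 comes from the heatmap cell above, i.e. df1\n# without wine_attribute).\nround(sort(cor(df2)[, \"quality\"], decreasing = TRUE), 2)",
    "_____no_output_____"
    ],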
[
"p1 <- ggplot(df1, aes(x= volatile.acidity, y= quality)) + geom_point() + geom_smooth(method=lm)\np2 <- ggplot(df1, aes(x= total.sulfur.dioxide, y= quality)) + geom_point() + geom_smooth(method=lm)\np3 <- ggplot(df1, aes(x= alcohol, y= quality)) + geom_point() + geom_smooth(method=lm)\np4 <- ggplot(df1, aes(x= citric.acid, y= quality)) + geom_point() + geom_smooth(method=lm)\n#p5 <- ggplot(df1, aes(x= sulphates, y= quality)) + geom_point() + geom_smooth(method=lm)\n\ngrid.arrange(p1,p2,p3,p4, ncol=2, nrow=2)",
"_____no_output_____"
]
],
[
[
"### This confirms our analysis from the correlation matrix",
"_____no_output_____"
],
[
"## 4. Modeling\n\n### We'll be using a 10-fold cross validation\n\nWe perform the 10 fold CV on the learning dataset and try to predict the valid dataset",
"_____no_output_____"
]
],
[
[
"# Train Test Split\nm <- dim(df1)[1] # Select the rows of iris\n\nval <- sample(1:m, size = round(m/3), replace = FALSE, prob = rep(1/m, m)) \n\ndf1.learn <- df1[-val,] \t# train\ndf1.valid <- df1[val,]\t# test\n\n\n# 10 Fold CV\n\nlibrary(caret)\n\n# define training control\ntrain_control <- trainControl(method=\"cv\", number=10)\n\n#trControl <- train_control",
"_____no_output_____"
]
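    ,
    [
    "# Added sketch (an assumption about intent, not part of the original run): train_control\n# is a caret trainControl object, so the 10-fold CV described above can be wired up\n# directly with caret::train on the learning set, e.g. a plain k-nearest-neighbour\n# classifier on the 11 physico-chemical predictors.\nknn_cv <- train(x = df1.learn[, 1:11], y = factor(df1.learn$wine_attribute),\n                method = \"knn\", trControl = train_control)\nknn_cv",
    "_____no_output_____"
    ]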
],
[
[
"### The linear model : trying to predict wine quality from the variables",
"_____no_output_____"
]
],
[
[
"head(df1,1)",
"_____no_output_____"
],
[
"model1 <- lm(as.numeric(quality)~ 0 + volatile.acidity + chlorides\n + log(free.sulfur.dioxide) + log(total.sulfur.dioxide) + density + pH + sulphates + alcohol, data = df1)\nsummary(model1)",
"_____no_output_____"
],
[
"df1.valid$prediction <- predict(model1,df1.valid)",
"_____no_output_____"
],
[
"df1.valid$prediction_lm <- round(df1.valid$prediction)",
"_____no_output_____"
],
[
"x <- confusionMatrix(df1.valid$prediction_lm, df1.valid$quality)\nacc_lm <- x$overall[1]\n\nprint(c(\"accuracy of linear model is :\", (acc_lm*100) ))",
"Warning message in levels(reference) != levels(data):\n\"longer object length is not a multiple of shorter object length\"Warning message in confusionMatrix.default(df1.valid$prediction_lm, df1.valid$quality):\n\"Levels are not in the same order for reference and data. Refactoring data to match.\""
],
[
"ggplot(df1.valid) + geom_point(aes(pH, quality), color = \"red\") + geom_point(aes(pH, prediction)) ",
"_____no_output_____"
]
],
[
[
" From the above graph we see that this is not what we intended. Therefore we move on to classification models\n\n## Weighted KNN\n\nUsing multiple K, distance metrics and kernels",
"_____no_output_____"
]
],
[
[
"require(kknn)\n",
"_____no_output_____"
],
[
"model2 <- kknn( factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn, df1.valid) \nx <- confusionMatrix(df1.valid$wine_attribute, model2$fit)\ny <- (x$table)\ny\nacc_kknn1 <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\nprint(c(\"accuracy of KKNN is :\", round(acc_kknn1*100,3) ))",
"_____no_output_____"
],
[
"model3 <- train.kknn(factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn,trControl = train_control, kmax = 15, kernel = c(\"triangular\", \"epanechnikov\", \"biweight\", \"triweight\", \"cos\", \"inv\", \"gaussian\", \"rank\", \"optimal\"), distance = 1)\nsummary(model3)\n\nx <- confusionMatrix(predict(model3, df1.valid), df1.valid$wine_attribute)\ny <- (x$table)\ny\nacc_kknn2 <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\nprint(c(\"accuracy of KKNN is :\", round(acc_kknn2*100,3) ))",
"\nCall:\ntrain.kknn(formula = factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH + sulphates + alcohol, data = df1.learn, kmax = 15, distance = 1, kernel = c(\"triangular\", \"epanechnikov\", \"biweight\", \"triweight\", \"cos\", \"inv\", \"gaussian\", \"rank\", \"optimal\"), trControl = train_control)\n\nType of response variable: nominal\nMinimal misclassification: 0.2045028\nBest kernel: inv\nBest k: 11\n"
],
[
"model4 <- train.kknn(factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn,trControl = train_control, kmax = 15, kernel = c(\"triangular\", \"epanechnikov\", \"biweight\", \"triweight\", \"cos\", \"inv\", \"gaussian\", \"rank\", \"optimal\"), distance = 5)\nsummary(model4)\n\nx <- confusionMatrix(predict(model4, df1.valid), df1.valid$wine_attribute)\ny <- (x$table)\ny\nacc_kknn3 <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\nprint(c(\"accuracy of KKNN is :\", round(acc_kknn3*100,3) ))",
"\nCall:\ntrain.kknn(formula = factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH + sulphates + alcohol, data = df1.learn, kmax = 15, distance = 5, kernel = c(\"triangular\", \"epanechnikov\", \"biweight\", \"triweight\", \"cos\", \"inv\", \"gaussian\", \"rank\", \"optimal\"), trControl = train_control)\n\nType of response variable: nominal\nMinimal misclassification: 0.206379\nBest kernel: inv\nBest k: 13\n"
]
],
[
[
"Weighted KKNN gave us decent results. Lets see if we can improve on it.",
"_____no_output_____"
],
[
"# Tree Models\n## Random Forest",
"_____no_output_____"
]
],
[
[
"library(randomForest)",
"_____no_output_____"
],
[
"model5 <- randomForest(as.factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn,trControl = train_control, importance=TRUE, ntree=2000)",
"_____no_output_____"
],
[
"df1.valid$prediction <- predict(model5, df1.valid)",
"_____no_output_____"
],
[
"x <- confusionMatrix(df1.valid$prediction, df1.valid$wine_attribute)\n\ny <- (x$table)\n\ny\n\nacc_rf <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\n\nprint(c(\"accuracy of Random Forest is :\", round(acc_rf*100,3) ))",
"_____no_output_____"
],
[
"importance(model5)",
"_____no_output_____"
],
[
"varImpPlot(model5) # importance of each variable",
"_____no_output_____"
]
],
[
[
"## Ensembling Random Forest with the Conditional Inference Tree",
"_____no_output_____"
]
],
[
[
"library(party)",
"_____no_output_____"
],
[
"model5x <- cforest(as.factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn, controls=cforest_unbiased(ntree=2000, mtry=3))\n\n",
"_____no_output_____"
],
[
"df1.valid$pred_cforest <- predict(model5x, df1.valid, OOB=TRUE, type = \"response\")",
"_____no_output_____"
],
[
"x <- confusionMatrix(df1.valid$pred_cforest, df1.valid$wine_attribute)\n\ny <- (x$table)\n\ny\n\nacc_cf <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\n\nprint(c(\"accuracy of Conditional Forest is :\", round(acc_cf*100,3) ))",
"_____no_output_____"
]
],
[
[
"## Decision Trees using Rpart",
"_____no_output_____"
]
],
[
[
"library(rattle)\nlibrary(rpart.plot)\nlibrary(RColorBrewer)\nlibrary(rpart)",
"_____no_output_____"
],
[
"rpart.grid <- expand.grid(.cp=0.2)\n\nmodel6 <- train(as.factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn, method=\"rpart\",trControl = train_control,tuneGrid=rpart.grid)",
"_____no_output_____"
],
[
"# How one of these trees look like\n\nmodel6s <- rpart(as.factor(wine_attribute) ~ fixed.acidity + volatile.acidity + citric.acid + residual.sugar\n + chlorides + free.sulfur.dioxide + total.sulfur.dioxide + density + pH\n + sulphates + alcohol, df1.learn, method = \"class\")\n\nfancyRpartPlot(model6s)",
"_____no_output_____"
],
[
"df1.valid$pred_dtree <- predict(model6, df1.valid)",
"_____no_output_____"
],
[
"x <- confusionMatrix(df1.valid$pred_dtree, df1.valid$wine_attribute)\n\ny <- (x$table)\n\ny\n\nacc_dt <- (y[1,1]+y[2,2]) / (y[1,1]+y[1,2]+y[2,2]+y[2,1])\n\nprint(c(\"accuracy of Decision Tree classifier is :\", round(acc_dt*100,3) ))",
"_____no_output_____"
]
],
[
[
"## 5. Modeling Results Table",
"_____no_output_____"
]
],
[
[
"Model_Name <- c(\"Linear Model\", \"Simple_KKNN\",\"KKNN_dist1\",\"KKNN_dist2\", \"RandomForest\", \"Conditional Forest\", \"Decision Tree\")\nOverall_Accuracy <- c(acc_lm*100, acc_kknn1*100, acc_kknn2*100, acc_kknn3*100, acc_rf*100, acc_cf*100, acc_dt*100)\n\nfinal <- data.frame(Model_Name,Overall_Accuracy)\n\nfinal$Overall_Accuracy <- round( final$Overall_Accuracy, 3)",
"_____no_output_____"
],
[
"final",
"_____no_output_____"
]
],
[
[
"## 6. Conclusion \n\n### The linear model gives a very high R Squared value(0.987) but it is nowhere close to the values we need as seen from the graph. The accuracy of the linear model is very low.\n### Random Forest gives an overall best accuracy. \n### The models will give better results had the wine been split into 3 categories instead of two. This is largely because wines of quality 5 and 6 dominate the dataset.\n\n### The reason for using the different algorithms :\n- Linear Regression : Most widely used model in many real world applications. Though quality is a discrete variable, I wanted to see how the Linear Model stacks up against classifiers.\n- KKNN : The weighted KNN has an advantage over the standard KNN as it takes into account kernel densities.\n- Tree Models: The Random Forest is a great classifier overall and its robust enough to be used in the stock condition. Decision tree forms the most basic tree based classifier and its also easy to visualize.\n- The Conditional Inference forest ensembles a randomForest but uses the Conditional Inference Tree as its estimator. However, it is not easy to run a 10-fold cross validation on this model.\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7e07b031d59ca648071ddcdc6f4f95fda0eccdc | 8,137 | ipynb | Jupyter Notebook | week04/week04 - Get Data (Clean version).ipynb | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null | week04/week04 - Get Data (Clean version).ipynb | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null | week04/week04 - Get Data (Clean version).ipynb | AnaRita93/spiced_projects | 64f0caec4008cc9ccb528e71ec16afba78728b8e | [
"MIT"
] | null | null | null | 57.302817 | 5,323 | 0.554135 | [
[
[
"#Set a user agent:\nheaders = {\"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36\"}\ntimer = 1 #second",
"_____no_output_____"
],
[
"import requests\nimport re\nimport time\n\n#Inspect a lyrics webpage and find all artists songs linked\nflorence = 'https://www.azlyrics.com/f/florencethemachine.html'\n",
"_____no_output_____"
],
[
"#sleep? NO - its just one request\nflorence_response = requests.get(florence, headers=headers)",
"_____no_output_____"
],
[
"florence_response.text[0:10000]",
"_____no_output_____"
],
[
"# find a way to extract all the urls that point to the lyrics from that page\n# test on https://regex101.com/\nall_florence_links = re.findall('\\/lyrics\\/florencethemachine\\/.*html', florence_response.text)",
"_____no_output_____"
],
[
"all_florence_links",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e08819207b6f43311e4cc8e03799c76714a37e | 221,332 | ipynb | Jupyter Notebook | scripts/.ipynb_checkpoints/E. Figures 6 and 7 [R]-checkpoint.ipynb | aakhmetz/AkhmKim2019Scripts | c348f6702a135e30aea5fc1eb3d8f4ca18b146e3 | [
"MIT"
] | 1 | 2019-11-04T00:10:17.000Z | 2019-11-04T00:10:17.000Z | scripts/.ipynb_checkpoints/E. Figures 6 and 7 [R]-checkpoint.ipynb | aakhmetz/AkhmKim2019Scripts | c348f6702a135e30aea5fc1eb3d8f4ca18b146e3 | [
"MIT"
] | null | null | null | scripts/.ipynb_checkpoints/E. Figures 6 and 7 [R]-checkpoint.ipynb | aakhmetz/AkhmKim2019Scripts | c348f6702a135e30aea5fc1eb3d8f4ca18b146e3 | [
"MIT"
] | 1 | 2019-11-04T00:10:01.000Z | 2019-11-04T00:10:01.000Z | 437.41502 | 66,990 | 0.923418 | [
[
[
"libraries = c(\"dplyr\",\"ggplot2\",\"gridExtra\",\"RColorBrewer\",\"zoo\",\"scales\")\nfor(x in libraries) {\n library(x,character.only=TRUE,warn.conflicts=FALSE) }\n\nif (Sys.info()[['sysname']]=='Windows') {\n windowsFonts(Times = windowsFont(\"Times New Roman\"))\n theme_set(theme_bw(base_size=12,base_family='Times')) \n} else { theme_set(theme_bw(base_size=12)) }\n\n# Initialization of array for recorded plots\nplot_point_sizes <- c(); nm <- c(); plot_point_sizes <- list()\n\n\n# clrs = c(\"black\",brewer.pal(8,\"Set1\")[4],brewer.pal(8,\"Set1\")[2])\n# clrs = rep(brewer.pal(8,\"Set1\")[2],3)\nclrs = rep(brewer.pal(8,\"Greys\")[6],3)\n\nclrs_plt = brewer.pal(8,\"Spectral\") %>% rev #c(\"white\",brewer.pal(6,\"YlOrBr\"))",
"Warning message:\n“package ‘dplyr’ was built under R version 3.5.2”"
],
[
"cs = c(3.6,3.2)\noptions(repr.plot.width=cs[1],repr.plot.height=cs[2])\n\ncln_nms = c(\"time\",\"sigma\",\"resistance\",\"size\")\nread.table(file=\"../figures/draft/Fig7-trj_periodic.csv\",header=FALSE,sep=\",\",col.names=cln_nms) %>% \n as.data.frame %>% mutate(time=time/30,resistance=100*resistance) -> df_periodic\nread.table(file=\"../figures/draft/Fig7-trj_const.csv\",header=FALSE,sep=\",\",col.names=cln_nms) %>% \n as.data.frame %>% mutate(time=time/30,resistance=100*resistance) -> df_const\nread.table(file=\"../figures/draft/Fig7-trj_optimal.csv\",header=FALSE,sep=\",\",col.names=cln_nms) %>% \n as.data.frame %>% mutate(time=(max(time)-time)/30,resistance=100*resistance) -> df_optimal\n\nprint(\"Static\")\ndf_const %>% tail(1) %>% .$size\n\nprint(\"Periodic\")\ndf_periodic %>% tail(1) %>% .$size\n\nprint(\"Optimal\")\ndf_optimal %>% tail(1) %>% .$size\n\nprint(\"Plotting\")\nsz = 1.75; fc = .5\nx0 = .4; len_seg = 1.8\ny0 = 1.9\n\np1 = df_periodic %>% ggplot(aes(x=time,y=size)) +\n geom_hline(yintercept=1,size=.25,color=\"black\",linetype=\"dashed\") +\n geom_path(color=clrs[1],size=sz) + \n geom_path(aes(color=sigma),lineend=\"round\",size=sz*fc) +\n geom_path(data=df_const,color=clrs[2],size=sz) + \n geom_path(data=df_const,aes(color=sigma),lineend=\"round\",size=sz*fc) +\n# geom_path(data=df_optimal,aes(x=max(time)-time),color=clrs[3],size=sz) + \n# geom_path(data=df_optimal,aes(x=max(time)-time,color=sigma),lineend=\"round\",size=sz*fc) \n geom_path(data=df_optimal,aes(x=time,y=filter(df_optimal,time==min(time))$size/size),color=clrs[3],size=sz) + \n geom_path(data=df_optimal,aes(x=time,y=filter(df_optimal,time==min(time))$size/size,color=sigma),lineend=\"round\",size=sz*fc) +\n theme_bw(base_size=12,base_family='Times') + \n labs(x=\"Time (months)\",y=\"Fold change in tumor size\") +\n scale_color_gradientn(limits=c(0,1),oob=squish, \n colours=clrs_plt,\n values=seq(0,1,length.out=6)) +\n scale_x_continuous(expand=c(0,0),breaks = seq(0,18,6)) + \n scale_y_continuous(expand=c(0,0)) +\n coord_cartesian(ylim=c(0.7,1.5)) +\n guides(color=FALSE) +\n theme(\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n axis.title.y=element_text(vjust=7), \n plot.margin = unit(c(.5,.5,1,1.3),\"lines\"),\n legend.background = element_rect(fill=\"white\")) +\n annotate(\"text\",x=x0+.5,y=y0,label='regimen',colour=\"black\",size=3.5,fontface=1,hjust=0,family=\"Times\") +\n annotate(\"text\",x=x0+.75,y=y0-0.18,label='periodic',colour=\"black\",size=3,fontface=1,hjust=1,vjust=.5,family=\"Times\") +\n annotate(\"text\",x=x0+.75,y=y0-0.09,label='static',colour=\"black\",size=3,fontface=1,hjust=1,vjust=.5,family=\"Times\") +\n annotate(\"text\",x=x0+.75,y=y0-0.27,label='optimal',colour=\"black\",size=3,fontface=1,hjust=1,vjust=.5,family=\"Times\")\np1\n\np2 = df_periodic %>% ggplot(aes(x=time,y=resistance)) +\n geom_path(color=clrs[1],size=sz) + \n geom_path(aes(color=sigma),lineend=\"round\",size=sz*fc) +\n geom_path(data=df_const,color=clrs[2],size=sz) + \n geom_path(data=df_const,aes(color=sigma),lineend=\"round\",size=sz*fc) +\n geom_path(data=df_optimal,color=clrs[3],size=sz) + \n geom_path(data=df_optimal,aes(color=sigma),lineend=\"round\",size=sz*fc) +\n theme_bw(base_size=12,base_family='Times') + \n labs(x=\"Time (months)\",y=\"Intratumoral resistance (%)\") +\n scale_color_gradientn(limits=c(0,1),oob=squish,\n colours=clrs_plt,\n values=seq(0,1,length.out=6)) +\n scale_x_continuous(expand=c(0,0),breaks = seq(0,6,2)) +\n scale_y_continuous(expand=c(0,0)) +\n 
coord_cartesian(ylim=c(0,100)) +\n guides(color=guide_colourbar(title=\"Treatment intensity\",title.position=\"top\",title.vjust=1)) +\n theme(\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n legend.text=element_text(size=8.5),\n legend.key.height = unit(.8, 'lines'),\n legend.title=element_text(size=10,vjust=1),\n legend.direction = \"horizontal\", \n legend.position = c(.72,.2),\n legend.key = element_rect(size = 5),\n plot.margin = unit(c(.5,.5,1,.5),\"lines\")\n )\np2\n\nggsave(plot=p1,width=cs[1],height=cs[2],filename=\"../figures/draft/Fig5-A.pdf\",useDingbats=FALSE)\nggsave(plot=p2,width=cs[1],height=cs[2],filename=\"../figures/draft/Fig5-B.pdf\",useDingbats=FALSE)",
"[1] \"Static\"\n"
]
],
[
[
"# Optimal treatment for varied lengths of time horizons",
"_____no_output_____"
]
],
[
[
"cs = c(3.6,3.2)\noptions(repr.plot.width=cs[1],repr.plot.height=cs[2])\n\ncln_nms = c(\"T\",\"time\",\"sigma\",\"resistance\",\"size\")\nread.table(file=\"../figures/draft/Fig7X-trjs_optimal.csv\",header=FALSE,sep=\",\",col.names=cln_nms) %>% \n as.data.frame -> df_optimal\n\nprint(\"Optimal\")\ndf_optimal %>% tail(1) %>% .$size\n\nsz = 1.5; fc = .5\nx0 = .4; len_seg = 1.8\ny0 = 1.9\n\np1 = ggplot() +\n geom_hline(yintercept=1,size=.25,color=\"black\",linetype=\"dashed\")\n\nidx = 1\nfor (T0 in rev(unique(df_optimal %>% filter(T<=240) %>% .$T))) {\n df_optimal %>% filter(T==T0) %>% mutate(time=(max(time)-time)/30,resistance=100*resistance) -> df_optimal0\n size0 = filter(df_optimal0,time==min(time))$size\n df_optimal0$size = size0/df_optimal0$size\n# print(df_optimal0 %>% arrange(time))\n p1 = p1 + \n geom_vline(xintercept=T0/30,size=.25,color=\"black\",linetype=\"dashed\") +\n geom_path(data=df_optimal0,aes(x=time,y=size),color=clrs[1],size=sz) +\n geom_path(data=df_optimal0,aes(x=time,y=size),color=clrs_plt[idx],size=fc*sz) \n idx = idx + 1\n}\n\np1 = p1 + \n theme_bw(base_size=12,base_family='Times') + \n labs(x=\"Time horizon (months)\",y=\"Fold change in tumor size\") +\n scale_color_gradientn(limits=c(0,1),oob=squish, \n colours=clrs_plt,\n values=seq(0,1,length.out=6)) +\n scale_x_continuous(expand=c(0,0),breaks = seq(0,18,1)) + \n scale_y_continuous(expand=c(0,0)) +\n coord_cartesian(ylim=c(0.7,1.5)) +\n theme(\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n plot.margin = unit(c(.5,.5,1,1.3),\"lines\"),\n legend.background = element_rect(fill=\"white\")) \n\np1\n\nggsave(plot=p1,width=cs[1],height=cs[2],filename=\"../figures/draft/FigSXa.pdf\",useDingbats=FALSE)",
"[1] \"Optimal\"\n"
],
[
"max(df_optimal$T/30)",
"_____no_output_____"
],
[
"df_optimal %>% group_by(T) %>% filter(time==T) %>% ungroup %>% mutate(T = T/30) %>% \n ggplot(aes(x=T,y=1e9*size)) +\n geom_path() +\n labs(x='Time horizon (months)', y=expression('Final tumour size (initial '*10^9*' cells)')) +\n scale_x_continuous(expand=c(0,0),breaks = seq(0,48,12)) + \n scale_y_continuous(expand=c(0,0),breaks = c(1e9,1e10,2e10)) + \n coord_cartesian(xlim=c(0,max(df_optimal$T/30)+.4),ylim=c(5e8,2.2e10)) +\n# scale_y_log10(breaks = c(1e8,1e9,1e10),\n# labels = trans_format(\"log10\", math_format(10^.x))) +\n theme_bw(base_size=12,base_family='Times') + \n theme(\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n plot.margin = unit(c(.5,.5,1,1.3),\"lines\"),\n legend.background = element_rect(fill=\"white\")) -> p1\n\np1\n\nggsave(plot=p1,width=cs[1],height=cs[2],filename=\"../figures/draft/FigSXb.pdf\",useDingbats=FALSE)",
"_____no_output_____"
]
],
[
[
"# Another figure for solution of the optimal control problem",
"_____no_output_____"
]
],
[
[
"cs = c(4.2,2.75)\noptions(repr.plot.width=cs[1],repr.plot.height=cs[2])\n\ncln_nms = c(\"trajectory\",\"time\",\"sigma\",\"resistance\",\"size\")\nread.table(file=\"../figures/draft/Fig6-trjs_optimal-final.csv\",header=TRUE,sep=\",\",col.names=cln_nms) %>% \n as.data.frame %>% mutate(time=time/30,resistance=100*resistance) -> df\n\n# clrs = brewer.pal(9,\"Set1\")\nsz = 1.5; fc = 0.5\nx0 = 1.4; len_seg = 1.8\n\ntmx = 4\n\np2 = df %>% filter(trajectory!=0) %>% \n ggplot(aes(x=tmx-time,y=resistance,group=factor(trajectory))) +\n geom_path(data=filter(df,trajectory==0),color=\"black\",size=sz*.25,linetype=5) + \n geom_path(data=filter(df,trajectory==1),color=\"darkgrey\",size=sz) + \n geom_path(data=filter(df,trajectory==1),aes(color=sigma),lineend=\"round\",size=sz*fc) +\n geom_path(data=filter(df,trajectory==2),color=clrs[3],size=sz) + \n geom_path(data=filter(df,trajectory==2),aes(color=sigma),lineend=\"round\",size=sz*fc) +\n geom_path(data=filter(df,trajectory==3),color=clrs[2],size=sz) + \n geom_path(data=filter(df,trajectory==3),aes(color=sigma),lineend=\"round\",size=sz*fc) +\n# geom_path(aes(color=sigma),lineend=\"round\",size=sz*fc) +\n theme_bw(base_size=12,base_family='Times') + \n labs(x=\"Time until the end of the treatment\",y=\"Intratumoral resistance (%)\") +\n scale_color_gradientn(limits=c(0,1),oob=squish,\n colours=clrs_plt,\n values=seq(0,1,length.out=6)) +\n scale_x_continuous(expand=c(0,0),breaks=c(0,tmx),labels=c(\"\",0)) +\n scale_y_continuous(expand=c(0,0)) +\n coord_cartesian(ylim=c(0,100),xlim=c(0,tmx)) +\n guides(color=guide_colourbar(title=\"Treatment\\nintensity\",title.position=\"top\",title.vjust=2)) +\n theme(\n panel.grid.major = element_blank(),\n panel.grid.minor = element_blank(),\n legend.text=element_text(size=8.5),\n legend.key.height = unit(.8, 'lines'),\n legend.title=element_text(size=10),\n legend.direction = \"vertical\", \n axis.title.x = element_text(vjust=0),\n legend.key = element_rect(size = 5),\n plot.margin = unit(c(.5,.5,1,.5),\"lines\")\n )\np2\n\nggsave(plot=p2,width=cs[1],height=cs[2],filename=\"../figures/draft/Fig4.pdf\",useDingbats=FALSE)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e09f9499b2967001cc23f7734a33611517addc | 19,619 | ipynb | Jupyter Notebook | LinearRegression.ipynb | duartele/exerc-jupyternotebook | 29236eadb600acd0737cd023d6337bc01739b3da | [
"MIT"
] | null | null | null | LinearRegression.ipynb | duartele/exerc-jupyternotebook | 29236eadb600acd0737cd023d6337bc01739b3da | [
"MIT"
] | null | null | null | LinearRegression.ipynb | duartele/exerc-jupyternotebook | 29236eadb600acd0737cd023d6337bc01739b3da | [
"MIT"
] | null | null | null | 112.752874 | 15,694 | 0.862276 | [
[
[
"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn import linear_model as lm\nimport statsmodels.formula.api as smf",
"_____no_output_____"
],
[
"df = pd.read_csv('p027_2.txt', sep=\";\")",
"_____no_output_____"
],
[
"df.columns\n#df.columns = ['Minutos', 'Unidades']",
"_____no_output_____"
],
[
"X = np.array(df['Units']).reshape(-1,1) #Transform in one column\ny = np.array(df['Minutes']).reshape(-1,1)",
"_____no_output_____"
],
[
"#ordinary least squares - OLS\nmodel_2 = smf.ols('Minutes ~ Units', data=df).fit()",
"_____no_output_____"
],
[
"print(model_2.summary())",
"_____no_output_____"
],
[
"model_lm = lm.LinearRegression(fit_intercept=False)\nmodel_lm.fit(X,y) #X and Y need to be arrays - not pd.Series",
"_____no_output_____"
],
[
"b1 = model_lm.coef_\nb0 = model_lm.intercept_\nyhat = b0 + b1*X",
"_____no_output_____"
],
[
"df['yhat02'] = yhat",
"_____no_output_____"
],
[
"#plot(x,y)\nplt.scatter(df['Units'],df['Minutes'])\nplt.plot(df['Units'], df['yhat'], color='red')\nplt.plot(df['Units'], df['yhat02'], color='black');",
"_____no_output_____"
],
[
"#Least Square Error\ndf['Error'] = (df['Minutes']-df['yhat'])**2\nprint(f'SS M01: {np.sum(df[\"Error\"])}')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e0c0ffe09d07c0acc8c8e94711a48162b30d54 | 2,954 | ipynb | Jupyter Notebook | Arvind/08 - Accessing Items.ipynb | Arvind-collab/Data-Science | 4d7027c308adba2b414f97abfe151c8881674da4 | [
"MIT"
] | null | null | null | Arvind/08 - Accessing Items.ipynb | Arvind-collab/Data-Science | 4d7027c308adba2b414f97abfe151c8881674da4 | [
"MIT"
] | null | null | null | Arvind/08 - Accessing Items.ipynb | Arvind-collab/Data-Science | 4d7027c308adba2b414f97abfe151c8881674da4 | [
"MIT"
] | null | null | null | 20.950355 | 101 | 0.413338 | [
[
[
"\n### Accessing Items\n",
"_____no_output_____"
],
[
"<table align=\"left\">\n <tr>\n <td width=\"6%\">\n <img src=\"question_icon.png\">\n </td>\n <td>\n <div align=\"left\", style=\"font-size:120%\">\n <font color=\"#21618C\">\n <b>1. Write a program to retrieve the keys/values of dictionary </b>\n </font>\n </div>\n </td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"**Use the dictionary**\n\ndictionary2 = {0:3, 'x':5, 1:2}",
"_____no_output_____"
]
],
[
[
"dictionary2 = {0:3, 'x':5, 1:2}\ndictionary2",
"_____no_output_____"
]
],
[
[
"<table align=\"left\">\n <tr>\n <td width=\"6%\">\n <img src=\"question_icon.png\">\n </td>\n <td>\n <div align=\"left\", style=\"font-size:120%\">\n <font color=\"#21618C\">\n <b>2. Write a program to get the value for 'Age' from the dictionary</b>\n </font>\n </div>\n </td>\n </tr>\n</table>",
"_____no_output_____"
],
[
"**Use the dictionary**\n\ndictionary3 = {'Weight': 67, 'BMI': 25, 'Age': 27, 'Profession': 'CA'}",
"_____no_output_____"
]
],
[
[
"dictionary3 = {'Weight': 67, 'BMI': 25, 'Age': 27, 'Profession': 'CA'}\ndictionary3['Age']",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7e0da4b03e686c6a0f1e970ab550268402bca19 | 78,593 | ipynb | Jupyter Notebook | Heart Disease UCI.ipynb | satyamuralidhar/Kaggle-HeartDisease_UCI | b7cad8a1d46e27461ad6566104308308b6e01d09 | [
"Apache-2.0"
] | null | null | null | Heart Disease UCI.ipynb | satyamuralidhar/Kaggle-HeartDisease_UCI | b7cad8a1d46e27461ad6566104308308b6e01d09 | [
"Apache-2.0"
] | null | null | null | Heart Disease UCI.ipynb | satyamuralidhar/Kaggle-HeartDisease_UCI | b7cad8a1d46e27461ad6566104308308b6e01d09 | [
"Apache-2.0"
] | null | null | null | 84.782093 | 19,108 | 0.763669 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns",
"_____no_output_____"
],
[
"df = pd.read_csv('heart.csv')",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"sns.barplot(x=df['age'],y=df['target'])",
"_____no_output_____"
],
[
"plt.hist(x=df['age'],histtype='bar')",
"_____no_output_____"
],
[
"plt.hist(x=df['thalach'],histtype='bar',color='green')",
"_____no_output_____"
],
[
"plt.hist(x=df['chol'],histtype='bar',color='yellow')",
"_____no_output_____"
],
[
"from sklearn.preprocessing import StandardScaler , LabelEncoder\nscaler = StandardScaler()",
"_____no_output_____"
],
[
"label = LabelEncoder()\ntrain = df.iloc[:,:-1]\ntrain",
"_____no_output_____"
],
[
"train['oldpeak'] = label.fit_transform(train['oldpeak'])",
"_____no_output_____"
],
[
"target = df['target']",
"_____no_output_____"
],
[
"X_scaled = scaler.fit_transform(train)",
"_____no_output_____"
],
[
"from sklearn.metrics import roc_curve , roc_auc_score , confusion_matrix , accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import GradientBoostingClassifier\ngrad = GradientBoostingClassifier()\nX_train,X_test,y_train,y_test = train_test_split(X_scaled,target,test_size=0.4,random_state=120)\ngrad.fit(X_train,y_train)\ny_pred=grad.predict(X_test)\naccuracy_score(y_pred,y_test)",
"_____no_output_____"
],
[
"auc = roc_auc_score(y_test,y_pred)\nconfusion = confusion_matrix(y_test,y_pred)\ntp = confusion[0][0]\nfp = confusion[0][1]\nfn = confusion[1][0]\ntn = confusion[1][1]\n",
"_____no_output_____"
],
[
"from sklearn.metrics import plot_confusion_matrix\ndisp = plot_confusion_matrix(grad,X_test,y_test,cmap=plt.cm.Blues,normalize=None)\n#disp = plot_confusion_matrix(lg,X_test,y_test,cmap='viridis',normalize=None)\ndisp.confusion_matrix",
"_____no_output_____"
],
[
"\n# finding accuracy \naccuracy = (tp+tn)/(tp+tn+fp+fn)\naccuracy",
"_____no_output_____"
],
[
"fpr , tpr , thresholds = roc_curve(y_test,y_pred)\nplt.plot(fpr,tpr,color = 'darkblue',label = 'ROC')\nplt.plot([0,1],[0,1],color='orange',linestyle='--',label=\"ROC Curve(area=%0.2f)\"%auc)\nplt.xlabel('False + ve rate')\nplt.ylabel('True +ve rate')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e0dba1c8b12b529f50b7aede3dbe38c0fe1c3c | 451,542 | ipynb | Jupyter Notebook | DataPrepFinal.ipynb | danielburdeno/Kindle-eBook-Recommendations | bb3c8016c0817f78c9143c1b218366d7cbb9b588 | [
"MIT"
] | 2 | 2022-01-25T18:26:54.000Z | 2022-01-26T20:10:27.000Z | DataPrepFinal.ipynb | danielburdeno/Kindle-eBook-Recommendations | bb3c8016c0817f78c9143c1b218366d7cbb9b588 | [
"MIT"
] | null | null | null | DataPrepFinal.ipynb | danielburdeno/Kindle-eBook-Recommendations | bb3c8016c0817f78c9143c1b218366d7cbb9b588 | [
"MIT"
] | 1 | 2022-01-25T18:26:58.000Z | 2022-01-25T18:26:58.000Z | 271.52255 | 377,329 | 0.871988 | [
[
[
"",
"_____no_output_____"
],
[
"# Kindle eBook Recommendation System: Data Preparation\nAuthors: Daniel Burdeno",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"# Contents\n<l></l>\n\n<span style=\"font-size:1.2em;\">\n\n- <a href=\"#Overview\">Overview</a>\n\n- <a href=\"#Business Understanding\">Business Understanding</a> \n\n- <a href=\"#Data Understanding\">Data Understanding</a> \n\n- <a href=\"#Data Preparation\">Data Preparation</a> \n \n - <a href=\"#Imports and Functions\">Imports and Functions</a>\n \n - <a href=\"#Meta Data\">Meta Data</a>\n\n - <a href=\"#Review Data\">Review Data</a> \n\n - <a href=\"#CSV Files\">CSV Files</a>",
"_____no_output_____"
],
[
"# <a id=\"#Overview\">Overview</a>",
"_____no_output_____"
],
[
"> This project aims to build a two system approach to recommending Kindle eBook's to both existing reviewers and new users looking to find similar books. For existing reviewers a collaborative approach is taken by comparing similar reviewer profiles based on exisitng ratings. A content-based approach is taken in order to recommend books based on similar review text data and can be used by anyone.",
"_____no_output_____"
],
[
"# <a id=\"Business Understanding\">Business Understanding</a>",
"_____no_output_____"
],
[
"> Currently eBooks are outsold by print books at about a 4 to 1 ratio. In 2020 there was 191 million eBooks sold. While Amazon holds over 70% of the market in eBooks via their kindle platform there is a large untapped potential for increasing eBook sales and promoting the use of eReaders compared to print. By utilzing quality recommendation systems Amazon can boost the interest and useablity of eBooks thus improving upon this market. The kindle platform and eBooks in general are incredidly accesibile for anyone with a tablet, smartphone, computer, or eReader. These eBooks can be immediatley purchased from a multitude of platforms and are able to read within minutes of purchase, which is far superior to obtaining a print book. This notion of real time purchase and useablily plays greater into Amazon's one click purchase philsophy.\n\n> The kindle store is also full of cheap reads, with some eBooks even being free with certain subsripctions like prime and unlimited. A broad span of genres are available ranging from things like self-help books, cookbooks, and photography books to more traditional literature genres like Science Fiction & Fantasy and Romance novels. A final huge plus for the advocacy of eBooks is the ease in which readers can rate and reviews books they have either just read or already read. This can all be done via the same platform used to access and read the eBook (aka kindle). Ultimately this plays into the collection of more review and rating data wich in turn can attribute to better performing recommendations for each indiviudal user. A quality recommendation system can thus create a positive feedback loop that not only enhances itself but promotoes the increase in eBook sales across the board.",
"_____no_output_____"
],
[
"# <a id=\"Data Understanding\">Data Understanding</a>",
"_____no_output_____"
],
[
"> Data for this project was pulled from a compiled dataset of Amazon kindle store reviews and meta data in two seperate JSON files. The datasets can be found [here](https://nijianmo.github.io/amazon/index.html). I utlized the smaller dataset known as 5-core which contained data for products and reviewers with at least 5 entries. Data from the Kindle Store sets were used, both the 5-core review data and the full metadata file. Due to the large size of these datasets I downloaded them locally and saved to an external repository outside of github.\n\n> Data Instructions: Naviatged through the linked page requires an entry form of basic information (name, email) in order to begin downloads. Given the size of the two datasets allow several minutes for the downloads to occur. Once saved to your local drive (I placed the data one repository above the linked github repository) the JSON files can be loaded into jupyter notebooks via pandas (pd.read_json) using the compression='gz' and lines=True. Due to the size of the review text dataset be prepared for a large memory usage when loading it in.",
"_____no_output_____"
],
[
"# <a id=\"Data Preparation\">Data Preparation</a>",
"_____no_output_____"
],
[
"## <a id=\"Imports and Functions\">Imports and Functions</a>",
"_____no_output_____"
],
[
"> For data preparation and cleaning I primarily used built-in pandas methods and functions, utlizing numpy as well. Basic visualiztions were created with matplotlib. Warnings is imported to ignore the copy/slice warning when slicing a dataframe. I created a function that returns the third value in a list which was passed into the larger function used to clean the meta data file. See the function for detailed description of what is occuring. This function was updated as I explored the dataset and outputs. I also set a style for matplotlib for consistency across notebooks and visualations.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib as plt\nimport warnings\n\n# Show plots in notebook\n%matplotlib inline\nwarnings.filterwarnings('ignore')\n# Set matplotlib style to match other notebook graphics\nplt.style.use('fast')",
"_____no_output_____"
],
[
"# Function that takes a list and returns the third value, used in a .apply below to iterate through a dataframe column\ndef getthirdValue(aList):\n return aList[2:3]",
"_____no_output_____"
],
[
"# Compiled meta_data cleaning for ease of use, takes in a dataframe and returns the 'cleaned' version\ndef meta_clean(data):\n # Creating a new genre column based on the category column, third value in the list is the one we want\n data['genre'] = data['category'].apply(getthirdValue)\n # Change into single string and remove html code\n data['genre'] = data['genre'].apply(lambda x: ','.join(map(str, x)))\n data['genre'] = data['genre'].str.replace('amp;', '')\n # Retrieve print length from the details columns dictionary and return as new column\n print_length = [d.get('Print Length:') for d in data['details']]\n data['print_length'] = print_length\n # Returns only the print length minus any text\n data['print_length'] = data['print_length'].str.extract('(\\d+)', expand=False)\n data['print_length']= data['print_length'].astype(float)\n # Retrieve word wise feature from the details columns dictionary and return as new column\n word_wise = [d.get('Word Wise:') for d in data['details']]\n data['word_wise'] = word_wise\n # Retrieve lending feature from the details columns dictionary and return as new column\n lending = [d.get('Lending:') for d in data['details']]\n data['lending'] = lending\n # Transform word wise and lending columns into binary values using dictionary and .map\n bool_dict = {'Enabled': 1, 'Not Enabled': 0}\n data['word_wise'] = data['word_wise'].map(bool_dict)\n data['lending'] = data['lending'].map(bool_dict)\n # Clean brand column, removing unnecessary text, and rename to author as this is what it represents\n data['brand'] = data['brand'].str.replace(\"Visit Amazon's\", '')\n data['brand'] = data['brand'].str.replace(\"Page\", '')\n data.rename(columns={'brand': 'author'}, inplace=True)\n # Remove/replace unnecessary text in the title column, including html code\n data['title'] = data['title'].str.replace(\"amp;\", \"\", regex=True)\n data['title'] = data['title'].str.replace(\"'\", \"'\", regex=True)\n data['title'] = data['title'].str.replace(\" - Kindle edition\", \"\")\n data['title'] = data['title'].str.replace(\" eBook\", \"\")\n # Dropping unnecessary/incomplete columns\n data.drop(columns=['details', 'category', 'tech1', 'description', 'fit', 'tech2',\n 'feature', 'rank', 'also_view', 'main_cat',\n 'similar_item', 'date', 'price', 'imageURL',\n 'imageURLHighRes', 'also_buy'], inplace=True)\n return data.head()",
"_____no_output_____"
]
],
[
[
"## <a id=\"Load Data\">Load Data</a>",
"_____no_output_____"
],
[
"> As stated in the Data Understanding section, we have two seperate JSON files to load in. One containing individual user reviews and the other containing book meta data. The meta data will need to be heavily cleaned to extract relevant information for this project. These large initial data files were loaded in from a local folder external to the repository for the project due to their size and necessary compression.",
"_____no_output_____"
]
],
[
[
"# Meta data load-in, file stored as compressed JSON, each line is a JSON entry hence the lines=True arguement\npath = 'C:\\\\Users\\\\danie\\\\Documents\\\\Flatiron\\\\Projects\\\\Capstone\\\\Rawdata\\\\meta_Kindle_Store.gz'\ndf_meta = pd.read_json(path, compression='gzip', lines=True)",
"_____no_output_____"
],
[
"# Review data load-in, file stored as compressed JSON, each line is a JSON entry hence the lines=True arguement\npath = 'C:\\\\Users\\\\danie\\\\Documents\\\\Flatiron\\\\Projects\\\\Capstone\\\\Rawdata\\\\Kindle_Store_5.gz'\ndf_rev = pd.read_json(path, compression='gzip', lines=True)",
"_____no_output_____"
]
],
[
[
"## <a id=\"Meta Data\">Meta Data</a>",
"_____no_output_____"
]
],
[
[
"df_meta.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 491670 entries, 0 to 491669\nData columns (total 19 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 category 491670 non-null object \n 1 tech1 491670 non-null object \n 2 description 491670 non-null object \n 3 fit 491670 non-null object \n 4 title 491670 non-null object \n 5 also_buy 491670 non-null object \n 6 tech2 491670 non-null object \n 7 brand 491670 non-null object \n 8 feature 491670 non-null object \n 9 rank 491670 non-null object \n 10 also_view 491670 non-null object \n 11 details 491670 non-null object \n 12 main_cat 491670 non-null object \n 13 similar_item 491670 non-null object \n 14 date 0 non-null datetime64[ns]\n 15 price 491670 non-null object \n 16 asin 491670 non-null object \n 17 imageURL 491670 non-null object \n 18 imageURLHighRes 491670 non-null object \ndtypes: datetime64[ns](1), object(18)\nmemory usage: 71.3+ MB\n"
],
[
"df_meta.head()",
"_____no_output_____"
]
],
[
[
"> Taking a look at the .info() and .head() of the meta data shows a plethora of cleaning that needs to occur. There are a multitude of unusable columns with blank information including tech1, tech2, fit, description, rank, main_cat, price, and image columns. Within the category column (a list) and the details column (a dictionary) I need to pull out relevant information. Further exploration online shows that the brand column is actually the eBook author and will need to extracted in useful information as well.\n\n> Each entry for the category column seen below is a list which needs to be dealt with in order to extract the correct information. Categories also contained things that are not eBooks. I removed any category that does not donate eBook. It is also clear that the third value of the list describes what can be attritubted as a genre of eBook. I took the third value from this list in order to create a new genre column.\n\n> Each entry for the details column seen below is a dictionary. Taking a look at the first row shows me that I can pull out useful information from this dicitonary, including print_length and two kindle features. Word_wise designates if the book has built in dictionary support and lending designates if the eBook can be lent to other users. The product ID#s ('asin') is already another column in the dataframe so will not be extracted. ",
"_____no_output_____"
]
],
[
[
"# Taking a look at what is within the category columns, it contains lists\ndf_meta['category'].value_counts()",
"_____no_output_____"
],
[
"# Using a dual nested lambda function and .apply I can subset the dataframe to only contain categories that denote eBooks\ndf_meta = df_meta.loc[\n lambda df: df.category.apply(\n lambda l: 'Kindle eBooks' in l)]",
"_____no_output_____"
],
[
"# Pulling out the first row of the details dictionary to epxlore\ndetails = list(df_meta['details'])\ndetails[0]",
"_____no_output_____"
],
[
"# Running my compiled clean function on meta data, see above for descriptions\nmeta_clean(df_meta)",
"_____no_output_____"
],
[
"# Checking the clean I still have several unwanted entries within genre including a blank one\ndf_meta.genre.value_counts()",
"_____no_output_____"
],
[
"# Subsetting to remove the genres with less the 1000 entries\ndf_meta = df_meta[df_meta['genre'].map(df_meta['genre'].value_counts()) > 1000]\n# Remove the blank genre entry\ndf_meta = df_meta.loc[df_meta['genre'] != '']",
"_____no_output_____"
]
],
[
[
"> After running the clean function on my meta data I noticed that there was still blank entries for things like title and I should check for null values. However a lot of these were just blank entries not actually denoated as NaN so I had to parse through the dataframe and replace any blank entry with NaN in order to accurately find and address them. Print length NaN's were filled in using the mean value of each genre. The rest of the entries were dropped, I needed accurate title and author in order to make recommendations.",
"_____no_output_____"
]
],
[
[
"# Converting blank entries to NaN using regex expression and looking at nulls\ndf_meta = df_meta.replace(r'^\\s*$', np.nan, regex=True)\ndf_meta.isna().sum()",
"_____no_output_____"
],
[
"# Dropping nulls and using groupby to sort by genre so I can fill in any print_length nulls based on mean genre value\ndf_meta['print_length'] = df_meta.groupby(['genre'], sort=False)['print_length'].apply(lambda x: x.fillna(x.mean()))\ndf_meta.dropna(inplace=True)",
"_____no_output_____"
],
[
"# Checking for any duplicate book entries, none were found. Asin value denotates a unique Amazon product identifier\ndf_meta.asin.value_counts()",
"_____no_output_____"
],
[
"# Creating a list of the product numbers in match with review dataset\nasin_list = df_meta['asin'].tolist()",
"_____no_output_____"
]
],
[
[
"## <a id=\"Review Data\">Review Data</a>",
"_____no_output_____"
]
],
[
[
"df_rev.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2222983 entries, 0 to 2222982\nData columns (total 12 columns):\n # Column Dtype \n--- ------ ----- \n 0 overall int64 \n 1 verified bool \n 2 reviewTime object\n 3 reviewerID object\n 4 asin object\n 5 style object\n 6 reviewerName object\n 7 reviewText object\n 8 summary object\n 9 unixReviewTime int64 \n 10 vote object\n 11 image object\ndtypes: bool(1), int64(2), object(9)\nmemory usage: 188.7+ MB\n"
],
[
"df_rev.head()",
"_____no_output_____"
],
[
"# I thought style would contain genre but it did not, entries will be subsetted using the meta data, so ignore column\ndf_rev['style'].value_counts()",
"_____no_output_____"
]
],
[
[
"> Taking a look at the review data shows some cleaning that needs to occur as well, including dropping unneeded columns and exploring several others. The overall column denotes the rating that a user gave to the item (critical information). Verified is either True or False, and looking it up it designates if a reviewer was verified to not have recieved the book for free or written a review for monetary gain. I considered it important to only included verified reviews in order to not introduce positive bais into the system. Only a small set of the reviews did not contain any text which is critical to my content based system so they will be dropped from the data. \n\n> This data set was suppose to contain only products and reviewers that had 5 or more entries (5-core) however upon exploration I found this to not be true. I kept the larger dataset for review text and content based recommendations but I also subsetted the data to only contain reviewers that had made 5 or more reviews for my collaborative filtering model. This is due to the nature of collaborative filtering requiring reviewer profiles to be fleshed out and will not work well at all with only a few reviews.",
"_____no_output_____"
]
],
[
[
"# Matching reviewed products with products with meta data\ndf_rev = df_rev[df_rev['asin'].isin(asin_list)]",
"_____no_output_____"
],
[
"df_rev.verified.value_counts()",
"_____no_output_____"
],
[
"# Dropping any rows that were not verified\nindexNames = df_rev[df_rev['verified'] == False].index\ndf_rev.drop(indexNames , inplace=True)",
"_____no_output_____"
],
[
"# Dropping unused columns\ndf_rev_use = df_rev.drop(columns=['reviewTime', 'verified',\n 'style', 'reviewerName', \n 'unixReviewTime', 'image', 'vote', 'summary'])",
"_____no_output_____"
],
[
"df_rev_use.isna().sum()",
"_____no_output_____"
],
[
"# Dropping entries without review text\ndf_rev_use.dropna(inplace=True)",
"_____no_output_____"
],
[
"# Dropping any possible duplicate reviews\ndf_rev_use.drop_duplicates(inplace=True)",
"_____no_output_____"
],
[
"# Checking if data was really 5-core\ndf_rev_use['reviewerID'].value_counts()",
"_____no_output_____"
],
[
"# Most reviewed books\ndf_rev_use['asin'].value_counts()",
"_____no_output_____"
],
[
"df_rev_use.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1398682 entries, 2754 to 2222982\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 overall 1398682 non-null int64 \n 1 reviewerID 1398682 non-null object\n 2 asin 1398682 non-null object\n 3 reviewText 1398682 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 53.4+ MB\n"
]
],
[
[
"## <a id=\"CSV Files\">CSV Files</a>",
"_____no_output_____"
],
[
"> For ease of use in further notebooks the cleaned and compiled dataframes were saved to individual csv files within the data folder. These files can be saved locally and were not pushed to github due to size constraints. The meta data files were saved and pushed to github inorder for heroku/streamlit to have access to them. ",
"_____no_output_____"
]
],
[
[
"# Save cleaned review dataframe to csv for use in other notebook, this set includes reviewers with less than 5 reviews\ndf_rev_use.to_csv('Data/df_rev_all.csv')",
"_____no_output_____"
],
[
"# Subsetting review data to only include reviewers with 5 or more reviews\ndf_rev5 = df_rev_use[df_rev_use['reviewerID'].map(df_rev_use['reviewerID'].value_counts()) > 4]\ndf_rev5.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 1306690 entries, 2755 to 2222971\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 overall 1306690 non-null int64 \n 1 reviewerID 1306690 non-null object\n 2 asin 1306690 non-null object\n 3 reviewText 1306690 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 49.8+ MB\n"
],
[
"df_rev5['reviewerID'].value_counts()",
"_____no_output_____"
],
[
"# Save cleaned subset of review data\ndf_rev5.to_csv('Data/df_rev5.csv')",
"_____no_output_____"
],
[
"# Creating sets of books for each review set in order to match meta data\nasin_set = set(df_rev['asin'].tolist())\nasin_set5 = set(df_rev5['asin'].tolist())\nprint(len(asin_set))\nprint(len(asin_set5))",
"94212\n93732\n"
],
[
"# Meta data for books from the larger review set\ndf_meta_all = df_meta.loc[df_meta['asin'].isin(asin_set)]\n# Meta data for books from the smaller 5-core review set\ndf_meta5 = df_meta.loc[df_meta['asin'].isin(asin_set5)]\n# Save dataframes as csv for use in other notebooks\ndf_meta_all.to_csv('Data/meta_all.csv')\ndf_meta5.to_csv('Data/meta5.csv')",
"_____no_output_____"
]
],
[
[
"> In order to conduct natural language processing and produce content based on review text I needed to aggregate review text based on individual books. I used the unique product number, 'asin', to groupby and then join review text for each book into a new data dataframe below. This dataframe will be used to produce a document term matrix for every book.",
"_____no_output_____"
]
],
[
[
"# Groupby using 'asin' and custom aggregate to join all review text\ndf_books_rev = df_rev_use.groupby(['asin'], as_index = False).agg({'reviewText': ' '.join})\ndf_books_rev.to_csv('Data/df_books_rev.csv')",
"_____no_output_____"
],
[
"df_books_rev.head()",
"_____no_output_____"
],
[
"df_books_rev.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 94211 entries, 0 to 94210\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 asin 94211 non-null object\n 1 reviewText 94211 non-null object\ndtypes: object(2)\nmemory usage: 1.4+ MB\n"
]
],
[
[
"> With the cleaned and saved files ready go I moved into two seperate notebooks to build my collaborative filtering system and content-based system. These notebooks can be found at: [CollaborativeFiltering](https://github.com/danielburdeno/Kindle-Recommendations/blob/main/CollaborativeFiltering.ipynb) and [ContentBased](https://github.com/danielburdeno/Kindle-Recommendations/blob/main/ContentBased.ipynb) respectively.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7e0f2908d6b0de0ec8d7ebdf2fcb3fce72b6a18 | 1,744 | ipynb | Jupyter Notebook | examples/gallery/demos/bokeh/us_unemployment.ipynb | ppwadhwa/holoviews | e8e2ec08c669295479f98bb2f46bbd59782786bf | [
"BSD-3-Clause"
] | 864 | 2019-11-13T08:18:27.000Z | 2022-03-31T13:36:13.000Z | examples/gallery/demos/bokeh/us_unemployment.ipynb | ppwadhwa/holoviews | e8e2ec08c669295479f98bb2f46bbd59782786bf | [
"BSD-3-Clause"
] | 1,117 | 2019-11-12T16:15:59.000Z | 2022-03-30T22:57:59.000Z | examples/gallery/demos/bokeh/us_unemployment.ipynb | ppwadhwa/holoviews | e8e2ec08c669295479f98bb2f46bbd59782786bf | [
"BSD-3-Clause"
] | 180 | 2019-11-19T16:44:44.000Z | 2022-03-28T22:49:18.000Z | 23.253333 | 135 | 0.538417 | [
[
[
"URL: http://bokeh.pydata.org/en/latest/docs/gallery/unemployment.html\n\nMost examples work across multiple plotting backends, this example is also available for:\n\n* [Matplotlib - US unemployment example](../matplotlib/us_unemployment.ipynb)",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport holoviews as hv\nfrom holoviews import opts\n\nhv.extension('bokeh')",
"_____no_output_____"
]
],
[
[
"## Defining data",
"_____no_output_____"
]
],
[
[
"from bokeh.sampledata.unemployment1948 import data\n\ndata = pd.melt(data.drop('Annual', 1), id_vars='Year', var_name='Month', value_name='Unemployment')\nheatmap = hv.HeatMap(data, label=\"US Unemployment (1948 - 2013)\")",
"_____no_output_____"
]
],
[
[
"## Plot",
"_____no_output_____"
]
],
[
[
"colors = [\"#75968f\", \"#a5bab7\", \"#c9d9d3\", \"#e2e2e2\", \"#dfccce\", \"#ddb7b1\", \"#cc7878\", \"#933b41\", \"#550b1d\"]\nheatmap.opts(\n opts.HeatMap(width=900, height=400, xrotation=45, xaxis='top', labelled=[],\n tools=['hover'], cmap=colors))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e0f414de26ea13c75a4e4a5f0f5568c1bd7007 | 44,785 | ipynb | Jupyter Notebook | notebooks/tf-2-workflow-smpipelines.ipynb | yegortokmakov/amazon-sagemaker-workshop | b9dd98a96fe466f0a9a80549e387c428fc0aa43e | [
"Apache-2.0"
] | null | null | null | notebooks/tf-2-workflow-smpipelines.ipynb | yegortokmakov/amazon-sagemaker-workshop | b9dd98a96fe466f0a9a80549e387c428fc0aa43e | [
"Apache-2.0"
] | null | null | null | notebooks/tf-2-workflow-smpipelines.ipynb | yegortokmakov/amazon-sagemaker-workshop | b9dd98a96fe466f0a9a80549e387c428fc0aa43e | [
"Apache-2.0"
] | null | null | null | 44.429563 | 870 | 0.620945 | [
[
[
"## TensorFlow 2 Complete Project Workflow in Amazon SageMaker\n### Data Preprocessing -> Training -> Automatic Model Tuning -> Deployment\n \n1. [Introduction](#Introduction)\n2. [SageMaker Processing for dataset transformation](#SageMakerProcessing)\n5. [SageMaker hosted training](#SageMakerHostedTraining)\n6. [Automatic Model Tuning](#AutomaticModelTuning)\n7. [SageMaker hosted endpoint](#SageMakerHostedEndpoint)\n8. [Workflow Automation with SageMaker Pipelines](#WorkflowAutomation)\n 1. [Pipeline Parameters](#PipelineParameters)\n 2. [Processing Step](#ProcessingStep)\n 3. [Training and Model Creation Steps](#TrainingModelCreation)\n 4. [Batch Scoring Step](#BatchScoringStep)\n 5. [Creating and executing the pipeline](#CreatingExecutingPipeline)\n9. [ML Lineage Tracking](#LineageOfPipelineArtifacts)\n10. [Extensions](#Extensions)\n\n \n## Introduction <a class=\"anchor\" id=\"Introduction\">\n\nIf you are using TensorFlow 2, you can use the Amazon SageMaker prebuilt TensorFlow 2 framework container with training scripts similar to those you would use outside SageMaker. This notebook presents such a workflow, including all key steps such as preprocessing data with SageMaker Processing, and model training and deployment with SageMaker hosted training and inference. Automatic Model Tuning in SageMaker is used to tune the model's hyperparameters. \n\nWorking through these steps in a notebook is part of the prototyping process; however, a repeatable production workflow typically is run outside notebooks. To demonstrate automating the workflow, we'll use [Amazon SageMaker Pipelines](https://aws.amazon.com/sagemaker/pipelines) for workflow orchestration. Purpose-built for machine learning (ML), SageMaker Pipelines helps you automate different steps of the ML workflow including data processing, model training, and batch prediction (scoring), and apply conditions such as approvals for model quality. It also includes a model registry and model lineage tracker. \n\nTo enable you to run this notebook within a reasonable time (typically less than an hour), this notebook's use case is a straightforward regression task: predicting house prices based on the well-known Boston Housing dataset. This public dataset contains 13 features regarding housing stock of towns in the Boston area. Features include average number of rooms, accessibility to radial highways, adjacency to a major river, etc. \n\nTo begin, we'll import some necessary packages and set up directories for training and test data. We'll also set up a SageMaker Session to perform various operations, and specify an Amazon S3 bucket to hold input data and output. The default bucket used here is created by SageMaker if it doesn't already exist, and named in accordance with the AWS account ID and AWS Region. ",
"_____no_output_____"
]
],
[
[
"import boto3\nimport os\nimport sagemaker\nimport tensorflow as tf\n\nsess = sagemaker.session.Session()\nbucket = sess.default_bucket() \nregion = boto3.Session().region_name\n\ndata_dir = os.path.join(os.getcwd(), 'data')\nos.makedirs(data_dir, exist_ok=True)\n\ntrain_dir = os.path.join(os.getcwd(), 'data/train')\nos.makedirs(train_dir, exist_ok=True)\n\ntest_dir = os.path.join(os.getcwd(), 'data/test')\nos.makedirs(test_dir, exist_ok=True)\n\nraw_dir = os.path.join(os.getcwd(), 'data/raw')\nos.makedirs(raw_dir, exist_ok=True)\n\nbatch_dir = os.path.join(os.getcwd(), 'data/batch')\nos.makedirs(batch_dir, exist_ok=True)",
"_____no_output_____"
]
],
[
[
"# SageMaker Processing for dataset transformation <a class=\"anchor\" id=\"SageMakerProcessing\">\n\nNext, we'll import the dataset and transform it with SageMaker Processing, which can be used to process terabytes of data in a SageMaker-managed cluster separate from the instance running your notebook server. In a typical SageMaker workflow, notebooks are only used for prototyping and can be run on relatively inexpensive and less powerful instances, while processing, training and model hosting tasks are run on separate, more powerful SageMaker-managed instances. SageMaker Processing includes off-the-shelf support for Scikit-learn, as well as a Bring Your Own Container option, so it can be used with many different data transformation technologies and tasks. An alternative to SageMaker Processing is [SageMaker Data Wrangler](https://aws.amazon.com/sagemaker/data-wrangler/), a visual data preparation tool integrated with the SageMaker Studio UI. \n\nTo work with SageMaker Processing, first we'll load the Boston Housing dataset, save the raw feature data and upload it to Amazon S3 so it can be accessed by SageMaker Processing. We'll also save the labels for training and testing.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom tensorflow.python.keras.datasets import boston_housing\nfrom sklearn.preprocessing import StandardScaler\n\n(x_train, y_train), (x_test, y_test) = boston_housing.load_data()\n\nnp.save(os.path.join(raw_dir, 'x_train.npy'), x_train)\nnp.save(os.path.join(raw_dir, 'x_test.npy'), x_test)\nnp.save(os.path.join(raw_dir, 'y_train.npy'), y_train)\nnp.save(os.path.join(raw_dir, 'y_test.npy'), y_test)\ns3_prefix = 'tf-2-workflow'\nrawdata_s3_prefix = '{}/data/raw'.format(s3_prefix)\nraw_s3 = sess.upload_data(path='./data/raw/', key_prefix=rawdata_s3_prefix)\nprint(raw_s3)",
"_____no_output_____"
]
],
[
[
"Next, simply supply an ordinary Python data preprocessing script as shown below. For this example, we're using a SageMaker prebuilt Scikit-learn framework container, which includes many common functions for processing data. There are few limitations on what kinds of code and operations you can run, and only a minimal API contract: input and output data must be placed in specified directories. If this is done, SageMaker Processing automatically loads the input data from S3 and uploads transformed data back to S3 when the job is complete.",
"_____no_output_____"
]
],
[
[
"%%writefile preprocessing.py\n\nimport glob\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import StandardScaler\n\nif __name__=='__main__':\n \n input_files = glob.glob('{}/*.npy'.format('/opt/ml/processing/input'))\n print('\\nINPUT FILE LIST: \\n{}\\n'.format(input_files))\n scaler = StandardScaler()\n for file in input_files:\n raw = np.load(file)\n # only transform feature columns\n if 'y_' not in file:\n transformed = scaler.fit_transform(raw)\n if 'train' in file:\n if 'y_' in file:\n output_path = os.path.join('/opt/ml/processing/train', 'y_train.npy')\n np.save(output_path, raw)\n print('SAVED LABEL TRAINING DATA FILE\\n')\n else:\n output_path = os.path.join('/opt/ml/processing/train', 'x_train.npy')\n np.save(output_path, transformed)\n print('SAVED TRANSFORMED TRAINING DATA FILE\\n')\n else:\n if 'y_' in file:\n output_path = os.path.join('/opt/ml/processing/test', 'y_test.npy')\n np.save(output_path, raw)\n print('SAVED LABEL TEST DATA FILE\\n')\n else:\n output_path = os.path.join('/opt/ml/processing/test', 'x_test.npy')\n np.save(output_path, transformed)\n print('SAVED TRANSFORMED TEST DATA FILE\\n')",
"_____no_output_____"
]
],
[
[
"Before starting the SageMaker Processing job, we instantiate a `SKLearnProcessor` object. This object allows you to specify the instance type to use in the job, as well as how many instances. Although the Boston Housing dataset is quite small, we'll use two instances to showcase how easy it is to spin up a cluster for SageMaker Processing. ",
"_____no_output_____"
]
],
[
[
"from sagemaker import get_execution_role\nfrom sagemaker.sklearn.processing import SKLearnProcessor\n\nsklearn_processor1 = SKLearnProcessor(framework_version='0.23-1',\n role=get_execution_role(),\n instance_type='ml.m5.xlarge',\n instance_count=2)",
"_____no_output_____"
]
],
[
[
"We're now ready to run the Processing job. To enable distributing the data files equally among the instances, we specify the `ShardedByS3Key` distribution type in the `ProcessingInput` object. This ensures that if we have `n` instances, each instance will receive `1/n` files from the specified S3 bucket. It may take around 3 minutes for the following code cell to run, mainly to set up the cluster. At the end of the job, the cluster automatically will be torn down by SageMaker. ",
"_____no_output_____"
]
],
[
[
"from sagemaker.processing import ProcessingInput, ProcessingOutput\nfrom time import gmtime, strftime \n\nprocessing_job_name = \"tf-2-workflow-{}\".format(strftime(\"%d-%H-%M-%S\", gmtime()))\noutput_destination = 's3://{}/{}/data'.format(bucket, s3_prefix)\n\nsklearn_processor1.run(code='preprocessing.py',\n job_name=processing_job_name,\n inputs=[ProcessingInput(\n source=raw_s3,\n destination='/opt/ml/processing/input',\n s3_data_distribution_type='ShardedByS3Key')],\n outputs=[ProcessingOutput(output_name='train',\n destination='{}/train'.format(output_destination),\n source='/opt/ml/processing/train'),\n ProcessingOutput(output_name='test',\n destination='{}/test'.format(output_destination),\n source='/opt/ml/processing/test')])\n\npreprocessing_job_description = sklearn_processor1.jobs[-1].describe()",
"_____no_output_____"
]
],
[
[
"In the log output of the SageMaker Processing job above, you should be able to see logs in two different colors for the two different instances, and that each instance received different files. Without the `ShardedByS3Key` distribution type, each instance would have received a copy of **all** files. By spreading the data equally among `n` instances, you should receive a speedup by approximately a factor of `n` for most stateless data transformations. After saving the job results locally, we'll move on to training and inference code.",
"_____no_output_____"
]
],
[
[
"x_train_in_s3 = '{}/train/x_train.npy'.format(output_destination)\ny_train_in_s3 = '{}/train/y_train.npy'.format(output_destination)\nx_test_in_s3 = '{}/test/x_test.npy'.format(output_destination)\ny_test_in_s3 = '{}/test/y_test.npy'.format(output_destination)\n\n!aws s3 cp {x_train_in_s3} ./data/train/x_train.npy\n!aws s3 cp {y_train_in_s3} ./data/train/y_train.npy\n!aws s3 cp {x_test_in_s3} ./data/test/x_test.npy\n!aws s3 cp {y_test_in_s3} ./data/test/y_test.npy",
"_____no_output_____"
]
],
[
[
"## SageMaker hosted training <a class=\"anchor\" id=\"SageMakerHostedTraining\">\n\nNow that we've prepared a dataset, we can move on to SageMaker's model training functionality. With SageMaker hosted training the actual training itself occurs not on the notebook instance, but on a separate cluster of machines managed by SageMaker. Before starting hosted training, the data must be in S3, or an EFS or FSx for Lustre file system. We'll upload to S3 now, and confirm the upload was successful.",
"_____no_output_____"
]
],
[
[
"s3_prefix = 'tf-2-workflow'\n\ntraindata_s3_prefix = '{}/data/train'.format(s3_prefix)\ntestdata_s3_prefix = '{}/data/test'.format(s3_prefix)",
"_____no_output_____"
],
[
"train_s3 = sess.upload_data(path='./data/train/', key_prefix=traindata_s3_prefix)\ntest_s3 = sess.upload_data(path='./data/test/', key_prefix=testdata_s3_prefix)\n\ninputs = {'train':train_s3, 'test': test_s3}\n\nprint(inputs)",
"_____no_output_____"
]
],
[
[
"We're now ready to set up an Estimator object for hosted training. We simply call `fit` to start the actual hosted training.",
"_____no_output_____"
]
],
[
[
"from sagemaker.tensorflow import TensorFlow\n\ntrain_instance_type = 'ml.c5.xlarge'\nhyperparameters = {'epochs': 30, 'batch_size': 128, 'learning_rate': 0.01}\n\nhosted_estimator = TensorFlow(\n source_dir='tf-2-workflow-smpipelines',\n entry_point='train.py',\n instance_type=train_instance_type,\n instance_count=1,\n hyperparameters=hyperparameters,\n role=sagemaker.get_execution_role(),\n base_job_name='tf-2-workflow',\n framework_version='2.3.1',\n py_version='py37')",
"_____no_output_____"
]
],
[
[
"After starting the hosted training job with the `fit` method call below, you should observe the valication loss converge with each epoch. Can we do better? We'll look into a way to do so in the **Automatic Model Tuning** section below. In the meantime, the hosted training job should take about 3 minutes to complete. ",
"_____no_output_____"
]
],
[
[
"hosted_estimator.fit(inputs)",
"_____no_output_____"
]
],
[
[
"The training job produces a model saved in S3 that we can retrieve. This is an example of the modularity of SageMaker: having trained the model in SageMaker, you can now take the model out of SageMaker and run it anywhere else. Alternatively, you can deploy the model into a production-ready environment using SageMaker's hosted endpoints functionality, as shown in the **SageMaker hosted endpoint** section below.\n\nRetrieving the model from S3 is very easy: the hosted training estimator you created above stores a reference to the model's location in S3. You simply copy the model from S3 using the estimator's `model_data` property and unzip it to inspect the contents.",
"_____no_output_____"
]
],
[
[
"!aws s3 cp {hosted_estimator.model_data} ./model/model.tar.gz",
"_____no_output_____"
]
],
[
[
"The unzipped archive should include the assets required by TensorFlow Serving to load the model and serve it, including a .pb file: ",
"_____no_output_____"
]
],
[
[
"!tar -xvzf ./model/model.tar.gz -C ./model",
"_____no_output_____"
]
],
[
[
"## Automatic Model Tuning <a class=\"anchor\" id=\"AutomaticModelTuning\">\n\nSo far we have simply run one Hosted Training job without any real attempt to tune hyperparameters to produce a better model. Selecting the right hyperparameter values to train your model can be difficult, and typically is very time consuming if done manually. The right combination of hyperparameters is dependent on your data and algorithm; some algorithms have many different hyperparameters that can be tweaked; some are very sensitive to the hyperparameter values selected; and most have a non-linear relationship between model fit and hyperparameter values. SageMaker Automatic Model Tuning helps automate the hyperparameter tuning process: it runs multiple training jobs with different hyperparameter combinations to find the set with the best model performance.\n\nWe begin by specifying the hyperparameters we wish to tune, and the range of values over which to tune each one. We also must specify an objective metric to be optimized: in this use case, we'd like to minimize the validation loss.",
"_____no_output_____"
]
],
[
[
"from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner\n\nhyperparameter_ranges = {\n 'learning_rate': ContinuousParameter(0.001, 0.2, scaling_type=\"Logarithmic\"),\n 'epochs': IntegerParameter(10, 50),\n 'batch_size': IntegerParameter(64, 256),\n}\n\nmetric_definitions = [{'Name': 'loss',\n 'Regex': ' loss: ([0-9\\\\.]+)'},\n {'Name': 'val_loss',\n 'Regex': ' val_loss: ([0-9\\\\.]+)'}]\n\nobjective_metric_name = 'val_loss'\nobjective_type = 'Minimize'",
"_____no_output_____"
]
],
[
[
"Next we specify a HyperparameterTuner object that takes the above definitions as parameters. Each tuning job must be given a budget: a maximum number of training jobs. A tuning job will complete after that many training jobs have been executed. \n\nWe also can specify how much parallelism to employ, in this case five jobs, meaning that the tuning job will complete after three series of five jobs in parallel have completed. For the default Bayesian Optimization tuning strategy used here, the tuning search is informed by the results of previous groups of training jobs, so we don't run all of the jobs in parallel, but rather divide the jobs into groups of parallel jobs. There is a trade-off: using more parallel jobs will finish tuning sooner, but likely will sacrifice tuning search accuracy. \n\nNow we can launch a hyperparameter tuning job by calling the `fit` method of the HyperparameterTuner object. The tuning job may take around 10 minutes to finish. While you're waiting, the status of the tuning job, including metadata and results for invidual training jobs within the tuning job, can be checked in the SageMaker console in the **Hyperparameter tuning jobs** panel. ",
"_____no_output_____"
]
],
[
[
"tuner = HyperparameterTuner(hosted_estimator,\n objective_metric_name,\n hyperparameter_ranges,\n metric_definitions,\n max_jobs=15,\n max_parallel_jobs=5,\n objective_type=objective_type)\n\ntuning_job_name = \"tf-2-workflow-{}\".format(strftime(\"%d-%H-%M-%S\", gmtime()))\ntuner.fit(inputs, job_name=tuning_job_name)\ntuner.wait()",
"_____no_output_____"
]
],
[
[
"After the tuning job is finished, we can use the `HyperparameterTuningJobAnalytics` object from the SageMaker Python SDK to list the top 5 tuning jobs with the best performance. Although the results vary from tuning job to tuning job, the best validation loss from the tuning job (under the FinalObjectiveValue column) likely will be substantially lower than the validation loss from the hosted training job above, where we did not perform any tuning other than manually increasing the number of epochs once. ",
"_____no_output_____"
]
],
[
[
"tuner_metrics = sagemaker.HyperparameterTuningJobAnalytics(tuning_job_name)\ntuner_metrics.dataframe().sort_values(['FinalObjectiveValue'], ascending=True).head(5)",
"_____no_output_____"
]
],
[
[
"The total training time and training jobs status can be checked with the following lines of code. Because automatic early stopping is by default off, all the training jobs should be completed normally. For an example of a more in-depth analysis of a tuning job, see the SageMaker official sample [HPO_Analyze_TuningJob_Results.ipynb](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/hyperparameter_tuning/analyze_results/HPO_Analyze_TuningJob_Results.ipynb) notebook.",
"_____no_output_____"
]
],
[
[
"total_time = tuner_metrics.dataframe()['TrainingElapsedTimeSeconds'].sum() / 3600\nprint(\"The total training time is {:.2f} hours\".format(total_time))\ntuner_metrics.dataframe()['TrainingJobStatus'].value_counts()",
"_____no_output_____"
]
],
[
[
"## SageMaker hosted endpoint <a class=\"anchor\" id=\"SageMakerHostedEndpoint\">\n\nAssuming the best model from the tuning job is better than the model produced by the individual hosted training job above, we could now easily deploy that model to production. A convenient option is to use a SageMaker hosted endpoint, which serves real time predictions from the trained model (For asynchronous, offline predictions on large datasets, you can use either SageMaker Processing or SageMaker Batch Transform.). The endpoint will retrieve the TensorFlow SavedModel created during training and deploy it within a SageMaker TensorFlow Serving container. This all can be accomplished with one line of code. \n\nMore specifically, by calling the `deploy` method of the HyperparameterTuner object we instantiated above, we can directly deploy the best model from the tuning job to a SageMaker hosted endpoint.",
"_____no_output_____"
]
],
[
[
"tuning_predictor = tuner.deploy(initial_instance_count=1, instance_type='ml.m5.xlarge')",
"_____no_output_____"
]
],
[
[
"We can compare the predictions generated by this endpoint with the actual target values: ",
"_____no_output_____"
]
],
[
[
"results = tuning_predictor.predict(x_test[:10])['predictions'] \nflat_list = [float('%.1f'%(item)) for sublist in results for item in sublist]\nprint('predictions: \\t{}'.format(np.array(flat_list)))\nprint('target values: \\t{}'.format(y_test[:10].round(decimals=1)))",
"_____no_output_____"
]
],
[
[
"To avoid billing charges from stray resources, you can delete the prediction endpoint to release its associated instance(s).",
"_____no_output_____"
]
],
[
[
"sess.delete_endpoint(tuning_predictor.endpoint_name)",
"_____no_output_____"
]
],
[
[
"## Workflow Automation with SageMaker Pipelines <a class=\"anchor\" id=\"WorkflowAutomation\">\n\nIn the previous parts of this notebook, we prototyped various steps of a TensorFlow project within the notebook itself, with some steps being run on external SageMaker resources (hosted training, model tuning, hosted endpoints). Notebooks are great for prototyping, but generally are not used in production-ready machine learning pipelines. \n\nA very simple pipeline in SageMaker includes processing the dataset to get it ready for training, performing the actual training, and then using the model to perform some form of inference such as batch predition (scoring). We'll use SageMaker Pipelines to automate these steps, keeping the pipeline simple for now: it easily can be extended into a far more complex pipeline.",
"_____no_output_____"
],
[
"### Pipeline parameters <a class=\"anchor\" id=\"PipelineParameters\">\n\nBefore we begin to create the pipeline itself, we should think about how to parameterize it. For example, we may use different instance types for different purposes, such as CPU-based types for data processing and GPU-based or more powerful types for model training. These are all \"knobs\" of the pipeline that we can parameterize. Parameterizing enables custom pipeline executions and schedules without having to modify the pipeline definition.",
"_____no_output_____"
]
],
[
[
"from sagemaker.workflow.parameters import (\n ParameterInteger,\n ParameterString,\n)\n\n# raw input data\ninput_data = ParameterString(name=\"InputData\", default_value=raw_s3)\n\n# processing step parameters\nprocessing_instance_type = ParameterString(name=\"ProcessingInstanceType\", default_value=\"ml.m5.xlarge\")\nprocessing_instance_count = ParameterInteger(name=\"ProcessingInstanceCount\", default_value=2)\n\n# training step parameters\ntraining_instance_type = ParameterString(name=\"TrainingInstanceType\", default_value=\"ml.c5.2xlarge\")\ntraining_instance_count = ParameterInteger(name=\"TrainingInstanceCount\", default_value=1)\n\n# batch inference step parameters\nbatch_instance_type = ParameterString(name=\"BatchInstanceType\", default_value=\"ml.c5.xlarge\")\nbatch_instance_count = ParameterInteger(name=\"BatchInstanceCount\", default_value=1)",
"_____no_output_____"
]
],
[
[
"### Processing Step <a class=\"anchor\" id=\"ProcessingStep\">\n\nThe first step in the pipeline will preprocess the data to prepare it for training. We create a `SKLearnProcessor` object similar to the one above, but now parameterized so we can separately track and change the job configuration as needed, for example to increase the instance type size and count to accommodate a growing dataset.",
"_____no_output_____"
]
],
[
[
"from sagemaker.sklearn.processing import SKLearnProcessor\n\nrole = sagemaker.get_execution_role()\nframework_version = \"0.23-1\"\n\nsklearn_processor = SKLearnProcessor(\n framework_version=framework_version,\n instance_type=processing_instance_type,\n instance_count=processing_instance_count,\n base_job_name=\"tf-2-workflow-process\",\n sagemaker_session=sess,\n role=role,\n)",
"_____no_output_____"
],
[
"from sagemaker.processing import ProcessingInput, ProcessingOutput\nfrom sagemaker.workflow.steps import ProcessingStep\n\n\nstep_process = ProcessingStep(\n name=\"TF2Process\",\n processor=sklearn_processor,\n inputs=[\n ProcessingInput(source=input_data, destination=\"/opt/ml/processing/input\", s3_data_distribution_type='ShardedByS3Key'),\n ],\n outputs=[\n ProcessingOutput(output_name=\"train\", source=\"/opt/ml/processing/train\"),\n ProcessingOutput(output_name=\"test\", source=\"/opt/ml/processing/test\"),\n ],\n code=\"./preprocessing.py\",\n)",
"_____no_output_____"
]
],
[
[
"### Training and Model Creation Steps <a class=\"anchor\" id=\"TrainingModelCreation\">\n\nThe following code sets up a pipeline step for a training job. We start by specifying which SageMaker prebuilt TensorFlow 2 training container to use for the job.",
"_____no_output_____"
]
],
[
[
"from sagemaker.tensorflow import TensorFlow\nfrom sagemaker.inputs import TrainingInput\nfrom sagemaker.workflow.steps import TrainingStep\nfrom sagemaker.workflow.step_collections import RegisterModel\n\ntensorflow_version = '2.3.1'\npython_version = 'py37'\n\nimage_uri_train = sagemaker.image_uris.retrieve(\n framework=\"tensorflow\",\n region=region,\n version=tensorflow_version,\n py_version=python_version,\n instance_type=training_instance_type,\n image_scope=\"training\"\n )",
"_____no_output_____"
]
],
[
[
"Next, we specify an `Estimator` object, and define a `TrainingStep` to insert the training job in the pipeline with inputs from the previous SageMaker Processing step.",
"_____no_output_____"
]
],
[
[
"import time\n\nmodel_path = f\"s3://{bucket}/TF2WorkflowTrain\"\ntraining_parameters = {'epochs': 44, 'batch_size': 128, 'learning_rate': 0.0125, 'for_pipeline': 'true'}\n\nestimator = TensorFlow(\n image_uri=image_uri_train,\n source_dir='tf-2-workflow-smpipelines',\n entry_point='train.py',\n instance_type=training_instance_type,\n instance_count=training_instance_count,\n role=role,\n base_job_name=\"tf-2-workflow-train\",\n output_path=model_path,\n hyperparameters=training_parameters\n)",
"_____no_output_____"
],
[
"step_train = TrainingStep(\n name=\"TF2WorkflowTrain\",\n estimator=estimator,\n inputs={\n \"train\": TrainingInput(\n s3_data=step_process.properties.ProcessingOutputConfig.Outputs[\n \"train\"\n ].S3Output.S3Uri\n ),\n \"test\": TrainingInput(\n s3_data=step_process.properties.ProcessingOutputConfig.Outputs[\n \"test\"\n ].S3Output.S3Uri\n )\n },\n)",
"_____no_output_____"
]
],
[
[
"As another step, we create a SageMaker `Model` object to wrap the model artifact, and associate it with a separate SageMaker prebuilt TensorFlow Serving inference container to potentially use later.",
"_____no_output_____"
]
],
[
[
"from sagemaker.model import Model\nfrom sagemaker.inputs import CreateModelInput\nfrom sagemaker.workflow.steps import CreateModelStep\n\nimage_uri_inference = sagemaker.image_uris.retrieve(\n framework=\"tensorflow\",\n region=region,\n version=tensorflow_version,\n py_version=python_version,\n instance_type=batch_instance_type,\n image_scope=\"inference\"\n )\nmodel = Model(\n image_uri=image_uri_inference,\n model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,\n sagemaker_session=sess,\n role=role,\n)\n\ninputs_model = CreateModelInput(\n instance_type=batch_instance_type\n)\n\nstep_create_model = CreateModelStep(\n name=\"TF2WorkflowCreateModel\",\n model=model,\n inputs=inputs_model,\n)",
"_____no_output_____"
]
],
[
[
"### Batch Scoring Step <a class=\"anchor\" id=\"BatchScoringStep\">\n \nThe final step in this pipeline is offline, batch scoring (inference/prediction). The inputs to this step will be the model we trained earlier, and the test data. A simple, ordinary Python script is all we need to do the actual batch inference.",
"_____no_output_____"
]
],
[
[
"%%writefile batch-score.py\n\nimport os\nimport subprocess\nimport sys\nimport numpy as np\nimport pathlib\nimport tarfile\n\ndef install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\nif __name__ == \"__main__\":\n \n install('tensorflow==2.3.1')\n model_path = f\"/opt/ml/processing/model/model.tar.gz\"\n with tarfile.open(model_path, 'r:gz') as tar:\n tar.extractall('./model')\n import tensorflow as tf\n model = tf.keras.models.load_model('./model/1')\n test_path = \"/opt/ml/processing/test/\"\n x_test = np.load(os.path.join(test_path, 'x_test.npy'))\n y_test = np.load(os.path.join(test_path, 'y_test.npy'))\n scores = model.evaluate(x_test, y_test, verbose=2)\n print(\"\\nTest MSE :\", scores)\n \n output_dir = \"/opt/ml/processing/batch\"\n pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)\n evaluation_path = f\"{output_dir}/score-report.txt\"\n with open(evaluation_path, 'w') as writer:\n writer.write(f\"Test MSE : {scores}\")",
"_____no_output_____"
]
],
[
[
"In regard to the SageMaker features we could use to perform batch scoring, we have several choices, including SageMaker Processing and SageMaker Batch Transform. We'll use SageMaker Processing here.",
"_____no_output_____"
]
],
[
[
"batch_scorer = SKLearnProcessor(\n framework_version=framework_version,\n instance_type=batch_instance_type,\n instance_count=batch_instance_count,\n base_job_name=\"tf-2-workflow-batch\",\n sagemaker_session=sess,\n role=role )\n\nstep_batch = ProcessingStep(\n name=\"TF2WorkflowBatchScoring\",\n processor=batch_scorer,\n inputs=[\n ProcessingInput(\n source=step_train.properties.ModelArtifacts.S3ModelArtifacts,\n destination=\"/opt/ml/processing/model\"\n ),\n ProcessingInput(\n source=step_process.properties.ProcessingOutputConfig.Outputs[\n \"test\"\n ].S3Output.S3Uri,\n destination=\"/opt/ml/processing/test\"\n )\n ],\n outputs=[\n ProcessingOutput(output_name=\"batch\", source=\"/opt/ml/processing/batch\"),\n ],\n code=\"./batch-score.py\" )",
"_____no_output_____"
]
],
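[
[
"The Processing-based scoring step above could alternatively be expressed with SageMaker Batch Transform. The following cell is an illustrative sketch only and is not wired into the pipeline defined below: it assumes the `step_create_model` step and the `bucket`/`s3_prefix` variables defined earlier, and the exact input format and content type expected by the TensorFlow Serving container would still need to be configured.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch only (not part of the pipeline defined in this notebook):\n# a Batch Transform alternative to the Processing-based batch scoring step.\n# Assumes step_create_model, bucket and s3_prefix from earlier cells.\nfrom sagemaker.transformer import Transformer\nfrom sagemaker.inputs import TransformInput\nfrom sagemaker.workflow.steps import TransformStep\n\ntransformer = Transformer(\n    model_name=step_create_model.properties.ModelName,\n    instance_count=1,\n    instance_type='ml.c5.xlarge',\n    output_path=f's3://{bucket}/TF2WorkflowTransform',\n    sagemaker_session=sess)\n\nstep_transform = TransformStep(\n    name='TF2WorkflowBatchTransform',\n    transformer=transformer,\n    inputs=TransformInput(data=f's3://{bucket}/{s3_prefix}/data/test'))",
"_____no_output_____"
]
],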
[
[
"### Creating and executing the pipeline <a class=\"anchor\" id=\"CreatingExecutingPipeline\">\n\nWith all of the pipeline steps now defined, we can define the pipeline itself as a `Pipeline` object comprising a series of those steps. Parallel and conditional steps also are possible.",
"_____no_output_____"
]
],
[
[
"from sagemaker.workflow.pipeline import Pipeline\n\npipeline_name = f\"TF2Workflow\"\n\npipeline = Pipeline(\n name=pipeline_name,\n parameters=[input_data,\n processing_instance_type, \n processing_instance_count, \n training_instance_type, \n training_instance_count,\n batch_instance_type,\n batch_instance_count],\n steps=[step_process, \n step_train, \n step_create_model,\n step_batch\n ],\n sagemaker_session=sess\n)",
"_____no_output_____"
]
],
[
[
"We can inspect the pipeline definition in JSON format:",
"_____no_output_____"
]
],
[
[
"import json\n\ndefinition = json.loads(pipeline.definition())\ndefinition",
"_____no_output_____"
]
],
[
[
"After upserting its definition, we can start the pipeline with the `Pipeline` object's `start` method:",
"_____no_output_____"
]
],
[
[
"pipeline.upsert(role_arn=role)\nexecution = pipeline.start()",
"_____no_output_____"
]
],
[
[
"We can now confirm that the pipeline is executing. In the log output below, confirm that `PipelineExecutionStatus` is `Executing`.",
"_____no_output_____"
]
],
[
[
"execution.describe()",
"_____no_output_____"
]
],
[
[
"Typically this pipeline should take about 10 minutes to complete. We can wait for completion by invoking `wait()`. After execution is complete, we can list the status of the pipeline steps.",
"_____no_output_____"
]
],
[
[
"execution.wait()\nexecution.list_steps()",
"_____no_output_____"
]
],
[
[
"### Check the score report\n\nAfter the batch scoring job in the pipeline is complete, the batch scoring report is uploaded to S3. For simplicity, this report simply states the test MSE, but in general reports can include as much detail as desired. Reports such as these also can be formatted for use in conditional approval steps in SageMaker Pipelines. For example, the pipeline could have a condition step that only allows further steps to proceed only if the MSE is lower than some threshold. ",
"_____no_output_____"
]
],
[
[
"report_path = f\"{step_batch.outputs[0].destination}/score-report.txt\"\n!aws s3 cp {report_path} ./score-report.txt && cat score-report.txt",
"_____no_output_____"
]
],
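[
[
"For illustration only, a condition step along the following lines could gate later pipeline steps on the scoring result. This sketch is not part of the pipeline defined above: it assumes the scoring script is modified to write a JSON report with an `mse` field, and that the resulting `PropertyFile` is registered on the batch scoring step through its `property_files` argument.",
"_____no_output_____"
]
],
[
[
"# Illustrative sketch only -- not executed in this notebook.\n# Assumes the scoring script writes a JSON report (score-report.json) containing an 'mse' field\n# and that score_report is passed to the batch ProcessingStep via property_files=[score_report].\nfrom sagemaker.workflow.properties import PropertyFile\nfrom sagemaker.workflow.conditions import ConditionLessThanOrEqualTo\nfrom sagemaker.workflow.condition_step import ConditionStep\nfrom sagemaker.workflow.functions import JsonGet\n\nscore_report = PropertyFile(name='ScoreReport', output_name='batch', path='score-report.json')\n\ncond_lte = ConditionLessThanOrEqualTo(\n    left=JsonGet(step_name=step_batch.name, property_file=score_report, json_path='mse'),\n    right=100.0)\n\nstep_cond = ConditionStep(\n    name='TF2WorkflowCheckMSE',\n    conditions=[cond_lte],\n    if_steps=[],   # e.g. a RegisterModel step could go here\n    else_steps=[])",
"_____no_output_____"
]
],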
[
[
"## ML Lineage Tracking <a class=\"anchor\" id=\"LineageOfPipelineArtifacts\">\n\nSageMaker ML Lineage Tracking creates and stores information about the steps of a ML workflow from data preparation to model deployment. With the tracking information you can reproduce the workflow steps, track model and dataset lineage, and establish model governance and audit standards.\n\nLet's now check out the lineage of the model generated by the pipeline above. The lineage table identifies the resources used in training, including the timestamped train and test data sources, and the specific version of the TensorFlow 2 container in use during the training job. ",
"_____no_output_____"
]
],
[
[
"from sagemaker.lineage.visualizer import LineageTableVisualizer\n\nviz = LineageTableVisualizer(sagemaker.session.Session())\n\nfor execution_step in reversed(execution.list_steps()):\n if execution_step['StepName'] == 'TF2WorkflowTrain':\n display(viz.show(pipeline_execution_step=execution_step))",
"_____no_output_____"
]
],
[
[
"## Extensions <a class=\"anchor\" id=\"Extensions\">\n\nWe've covered a lot of content in this notebook: SageMaker Processing for data transformation, Automatic Model Tuning, and SageMaker hosted training and inference. These are central elements for most deep learning workflows in SageMaker. Additionally, we examined how SageMaker Pipelines helps automate deep learning workflows after completion of the prototyping phase of a project.\n\nBesides all of the SageMaker features explored above, there are many other features that may be applicable to your project. For example, to handle common problems during deep learning model training such as vanishing or exploding gradients, **SageMaker Debugger** is useful. To manage common problems such as data drift after a model is in production, **SageMaker Model Monitor** can be applied.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7e0f50fd902478e7891978e5f7f38247103927f | 7,270 | ipynb | Jupyter Notebook | notebooks/05 - Linear Models for Regression.ipynb | YiGaolooking/ODSC_ML_advanced_workshop | cd6c420456f68146878f17214c49058a3642fcaa | [
"MIT"
] | null | null | null | notebooks/05 - Linear Models for Regression.ipynb | YiGaolooking/ODSC_ML_advanced_workshop | cd6c420456f68146878f17214c49058a3642fcaa | [
"MIT"
] | null | null | null | notebooks/05 - Linear Models for Regression.ipynb | YiGaolooking/ODSC_ML_advanced_workshop | cd6c420456f68146878f17214c49058a3642fcaa | [
"MIT"
] | 1 | 2019-05-20T18:56:12.000Z | 2019-05-20T18:56:12.000Z | 21.319648 | 146 | 0.543879 | [
[
[
"import matplotlib.pyplot as plt\n% matplotlib inline\nimport numpy as np\nplt.rcParams['figure.dpi'] = 300",
"_____no_output_____"
],
[
"from sklearn.linear_model import Ridge, LinearRegression",
"_____no_output_____"
],
[
"from sklearn.model_selection import cross_val_score",
"_____no_output_____"
],
[
"from sklearn.datasets import load_boston\nboston = load_boston()",
"_____no_output_____"
],
[
"from sklearn.utils import shuffle\nX, y = boston.data, boston.target\nX, y = shuffle(X, y)",
"_____no_output_____"
],
[
"X.shape",
"_____no_output_____"
],
[
"fig, axes = plt.subplots(3, 5, figsize=(20, 10))\nfor i, ax in enumerate(axes.ravel()):\n if i > 12:\n ax.set_visible(False)\n continue\n ax.plot(X[:, i], y, 'o', alpha=.5)\n ax.set_title(\"{}: {}\".format(i, boston.feature_names[i]))\n ax.set_ylabel(\"MEDV\")",
"_____no_output_____"
],
[
"print(X.shape)\nprint(y.shape)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)",
"_____no_output_____"
],
[
"np.mean(cross_val_score(LinearRegression(), X_train, y_train, cv=10))",
"_____no_output_____"
],
[
"np.mean(cross_val_score(Ridge(), X_train, y_train, cv=10))",
"_____no_output_____"
],
[
"from sklearn.model_selection import GridSearchCV\nparam_grid = {'alpha': np.logspace(-3, 3, 14)}\nprint(param_grid)",
"_____no_output_____"
],
[
"grid = GridSearchCV(Ridge(), param_grid, cv=10)\ngrid.fit(X_train, y_train)",
"_____no_output_____"
],
[
"import pandas as pd\nresults = pd.DataFrame(grid.cv_results_)\nresults.plot('param_alpha', 'mean_train_score')\nresults.plot('param_alpha', 'mean_test_score', ax=plt.gca())\n\nplt.legend()\nplt.xscale(\"log\")",
"_____no_output_____"
],
[
"from sklearn.preprocessing import PolynomialFeatures, scale\nX_poly = PolynomialFeatures(include_bias=False).fit_transform(scale(X))\nprint(X_poly.shape)\nX_train, X_test, y_train, y_test = train_test_split(X_poly, y, random_state=42)",
"_____no_output_____"
],
[
"np.mean(cross_val_score(LinearRegression(), X_train, y_train, cv=10))",
"_____no_output_____"
],
[
"np.mean(cross_val_score(Ridge(), X_train, y_train, cv=10))",
"_____no_output_____"
],
[
"grid = GridSearchCV(Ridge(), param_grid, cv=10)\ngrid.fit(X_train, y_train)",
"_____no_output_____"
],
[
"results = pd.DataFrame(grid.cv_results_)\nresults.plot('param_alpha', 'mean_train_score')\nresults.plot('param_alpha', 'mean_test_score', ax=plt.gca())\nplt.legend()\nplt.xscale(\"log\")",
"_____no_output_____"
],
[
"print(grid.best_params_)\nprint(grid.best_score_)",
"_____no_output_____"
],
[
"lr = LinearRegression().fit(X_train, y_train)\nplt.scatter(range(X_poly.shape[1]), lr.coef_, c=np.sign(lr.coef_), cmap=\"bwr_r\")",
"_____no_output_____"
],
[
"ridge = grid.best_estimator_\nplt.scatter(range(X_poly.shape[1]), ridge.coef_, c=np.sign(ridge.coef_), cmap=\"bwr_r\")",
"_____no_output_____"
],
[
"ridge100 = Ridge(alpha=100).fit(X_train, y_train)\nridge1 = Ridge(alpha=1).fit(X_train, y_train)\nplt.figure(figsize=(8, 4))\n\nplt.plot(ridge1.coef_, 'o', label=\"alpha=1\")\nplt.plot(ridge.coef_, 'o', label=\"alpha=14\")\nplt.plot(ridge100.coef_, 'o', label=\"alpha=100\")\nplt.legend()",
"_____no_output_____"
]
],
[
[
"# Exercise\nLoad the diabetes dataset using ``sklearn.datasets.load_diabetes``. Apply ``LinearRegression`` and ``Ridge`` and visualize the coefficients.",
"_____no_output_____"
]
]
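,
[
[
"# One possible solution to the exercise above (added for illustration; the alpha value is arbitrary)\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression, Ridge\nimport matplotlib.pyplot as plt\n\ndiabetes = load_diabetes()\nX_train, X_test, y_train, y_test = train_test_split(diabetes.data, diabetes.target, random_state=42)\n\nlr = LinearRegression().fit(X_train, y_train)\nridge = Ridge(alpha=1.0).fit(X_train, y_train)\nprint('LinearRegression test R^2:', lr.score(X_test, y_test))\nprint('Ridge test R^2:', ridge.score(X_test, y_test))\n\nplt.plot(lr.coef_, 'o', label='LinearRegression')\nplt.plot(ridge.coef_, 'o', label='Ridge alpha=1')\nplt.xticks(range(len(diabetes.feature_names)), diabetes.feature_names, rotation=90)\nplt.legend()",
"_____no_output_____"
]
]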
] | [
"code",
"markdown"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7e10417c1a4793ff1bc37ffedf381f8e75497b0 | 268,396 | ipynb | Jupyter Notebook | example/3D_VP_MHDExample.ipynb | MHDFlows/MHDFlows | 0861ca1aae752b5b3f6478ce71d0cb3ad106632e | [
"MIT"
] | 2 | 2022-03-28T05:55:08.000Z | 2022-03-29T03:48:38.000Z | example/3D_VP_MHDExample.ipynb | MHDFlows/MHDFlows | 0861ca1aae752b5b3f6478ce71d0cb3ad106632e | [
"MIT"
] | null | null | null | example/3D_VP_MHDExample.ipynb | MHDFlows/MHDFlows | 0861ca1aae752b5b3f6478ce71d0cb3ad106632e | [
"MIT"
] | null | null | null | 646.737349 | 234,566 | 0.93991 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7e104c567e25f4402ad66ed050c6b4f4b7a2876 | 1,154 | ipynb | Jupyter Notebook | test.ipynb | damazter/pysweep | f1274585ee5f9312ef9a9554d8b4ec975b7c4901 | [
"MIT"
] | 3 | 2020-01-14T12:41:55.000Z | 2021-11-11T09:01:20.000Z | test.ipynb | damazter/pysweep | f1274585ee5f9312ef9a9554d8b4ec975b7c4901 | [
"MIT"
] | 3 | 2019-11-28T15:12:05.000Z | 2021-08-17T16:11:37.000Z | test.ipynb | damazter/pysweep | f1274585ee5f9312ef9a9554d8b4ec975b7c4901 | [
"MIT"
] | 1 | 2020-01-05T20:17:31.000Z | 2020-01-05T20:17:31.000Z | 18.918033 | 49 | 0.543328 | [
[
[
"import pysweep\nimport pysweep.datahandling\nimport qcodes as qc",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e7e109e5221d392e4e41930fd303ff6a30aefd53 | 529,990 | ipynb | Jupyter Notebook | docs/notebooks/0004 - Analyzing a Full Deployment NetCDF.ipynb | axiom-data-science/GUTILS | 565fd0d2089d8cb1cecef4e8b658b9a2f9797c11 | [
"MIT"
] | 8 | 2017-04-06T21:48:44.000Z | 2021-08-17T16:29:47.000Z | docs/notebooks/0004 - Analyzing a Full Deployment NetCDF.ipynb | axiom-data-science/GUTILS | 565fd0d2089d8cb1cecef4e8b658b9a2f9797c11 | [
"MIT"
] | 9 | 2017-04-21T21:24:55.000Z | 2022-03-21T08:16:46.000Z | docs/notebooks/0004 - Analyzing a Full Deployment NetCDF.ipynb | axiom-data-science/GUTILS | 565fd0d2089d8cb1cecef4e8b658b9a2f9797c11 | [
"MIT"
] | 9 | 2017-01-05T19:06:39.000Z | 2022-03-02T01:18:21.000Z | 275.74922 | 171,102 | 0.870126 | [
[
[
"%matplotlib inline\nfrom IPython.lib.pretty import pprint\nimport logging\nlogger = logging.getLogger('gutils')\nlogger.handlers = [logging.StreamHandler()]\nlogger.setLevel(logging.DEBUG)\n\ndef plot_profiles(default_df, z=None, t=None):\n import matplotlib.dates as mpd\n import matplotlib.pyplot as plt\n t = t or 't'\n z = z or 'z'\n df = default_df.copy()\n df[z] = df[z].values * -1\n df[t] = mpd.date2num(df[t].dt.to_pydatetime())\n df.plot.scatter(x=t, y=z, c='profile', cmap='inferno', figsize=(18,10,))\n plt.show()",
"_____no_output_____"
],
[
"from pathlib import Path\nfrom gutils.slocum import SlocumReader\nfrom pocean.dsg import ContiguousRaggedTrajectoryProfile as crtp\n\ndata_folder = Path('.').absolute().parent.parent / 'gutils' / 'tests' / 'resources' / 'slocum'\nnc_file = data_folder / 'full_deployment.nc'\n\ndf = None\naxes = {\n 't': 'time',\n 'x': 'lon',\n 'y': 'lat',\n 'z': 'depth'\n}\nwith crtp(nc_file) as ncd:\n df = ncd.to_dataframe(axes=axes)",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"from gutils.yo import assign_profiles\ndf2 = df.copy()\ndf2 = df2.rename(columns={'time': 't', 'lon': 'x', 'lat': 'y', 'depth': 'z'})\ndf2 = assign_profiles(df2)\nplot_profiles(df2)",
"_____no_output_____"
],
[
"import matplotlib.dates as mpd\nimport matplotlib.pyplot as plt\ndf3 = df2.copy()\ndf3['z'] = df3.z.values * -1\ndf3['t'] = mpd.date2num(df3.t.dt.to_pydatetime())\ndf3.plot.scatter(x='t', y='z', c='temperature', cmap='viridis', title='Temperature', figsize=(18,10))\ndf3.plot.scatter(x='t', y='z', c='salinity', cmap='plasma', title='Salinity', figsize=(18,10))\nplt.show()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7e10b80e3f9173afb23e4924872ea9122b53fe5 | 157,401 | ipynb | Jupyter Notebook | Reports/v0/DSADS Dataset.ipynb | hillshadow/continual-learning-for-HAR | 21aeb99efa4bebf8f3f9f00f4452a8fd91e20c75 | [
"MIT"
] | 1 | 2020-08-15T11:32:27.000Z | 2020-08-15T11:32:27.000Z | Reports/v0/DSADS Dataset.ipynb | hillshadow/continual-learning-for-HAR | 21aeb99efa4bebf8f3f9f00f4452a8fd91e20c75 | [
"MIT"
] | null | null | null | Reports/v0/DSADS Dataset.ipynb | hillshadow/continual-learning-for-HAR | 21aeb99efa4bebf8f3f9f00f4452a8fd91e20c75 | [
"MIT"
] | null | null | null | 166.561905 | 80,212 | 0.854137 | [
[
[
"link: https://www.kaggle.com/jindongwang92/crossposition-activity-recognition\n\nhttps://archive.ics.uci.edu/ml/datasets/pamap2+physical+activity+monitoring\n \n# DSADS\n\nColumns 1~405 are features, listed in the order of 'Torso', 'Right Arm', 'Left Arm', 'Right Leg', and 'Left Leg'. Each position contains 81 columns of features. \n\n* Column 406 is the activity sequence indicating the executing of activities (usually not used in experiments). \n* Column 407 is the activity label (1~19). \n* Column 408 denotes the person (1~8)\n\nB. Barshan and M. C. Yuksek, “Recognizing daily and sports activities ¨ in two open source machine learning environments using body-worn sensor units,” The Computer Journal, vol. 57, no. 11, pp. 1649–1667, 2014.\n\n#### Feature extraction by\n\nJindong Wang, Yiqiang Chen, Lisha Hu, Xiaohui Peng, and Philip S. Yu. Stratified Transfer Learning for Cross-domain Activity Recognition. 2018 IEEE International Conference on Pervasive Computing and Communications (PerCom).\n",
"_____no_output_____"
]
],
[
[
"import scipy.io\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"filename = \"dsads\"\nmat = scipy.io.loadmat('../Dataset/DASDS/'+filename+\".mat\")\nmat",
"_____no_output_____"
],
[
"raw = pd.DataFrame(mat[\"data_dsads\"])\nraw.head()",
"_____no_output_____"
],
[
"columns = [\"Feat\"+str(i) for i in range(405)] + [\"ActivitySeq\", \"ActivityID\", \"PersonID\"]\nraw.columns = columns\nraw.head()\nraw[\"ActivityID\"].unique()",
"_____no_output_____"
],
[
"activityNames = [\n \"sitting\",\n \"standing\",\n \"lying on back side\",\n \"lying on right side\",\n \"ascending stairs\",\n \"descending stairs\",\n \"standing in an elevator still\",\n \"moving around in an elevator\",\n \"walking in a parking lot\",\n \"walking on a treadmill1\",\n \"walking on a treadmill2\",\n \"running on a treadmill3\",\n \"exercising on a stepper\",\n \"exercising on a cross trainer\",\n \"cycling in horizontal positions\",\n \"cycling in vertical positions\",\n \"rowing\",\n \"jumping\",\n \"playing basketball\"\n]",
"_____no_output_____"
],
[
"def add_activityname(x):\n name = \"R\"+str(int(x[\"PersonID\"]))+\"_\"+activityNames[int(x[\"ActivityID\"])-1]\n name = activityNames[int(x[\"ActivityID\"])-1]\n return name\n\nraw[\"ActivityName\"] = raw.apply(add_activityname, axis=1)\ndf = raw.drop('ActivityID', 1)\ndf = df.drop('PersonID', 1)\ndf = df.drop('ActivitySeq', 1)\ndf.head()",
"_____no_output_____"
],
[
"# Scale to [0, 1]\nfor i in range(243):\n f = (df[\"Feat\"+str(i)]+1)/2\n df[\"Feat\"+str(i)] = f",
"_____no_output_____"
],
[
"df.head()",
"_____no_output_____"
],
[
"df.to_csv(filename+\".feat\", index=False)",
"_____no_output_____"
],
[
"df[\"ActivityName\"].unique()",
"_____no_output_____"
],
[
"activity_labels = df[\"ActivityName\"].unique()\nind = np.arange(len(activity_labels))\nplt.rcParams['figure.figsize'] = [10, 5]\nnRow = []\nfor label in activity_labels:\n c = len(df[df[\"ActivityName\"]==label])\n nRow.append(c)\n \nplt.rcParams['figure.figsize'] = [20, 5]\np1 = plt.bar(ind, nRow)\n\nplt.ylabel('Number of records')\nplt.title('Number of records in raw data of each activity class')\nplt.xticks(ind, activity_labels, rotation='vertical')\n\nplt.show()",
"_____no_output_____"
],
[
"from functools import cmp_to_key\n\nfrom matplotlib import colors as mcolors\nplt.rcParams['figure.figsize'] = [10, 5]\nvectors = df\ncolors = [\"red\", \"green\", \"blue\", \"gold\", \"yellow\"] + list(mcolors.TABLEAU_COLORS.values()) \n\np = vectors[\"ActivityName\"]\nv = vectors[[\"ActivityName\"]]\nv[\"c\"] = 1\n\nlabels = p.unique()\ncount = v.groupby(['ActivityName']).agg(['count'])[(\"c\", \"count\")]\nlabels, count\n\ndef compare(item1, item2):\n return count[item2] - count[item1]\n\nprint(labels)\nlabels = sorted(labels, key=cmp_to_key(compare))\nsizes = [count[l] for l in labels]\n\nfig1, ax1 = plt.subplots()\npatches, texts = ax1.pie(sizes, colors=colors)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nplt.legend(patches, labels, loc=\"best\")\nplt.tight_layout()\nplt.show()",
"['sitting' 'standing' 'lying on back side' 'lying on right side'\n 'ascending stairs' 'descending stairs' 'standing in an elevator still'\n 'moving around in an elevator' 'walking in a parking lot'\n 'walking on a treadmill1' 'walking on a treadmill2'\n 'running on a treadmill3' 'exercising on a stepper'\n 'exercising on a cross trainer' 'cycling in horizontal positions'\n 'cycling in vertical positions' 'rowing' 'jumping' 'playing basketball']\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e115c3cf4dc856f8e75637243567ec428b531c | 157,846 | ipynb | Jupyter Notebook | notebooks/thermodynamics/density_of_gas.ipynb | EvenSol/testneqsim | 2862507de7da13b39b3575de9095a9c5643e4008 | [
"Apache-2.0"
] | 10 | 2020-10-06T23:03:36.000Z | 2022-03-09T03:28:12.000Z | notebooks/thermodynamics/density_of_gas.ipynb | EvenSol/testneqsim | 2862507de7da13b39b3575de9095a9c5643e4008 | [
"Apache-2.0"
] | 1 | 2020-02-25T09:33:08.000Z | 2020-02-25T09:33:08.000Z | notebooks/thermodynamics/density_of_gas.ipynb | EvenSol/testneqsim | 2862507de7da13b39b3575de9095a9c5643e4008 | [
"Apache-2.0"
] | 1 | 2021-02-22T10:31:17.000Z | 2021-02-22T10:31:17.000Z | 334.419492 | 67,824 | 0.764023 | [
[
[
"<a href=\"https://colab.research.google.com/github/EvenSol/NeqSim-Colab/blob/master/notebooks/thermodynamics/density_of_gas.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#@title Calculation of density of gases\n#@markdown Demonstration of ideal gas law and equations of state. An introduction to equations of state can be seen in the [EoS Wikipedia pages](https://en.wikipedia.org/wiki/Equation_of_state).\n#@markdown <br><br>This document is part of the module [\"Introduction to Gas Processing using NeqSim in Colab\"](https://colab.research.google.com/github/EvenSol/NeqSim-Colab/blob/master/notebooks/examples_of_NeqSim_in_Colab.ipynb#scrollTo=_eRtkQnHpL70).\n%%capture\n!pip install neqsim\nimport neqsim\nfrom neqsim.thermo.thermoTools import *\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nplt.style.use('classic')\n%matplotlib inline",
"_____no_output_____"
],
[
"#@title Introduction to Gas Laws\n#@markdown This video gives an intriduction to behavour of gases as function of pressure and temperature\nfrom IPython.display import YouTubeVideo\nYouTubeVideo('QhnlyHV8evY', width=600, height=400)",
"_____no_output_____"
]
],
[
[
"# Comparison of ideal and real gas behaviour\n\nIn the following example we use the ideal gas law and the PR/SRK-EOS to calculate the density of a pure component gas. At low pressure we see that the ideal gas and the real density are the same, at higher pressures the real gas density is higher, while at very high pressures the ideal gas density is the highest. The reason for this is that at intermediate pressures, the atractive forces is dominating, while at very high pressures repulsive forces starts to dominate.\n\nThe ideal gas equation of state is $pV=nRT$ where $R$ is the gas constant $8.314kJ/mol$. The real gas equation of state is given on the form $pV=ZnRT$ where $Z$ is the gas compressibility factor. The density can be calculated from $\\rho=n/V\\times M$ where $M$is the molar mass.\n\nUse the form to select molecule, temperature and pressure range, and calculate the density and compressibility of the gas. Can you find a gas that shifts from gas to liquid when pressure is increased?",
"_____no_output_____"
]
],
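[
[
"#@title Worked example (added for illustration): ideal gas density from pV=nRT\n# A quick check of the formulas above, independent of neqsim: at 1 bara and 298.15 K one mole of an\n# ideal gas occupies V = RT/p, and the density follows from rho = M/V. The molar mass used below is\n# approximately that of methane and is only an assumed example value.\nR = 8.314 # J/(mol K)\npressure_Pa = 1.0e5 # 1 bara\ntemperature_K = 298.15 # K\nmolar_mass = 0.016 # kg/mol, approximately methane\n\nmolar_volume = R*temperature_K/pressure_Pa # m3/mol\ndensity = molar_mass/molar_volume # kg/m3\nprint('molar volume ', molar_volume, ' m3/mol')\nprint('ideal gas density ', density, ' kg/m3')",
"_____no_output_____"
]
],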
[
[
"#@title Select component and equation of state. Set temperature [K] and pressure range [bara]. { run: \"auto\" }\ncomponentName = \"CO2\" #@param [\"methane\", \"ethane\", \"propane\", \"CO2\", \"nitrogen\"]\ntemperature = 323.0 #@param {type:\"number\"}\nminPressure = 1.0 #@param {type:\"number\"}\nmaxPressure = 350.0 #@param {type:\"number\"}\neosname = \"srk\" #@param [\"srk\", \"pr\"]\nR = 8.314 # J/mol/K\n\n# Creating a fluid in neqsim\nfluid1 = fluid(eosname) #create a fluid using the SRK-EoS\nfluid1.addComponent(componentName, 1.0) #adding 1 mole to the fluid\nfluid1.init(0); \n\nprint('molar mass of ', componentName, ' is ', fluid1.getMolarMass()*1000 , ' kg/mol')\n\ndef idealgasdensity(pressure, temperature):\n m3permol = R*temperature/(pressure*1e5)\n m3perkg = m3permol/fluid1.getMolarMass()\n return 1.0/m3perkg\n\ndef realgasdensity(pressure, temperature):\n fluid1.setPressure(pressure)\n fluid1.setTemperature(temperature)\n TPflash(fluid1)\n fluid1.initPhysicalProperties();\n return fluid1.getDensity('kg/m3')\n\ndef compressibility(pressure, temperature):\n fluid1.setPressure(pressure)\n fluid1.setTemperature(temperature)\n TPflash(fluid1)\n fluid1.initPhysicalProperties();\n return fluid1.getZ()\n\npressure = np.arange(minPressure, maxPressure, int((maxPressure-minPressure)/100)+1)\nidealdensity = [idealgasdensity(P,temperature) for P in pressure]\nrealdensity = [realgasdensity(P,temperature) for P in pressure]\ncompressibility = [compressibility(P,temperature) for P in pressure]\n\nplt.figure()\nplt.subplot(2, 1, 1)\nplt.plot(pressure, idealdensity, '--')\nplt.plot(pressure, realdensity, '-')\nplt.xlabel('Pressure [bara]')\nplt.ylabel('Density [kg/m3]')\nplt.legend(['ideal', 'real'])\nplt.subplot(2, 1, 2)\nplt.plot(pressure, compressibility, '-')\nplt.xlabel('Pressure [bara]')\nplt.ylabel('Z [-]')\nplt.legend(['compressibility factor'])",
"molar mass of CO2 is 44.01 kg/mol\n"
]
],
[
[
"# Pressure of gas as function of volume\n\n1 m3 methane at 1 bar and 25 C is compressed to 200 bar and cooled to 25 C. What is\nthe volume of the gas? What is the density of the compressed gas?",
"_____no_output_____"
]
],
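[
[
"#@title Worked example (added for illustration): ideal gas estimate for the compression question\n# A rough ideal gas answer to the question above: 1 m3 of methane at 1 bara and 25 C compressed to\n# 200 bara and cooled back to 25 C. For an ideal gas V2 = V1*p1/p2, so the volume shrinks by the\n# pressure ratio; the real volume differs because Z deviates from 1, and the parameterized cell\n# below can be used to compute it with an equation of state.\nR = 8.314 # J/(mol K)\nT = 298.15 # K\np1 = 1.0e5 # Pa\np2 = 200.0e5 # Pa\nV1 = 1.0 # m3\nM = 0.016 # kg/mol, approximately methane\n\nn = p1*V1/(R*T) # moles of gas\nV2 = n*R*T/p2 # ideal gas volume at 200 bara\nprint('moles of gas ', n, ' mol')\nprint('ideal gas volume at 200 bara ', V2, ' m3')\nprint('ideal gas density at 200 bara ', n*M/V2, ' kg/m3')",
"_____no_output_____"
]
],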
[
[
"componentName = \"nitrogen\" #@param [\"methane\", \"ethane\", \"propane\", \"CO2\", \"nitrogen\"]\ntemperature = 298.15 #@param {type:\"number\"}\ninitialVolume = 1.0 #@param {type:\"number\"}\ninitialPressure = 1.0 #@param {type:\"number\"}\nendPressure = 10.0 #@param {type:\"number\"}\nR = 8.314 # J/mol/K\n\ninitialMoles = initialPressure*1e5*1.0/(R*temperature)\n\n# Creating a fluid in neqsim\nfluid1 = fluid('srk') #create a fluid using the SRK-EoS\nfluid1.addComponent(componentName, initialMoles) #adding 1 Sm3 to the fluid\nfluid1.setTemperature(temperature)\nfluid1.setPressure(initialPressure)\nTPflash(fluid1)\nfluid1.initPhysicalProperties()\nstartVolume = fluid1.getVolume('m3/sec')\n\n\nprint('initialVolume ', startVolume, 'm3')\nprint('initial gas density ', fluid1.getDensity('kg/m3'), 'kg/m3')\nprint('initial gas compressiility ', fluid1.getZ(), ' [-]')\n \nfluid1.setPressure(endPressure)\nTPflash(fluid1)\nfluid1.initPhysicalProperties()\n\nendVolume = fluid1.getVolume('m3/sec')\nprint('end volume ', fluid1.getVolume('Sm3/sec'), 'm3')\nprint('volume ratio ', endVolume/startVolume, ' m3/m3')\nprint('end gas density ', fluid1.getDensity('kg/m3'), ' kg/m3')\nprint('end gas compressibility ', fluid1.getZ(), ' [-]')",
"initialVolume 1.0000083601119607 m3\ninitial gas density 1.1301476964655142 kg/m3\ninitial gas compressiility 0.9999527817885948 [-]\nend volume 0.09997979623211148 m3\nvolume ratio 0.09997896039680884 m3/m3\nend gas density 11.30767486281327 kg/m3\nend gas compressibility 0.9997423956912075 [-]\n"
]
],
[
[
"# Calculation of density of LNG\nThe density of liquified methane at the boiling point at atomspheric pressure can be calcuated as demonstrated in the following example. In this case we use the SRK EoS and the PR-EoS.",
"_____no_output_____"
]
],
[
[
"# Creating a fluid in neqsim\neos = 'srk' #@param [\"srk\", \"pr\"]\npressure = 1.01325 #@param {type:\"number\"}\ntemperature = -162.0 #@param {type:\"number\"}\nfluid1 = fluid(eos) #create a fluid using the SRK-EoS\nfluid1.addComponent('methane', 1.0)\nfluid1.setTemperature(temperature)\nfluid1.setPressure(pressure)\nbubt(fluid1)\nfluid1.initPhysicalProperties()\nprint('temperature at boiling point ', fluid1.getTemperature()-273.15, 'C')\nprint('LNG density ', fluid1.getDensity('kg/m3'), ' kg/m3')\n",
"temperature at boiling point -161.1441471093413 C\nLNG density 428.1719693971862 kg/m3\n"
]
],
[
[
"# Accuracy of EoS for calculating the density\nThe density calculated with any equation of state will have an uncertainty. The GERG-2008 is a reference equation of state with high accuracy in prediction of thermodynamic properties. In the following example we compare the gas density calculations of SRK/PR with the GERG-(2008 version)-EoS.",
"_____no_output_____"
]
],
[
[
"#@title Select component and equation of state. Set temperature [K] and pressure range [bara]. { run: \"auto\" }\ncomponentName = \"methane\" #@param [\"methane\", \"ethane\", \"propane\", \"CO2\", \"nitrogen\"]\ntemperature = 298.0 #@param {type:\"number\"}\nminPressure = 1.0 #@param {type:\"number\"}\nmaxPressure = 500.0 #@param {type:\"number\"}\neosname = \"srk\" #@param [\"srk\", \"pr\"]\nR = 8.314 # J/mol/K\n\n# Creating a fluid in neqsim\nfluid1 = fluid(eosname) #create a fluid using the SRK-EoS\nfluid1.addComponent(componentName, 1.0) #adding 1 mole to the fluid\nfluid1.init(0); \n\n\ndef realgasdensity(pressure, temperature):\n fluid1.setPressure(pressure)\n fluid1.setTemperature(temperature)\n TPflash(fluid1)\n fluid1.initPhysicalProperties();\n return fluid1.getDensity('kg/m3')\n\ndef GERGgasdensity(pressure, temperature):\n fluid1.setPressure(pressure)\n fluid1.setTemperature(temperature)\n TPflash(fluid1)\n return fluid1.getPhase('gas').getDensity_GERG2008()\n\npressure = np.arange(minPressure, maxPressure, int((maxPressure-minPressure)/100)+1)\nrealdensity = [realgasdensity(P,temperature) for P in pressure]\nGERG2008density = [GERGgasdensity(P,temperature) for P in pressure]\ndeviation = [((realgasdensity(P,temperature)-GERGgasdensity(P,temperature))/GERGgasdensity(P,temperature)*100.0) for P in pressure]\n\nplt.figure()\nplt.subplot(2, 1, 1)\nplt.plot(pressure, realdensity, '-')\nplt.plot(pressure, GERG2008density, '--')\nplt.xlabel('Pressure [bara]')\nplt.ylabel('Density [kg/m3]')\nplt.legend(['EoS', 'GERG-2008'])\nplt.subplot(2, 1, 2)\n\nplt.plot(pressure, deviation)\nplt.xlabel('Pressure [bara]')\nplt.ylabel('Deviation [%]')",
"_____no_output_____"
]
],
[
[
"##Calculation of density and compressibility factor for a natual gas mixture\nIn the following example we calculate the density of a multicomponent gas mixture.",
"_____no_output_____"
]
],
[
[
"#@title Select equation of state and set temperature [C] and pressure [bara] { run: \"auto\" }\ntemperature = 15.0 #@param {type:\"number\"}\npressure = 100.0 #@param {type:\"number\"}\neosname = \"srk\" #@param [\"srk\", \"pr\"]\n\nfluid1 = fluid(eosname)\nfluid1.addComponent('nitrogen', 1.2) \nfluid1.addComponent('CO2', 2.6) \nfluid1.addComponent('methane', 85.8)\nfluid1.addComponent('ethane', 7.0) \nfluid1.addComponent('propane', 3.4) \nfluid1.setTemperature(temperature, 'C')\nfluid1.setPressure(pressure, 'bara')\nTPflash(fluid1)\nfluid1.initProperties()\nprint('gas compressibility ', fluid1.getZ(), ' -')\nprint('gas density ', fluid1.getDensity('kg/m3'), ' kg/m3')\n",
"gas compressibility 0.7735308707131694 -\ngas density 102.2871090151433 kg/m3\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e117348e02d9c253e3c6e333ba7742d5a95dbd | 117,562 | ipynb | Jupyter Notebook | Store Level Gradient Boost.ipynb | YikunLiu0801/Time-Series-Walmart-Product-Sale-Forecast | 8b0a7cbd3a537e97e8f4af2db482d48a23c820b6 | [
"MIT"
] | null | null | null | Store Level Gradient Boost.ipynb | YikunLiu0801/Time-Series-Walmart-Product-Sale-Forecast | 8b0a7cbd3a537e97e8f4af2db482d48a23c820b6 | [
"MIT"
] | null | null | null | Store Level Gradient Boost.ipynb | YikunLiu0801/Time-Series-Walmart-Product-Sale-Forecast | 8b0a7cbd3a537e97e8f4af2db482d48a23c820b6 | [
"MIT"
] | null | null | null | 39.556528 | 240 | 0.434162 | [
[
[
"import pandas as pd\nimport numpy as np\nimport operator\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor as rfr\nfrom sklearn.ensemble import GradientBoostingRegressor as gbr\nfrom datetime import datetime,timedelta",
"_____no_output_____"
],
[
"df0 = pd.read_csv('data_CA_1.csv')\ndf1 = pd.read_csv('data_CA_2.csv')\ndf2 = pd.read_csv('data_CA_3.csv')\ndf3 = pd.read_csv('data_CA_4.csv')\ndf4 = pd.read_csv('data_TX_1.csv')\ndf5 = pd.read_csv('data_TX_2.csv')\ndf6 = pd.read_csv('data_TX_3.csv')\ndf7 = pd.read_csv('data_WI_1.csv')\ndf8 = pd.read_csv('data_WI_2.csv')\ndf9 = pd.read_csv('data_WI_3.csv')",
"_____no_output_____"
],
[
"def preprocess(df):\n df.drop(columns = ['Unnamed: 0','store_id','id','weekday','sell_price','event_name_1','event_name_2','event_type_2'],inplace=True)\n \n \n # get the day of month\n date = pd.to_datetime(df['date'],format= '%Y-%m-%d')\n mday = []\n for x in date:\n mday.append(x.strftime(\"%d\"))\n df['mday'] = mday\n df.mday = df.mday.astype(int)\n # get the day number\n df['day'] = df.d.apply(lambda x: x[2:])\n df.day = df.day.astype(int)\n # get the week number\n df['week'] = df.wm_yr_wk.apply(lambda x: x%100)\n \n # dummy to denote national events\n df['national'] = np.where(df.event_type_1=='National',1,0)\n \n df['snap_CA'] = df.snap.apply(lambda x: x[0])\n df['snap_TX'] = df.snap.apply(lambda x: x[2])\n df['snap_WI'] = df.snap.apply(lambda x: x[4])\n \n df.drop(columns = ['d','wm_yr_wk','event_type_1','date','snap'],inplace=True)\n \n return df",
"_____no_output_____"
],
[
"df = [df0,df1,df2,df3,df4,df5,df6,df7,df8,df9]\ndfs = []\nfor i in range(10):\n dfs.append(preprocess(df[i]))",
"_____no_output_____"
],
[
"def clean(df):\n\n df_y = df.groupby('day').sum().demand.reset_index(drop = True)\n df_x = df.drop(columns = ['demand']).drop_duplicates().reset_index(drop = True)\n \n df_x['demand_28'] = df_y.shift(28)\n \n df_x = df_x.dropna().reset_index(drop=True)\n df_y = df_y[28:].reset_index(drop=True)\n\n return df_x,df_y",
"_____no_output_____"
],
[
"def split(df_x,df_y,num):\n # train-valid split, use last 28 days as validation\n df_xtrain = df_x.iloc[num:-28,:]\n df_xvalid = df_x.iloc[-28:,:]\n df_ytrain = df_y.iloc[num:-28]\n df_yvalid = df_y.iloc[-28:]\n \n return df_xtrain,df_ytrain,df_xvalid,df_yvalid",
"_____no_output_____"
],
[
"def rmsse(y_train,y_test,y_pred):\n y_train1 = y_train.shift()\n n = len(y_train)\n h = len(y_test)\n dividor = sum((y_train[1:]-y_train1[1:])**2)/(n-1)\n divident = sum((y_test-y_pred)**2)/h\n \n return divident/dividor",
"_____no_output_____"
],
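A minimal usage sketch for the error metric above, with made-up series (it assumes the rmsse helper from the previous cell is in scope). Note that, as written, the helper returns a ratio of mean squared errors; the conventional RMSSE would be the square root of this value.

# Toy example only: a forecast whose squared error matches the naive one-step error scores 1.0.
import numpy as np
import pandas as pd

y_train = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])   # in-sample history
y_test = pd.Series([6.0, 7.0])                   # held-out truth
y_pred = np.array([5.0, 8.0])                    # hypothetical forecast
print(rmsse(y_train, y_test, y_pred))            # 1.0 for this toy case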
[
"def gb_select(df_xtrain,df_ytrain,df_xvalid,df_yvalid,m,a):\n model = gbr(random_state=1,max_features=m,n_estimators=150,learning_rate=a).fit(df_xtrain,df_ytrain)\n df_ypred = model.predict(df_xvalid)\n\n return rmsse(df_ytrain,df_yvalid,df_ypred)",
"_____no_output_____"
],
[
"weight = np.array([0.110888286,0.110246513,0.155628193,0.065608342,0.0775605,0.09520139,0.096534717,0.087117424,0.116621569,0.084593065])\n",
"_____no_output_____"
],
[
"error_table = pd.DataFrame(index=range(1,12),columns = [0.001,0.003,0.005,0.007,0.01,0.03,0.05,0.07,0.1,0.3])",
"_____no_output_____"
],
[
"for m in range(1,12):\n for a in [0.001,0.003,0.005,0.007,0.01,0.03,0.05,0.07,0.1,0.3]:\n b = []\n Loss = np.array([0])\n for i in range(len(dfs)):\n df_x,df_y = clean(dfs[i])\n df_xtrain,df_ytrain,df_xvalid,df_yvalid = split(df_x,df_y,-759)\n loss= gb_select(df_xtrain,df_ytrain,df_xvalid,df_yvalid,m,a)\n b.append(loss)\n Loss = np.array(b)\n error_table.loc[m,a] = sum(Loss*weight)",
"_____no_output_____"
],
[
"error_table",
"_____no_output_____"
],
[
"for a in [0.02,0.04]:\n print('a=',a)\n b = []\n Loss = np.array([0])\n for i in range(len(dfs)):\n df_x,df_y = clean(dfs[i])\n df_xtrain,df_ytrain,df_xvalid,df_yvalid = split(df_x,df_y,-759)\n loss= gb_select(df_xtrain,df_ytrain,df_xvalid,df_yvalid,9,a)\n b.append(loss)\n Loss = np.array(b)\n print(sum(Loss*weight))",
"a= 0.02\n0.2561138894131205\na= 0.04\n0.24908494634554074\n"
],
[
"valid_train = pd.DataFrame(columns = ['wday', 'month', 'year', 'mday', 'day', 'week', 'national', 'snap_CA','snap_TX', 'snap_WI', 'demand_28'])\n\ndate = []\nfor i in range(1,57):\n date.append(pd.to_datetime('2016-04-24')+timedelta(days=i))\nwday = []\nfor x in date:\n num = int(x.strftime(\"%w\"))\n if num == 5:\n wday.append(7)\n else:\n wday.append((num+2)%7)\nvalid_train['wday'] = wday\n\nmonth = []\nfor x in date:\n month.append(int(x.strftime(\"%m\")))\nvalid_train['month'] = month\n\nvalid_train['year'] = [2016]*56\n\nmday = []\nfor x in date:\n mday.append(int(x.strftime(\"%d\")))\nvalid_train['mday'] = mday\n\nvalid_train['day'] = range(1914,1914+56)\n\nweek = [13]*5+[14]*7+[15]*7+[16]*7+[17]*7+[18]*7+[19]*7+[20]*7+[21]*2\nvalid_train['week'] = week\n\n# only 5/30/2016 is a federal holiday\nnational = [0]*36+[1]+[0]*19\nvalid_train['national'] = national\n\nmask1 = (valid_train.mday<=10)\nvalid_train['snap_CA']=np.where(mask1,1,0)\n\nmask2 = (valid_train.mday==1)|(valid_train.mday==3)|(valid_train.mday==5)|(valid_train.mday==6)|(valid_train.mday==7)|(valid_train.mday==9)|(valid_train.mday==11)|(valid_train.mday==12)|(valid_train.mday==13)|(valid_train.mday==15)\nvalid_train['snap_TX']=np.where(mask2,1,0)\n\nmask3 = (valid_train.mday==2)|(valid_train.mday==3)|(valid_train.mday==5)|(valid_train.mday==6)|(valid_train.mday==8)|(valid_train.mday==9)|(valid_train.mday==11)|(valid_train.mday==12)|(valid_train.mday==14)|(valid_train.mday==15)\nvalid_train['snap_WI']=np.where(mask3,1,0)\n\nvalid_train['demand_28'] = [0]*56",
"_____no_output_____"
],
[
"def gb_forecast(df_x,df_y,valid_train):\n model = gbr(random_state=1,max_features=9,n_estimators=150,learning_rate=0.03).fit(df_x,df_y)\n valid_train.demand_28[:28] = df_y[-28:]\n df_ypred1 = model.predict(valid_train.iloc[:28,:])\n valid_train.demand_28[28:] = df_ypred1\n df_ypred = model.predict(valid_train)\n\n return df_ypred",
"_____no_output_____"
],
[
"pred = pd.DataFrame(columns=['CA_1','CA_2','CA_3','CA_4','TX_1','TX_2','TX_3','WI_1','WI_2','WI_3'])",
"_____no_output_____"
],
[
"dfs = [df0,df1,df2,df3,df4,df5,df6,df7,df8,df9]\nstores = ['CA_1','CA_2','CA_3','CA_4','TX_1','TX_2','TX_3','WI_1','WI_2','WI_3']\nfor i in range(len(dfs)):\n df_x,df_y = clean(dfs[i])\n df_ypred = gb_forecast(df_x,df_y,valid_train)\n pred[f'{stores[i]}'] = df_ypred",
"/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: 
https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \"\"\"\n"
],
[
"pred",
"_____no_output_____"
],
[
"pred = pred.transpose()\npred.to_csv('store_pred_gb.csv')",
"_____no_output_____"
],
[
"df0 = pd.read_csv('data_CA_1.csv')\ndf1 = pd.read_csv('data_CA_2.csv')\ndf2 = pd.read_csv('data_CA_3.csv')\ndf3 = pd.read_csv('data_CA_4.csv')\ndf4 = pd.read_csv('data_TX_1.csv')\ndf5 = pd.read_csv('data_TX_2.csv')\ndf6 = pd.read_csv('data_TX_3.csv')\ndf7 = pd.read_csv('data_WI_1.csv')\ndf8 = pd.read_csv('data_WI_2.csv')\ndf9 = pd.read_csv('data_WI_3.csv')",
"/Users/liuyikun/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3058: DtypeWarning: Columns (11,12,13,14) have mixed types.Specify dtype option on import or set low_memory=False.\n interactivity=interactivity, compiler=compiler, result=result)\n"
],
[
"def get_ratio(df):\n df['day'] = df.d.apply(lambda x: x[2:])\n df.day = df.day.astype(int)\n last = df[df.day>1913-28]\n sum_demand = sum(last.demand)\n sep_demand = last.groupby(['id']).sum().demand\n \n sep_demand['ratio'] = sep_demand/sum_demand\n \n return sep_demand['ratio']",
"_____no_output_____"
],
[
"def forecast(df,store_name):\n result = pd.DataFrame(columns=range(1,57))\n ratio = get_ratio(df)\n for x in range(1,57):\n result[x]=ratio*pred.loc[store_name,:][x-1]\n return result",
"_____no_output_____"
],
[
"result = pd.DataFrame(columns=range(1,57))\n\ndfs = [df0,df1,df2,df3,df4,df5,df6,df7,df8,df9]\nstores = ['CA_1','CA_2','CA_3','CA_4','TX_1','TX_2','TX_3','WI_1','WI_2','WI_3']\n\nfor i in range(len(dfs)):\n sub_data = forecast(dfs[i],stores[i])\n result = pd.concat([result,sub_data])",
"_____no_output_____"
],
[
"result",
"_____no_output_____"
],
[
"result.to_csv('result_gb.csv')",
"_____no_output_____"
],
[
"validation = result.loc[:,:28]\nevaluation = result.loc[:,29:]",
"_____no_output_____"
],
[
"validation.reset_index(inplace=True)\nfor i in range(30490):\n validation['index'][i] = validation['index'][i]+'_validation'\nvalidation.rename(columns = {'index':'id'},inplace = True)\nvalidation.set_index('id',inplace=True)\n\nevaluation.reset_index(inplace=True)\nfor i in range(30490):\n evaluation['index'][i] = evaluation['index'][i]+'_evaluation'\nevaluation.rename(columns = {'index':'id'},inplace=True)\nevaluation.set_index('id',inplace=True)",
"/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3326: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n exec(code_obj, self.user_global_ns, self.user_ns)\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/pandas/core/frame.py:4133: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n errors=errors,\n/Users/liuyikun/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:9: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n if __name__ == '__main__':\n"
],
[
"columns = []\nfor i in range(1,29):\n columns.append(f'F{i}')\nvalidation.columns = columns\nevaluation.columns = columns\nresult=pd.concat([validation,evaluation])",
"_____no_output_____"
],
[
"result",
"_____no_output_____"
],
[
"result.to_csv('final_result_gb.csv')",
"_____no_output_____"
],
[
"result_rf = pd.read_csv('final_result.csv',index_col='id')",
"_____no_output_____"
],
[
"result_rf",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e11c665edae45eef6324b0e8173bf3329c5095 | 87,235 | ipynb | Jupyter Notebook | intro_PyTorch/3.1.ipynb | caffeflow/intro_pytorch | 9f7870340d3e3878f771531719353159f738af66 | [
"MIT"
] | null | null | null | intro_PyTorch/3.1.ipynb | caffeflow/intro_pytorch | 9f7870340d3e3878f771531719353159f738af66 | [
"MIT"
] | null | null | null | intro_PyTorch/3.1.ipynb | caffeflow/intro_pytorch | 9f7870340d3e3878f771531719353159f738af66 | [
"MIT"
] | null | null | null | 319.542125 | 81,600 | 0.932252 | [
[
[
"",
"_____no_output_____"
]
],
[
[
"import torch \nimport h5py\nimport numpy as np\nimport csv",
"_____no_output_____"
]
],
[
[
"### 加载数据, 创建tensor",
"_____no_output_____"
]
],
[
[
"wine_path = \"./data/chapter3/winequality-white.csv\"\nwine_data = np.loadtxt(fname=wine_path,delimiter=';',skiprows=1) # 第一行是标签\nwine_data.shape",
"_____no_output_____"
],
[
"wine_label = next(csv.reader(open(wine_path),delimiter=';'))\nwine_label = np.array(wine_label)\nwine_label.shape",
"_____no_output_____"
],
[
"# ndarray转为tensor\nwine_data = torch.from_numpy(wine_data)",
"_____no_output_____"
]
],
[
[
"### 预处理张量\n",
"_____no_output_____"
]
],
[
[
"# 划分出评分做为ground_truth\nwine_content = wine_data[:,:-1]\nwine_score = wine_data[:,-1]\n\nwine_content.shape,wine_score.shape",
"_____no_output_____"
],
[
"wine_score",
"_____no_output_____"
]
],
[
[
"### 特征缩放",
"_____no_output_____"
]
],
[
[
"# 标准化\ncontent_mean = wine_content.mean(dim=0)\ncontent_var = wine_content.var(dim=0)\ncontent_normalized = (wine_content - content_mean)/torch.sqrt(content_var)",
"_____no_output_____"
]
],
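A quick sanity check on the standardization above (a sketch, assuming content_normalized from the previous cell): every feature column should now have mean close to 0 and variance close to 1.

# Each column of (x - mean) / std is centred and unit-scaled.
print(content_normalized.mean(dim=0))   # values near 0
print(content_normalized.var(dim=0))    # values near 1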
[
[
"### 数据审查",
"_____no_output_____"
]
],
[
[
"# 酒分3个等级\ncontent_bad = wine_content[torch.lt(wine_score,6)]\ncontent_mid = wine_content[torch.ge(wine_score,6) & torch.lt(wine_score,8)]\ncontent_good = wine_content[torch.gt(wine_score,8)]\n\ncontent_bad.shape",
"_____no_output_____"
],
[
"# 对酒中的化学含量做平均值\ncontent_bad = content_bad.mean(dim=0)\ncontent_mid = content_mid.mean(dim=0)\ncontent_good = content_good.mean(dim=0)\n\ncontent_bad.shape",
"_____no_output_____"
],
[
"for i,args in enumerate(zip(wine_label,content_bad,content_mid,content_good)):\n print('{:2} {:20} {:6.2f} {:6.2f} {:6.2f}'.format(i,*args))",
" 0 fixed acidity 6.96 6.81 7.42\n 1 volatile acidity 0.31 0.26 0.30\n 2 citric acid 0.33 0.33 0.39\n 3 residual sugar 7.05 6.08 4.12\n 4 chlorides 0.05 0.04 0.03\n 5 free sulfur dioxide 35.34 35.21 33.40\n 6 total sulfur dioxide 148.60 133.64 116.00\n 7 density 1.00 0.99 0.99\n 8 pH 3.17 3.20 3.31\n 9 sulphates 0.48 0.49 0.47\n10 alcohol 9.85 10.80 12.18\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7e1216b91184005ec6ec4b31448862585e0baa5 | 8,646 | ipynb | Jupyter Notebook | Project Notes/Kaggle Learn/01 Python/exercise02 functions and getting help.ipynb | JoaoAnt/Projects | fce9f8421756dcda4307ba6c71884589f856c6c0 | [
"MIT"
] | 1 | 2020-07-29T17:08:27.000Z | 2020-07-29T17:08:27.000Z | Project Notes/Kaggle Learn/01 Python/exercise02 functions and getting help.ipynb | JoaoAnt/Projects | fce9f8421756dcda4307ba6c71884589f856c6c0 | [
"MIT"
] | null | null | null | Project Notes/Kaggle Learn/01 Python/exercise02 functions and getting help.ipynb | JoaoAnt/Projects | fce9f8421756dcda4307ba6c71884589f856c6c0 | [
"MIT"
] | null | null | null | 24.916427 | 378 | 0.538515 | [
[
[
"**[Python Micro-Course Home Page](https://www.kaggle.com/learn/python)**\n\n---\n",
"_____no_output_____"
],
[
"These exercises accompany the tutorial on [functions and getting help](https://www.kaggle.com/colinmorris/functions-and-getting-help).\n\nAs before, don't forget to run the setup code below before jumping into question 1.",
"_____no_output_____"
]
],
[
[
"# SETUP. You don't need to worry for now about what this code does or how it works.\nfrom learntools.core import binder; binder.bind(globals())\nfrom learntools.python.ex2 import *\nprint('Setup complete.')",
"Setup complete.\n"
]
],
[
[
"# Exercises",
"_____no_output_____"
],
[
"## 1.\n\nComplete the body of the following function according to its docstring.\n\nHINT: Python has a builtin function `round`",
"_____no_output_____"
]
],
[
[
"def round_to_two_places(num):\n \"\"\"Return the given number rounded to two decimal places. \n \n >>> round_to_two_places(3.14159)\n 3.14\n \"\"\"\n return round(num,2)\n pass\nround_to_two_places(3.14)\nq1.check()",
"_____no_output_____"
],
[
"# Uncomment the following for a hint\n#q1.hint()\n# Or uncomment the following to peek at the solution\n#q1.solution()",
"_____no_output_____"
]
],
[
[
"## 2.\nThe help for `round` says that `ndigits` (the second argument) may be negative.\nWhat do you think will happen when it is? Try some examples in the following cell?\n\nCan you think of a case where this would be useful?",
"_____no_output_____"
]
],
[
[
"# Put your test code here\nround(105.8555, -1)\nprint('Yes')",
"Yes\n"
],
[
"#q2.solution()",
"_____no_output_____"
]
],
[
[
"## 3.\n\nIn a previous programming problem, the candy-sharing friends Alice, Bob and Carol tried to split candies evenly. For the sake of their friendship, any candies left over would be smashed. For example, if they collectively bring home 91 candies, they'll take 30 each and smash 1.\n\nBelow is a simple function that will calculate the number of candies to smash for *any* number of total candies.\n\nModify it so that it optionally takes a second argument representing the number of friends the candies are being split between. If no second argument is provided, it should assume 3 friends, as before.\n\nUpdate the docstring to reflect this new behaviour.",
"_____no_output_____"
]
],
[
[
"def to_smash(total_candies, number_friends=3):\n \"\"\"Return the number of leftover candies that must be smashed after distributing\n the given number of candies evenly between 3 friends.\n \n >>> to_smash(91)\n 1\n \"\"\"\n return total_candies % number_friends\n\nq3.check()",
"_____no_output_____"
],
[
"#q3.hint()",
"_____no_output_____"
],
[
"#q3.solution()",
"_____no_output_____"
]
],
[
[
"## 4.\n\nIt may not be fun, but reading and understanding error messages will be an important part of your Python career.\n\nEach code cell below contains some commented-out buggy code. For each cell...\n\n1. Read the code and predict what you think will happen when it's run.\n2. Then uncomment the code and run it to see what happens. (**Tip**: In the kernel editor, you can highlight several lines and press `ctrl`+`/` to toggle commenting.)\n3. Fix the code (so that it accomplishes its intended purpose without throwing an exception)\n\n<!-- TODO: should this be autochecked? Delta is probably pretty small. -->",
"_____no_output_____"
]
],
[
[
"round_to_two_places(9.9999)",
"_____no_output_____"
],
[
"x = -10\ny = 5\n# Which of the two variables above has the smallest absolute value?\nsmallest_abs = min(abs(x),abs(y))",
"_____no_output_____"
],
[
"def f(x):\n y = abs(x)\n return y\n\nprint(f(5))",
"5\n"
]
],
[
[
"# Keep Going\n\nYou are ready for **[booleans and conditionals](https://www.kaggle.com/colinmorris/booleans-and-conditionals).**\n",
"_____no_output_____"
],
[
"---\n**[Python Micro-Course Home Page](https://www.kaggle.com/learn/python)**\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7e12609e06aebe1154a7bc280314d277db7cea2 | 9,921 | ipynb | Jupyter Notebook | examples/JHTDB_visualization_with_K3D.ipynb | lento234/pyJHTDB | 9d525b790037456839ce82a88a086faabf034c67 | [
"Apache-2.0"
] | 55 | 2015-04-10T14:49:01.000Z | 2022-03-28T17:07:20.000Z | examples/JHTDB_visualization_with_K3D.ipynb | joelguerrero/pyJHTDB | 4050fb49010eb6b27776e5c2c0fe0cab765eefb1 | [
"Apache-2.0"
] | 26 | 2015-03-18T16:44:37.000Z | 2022-02-23T09:08:15.000Z | examples/JHTDB_visualization_with_K3D.ipynb | joelguerrero/pyJHTDB | 4050fb49010eb6b27776e5c2c0fe0cab765eefb1 | [
"Apache-2.0"
] | 36 | 2015-03-19T19:10:14.000Z | 2022-03-30T09:28:58.000Z | 26.669355 | 155 | 0.509626 | [
[
[
"# Channel Flow Example",
"_____no_output_____"
]
],
[
[
"# Written for JHTDB by German G Saltar Rivera (2019)\n# To use K3D capabilities, use Firefox or Chrome browser. \n# Safari has trouble with K3D generated objects\n#\nimport pyJHTDB\nfrom pyJHTDB import libJHTDB\nimport time as tt\nimport numpy as np\nimport k3d #https://github.com/K3D-tools/K3D-jupyter\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interactive, fixed\nimport math\nfrom numpy import sin,cos,pi\n\nlJHTDB = libJHTDB()\nlJHTDB.initialize()\n\n#Add token\nauth_token = \"edu.jhu.pha.turbulence.testing-201311\" #Replace with your own token here\nlJHTDB.add_token(auth_token)\n\n#Set domain to be queried\ntime = 0\nFD4Lag4 = 44\n\ndeltax = 0.01\ndeltay = 0.008\ndeltaz = 0.008\n\nnx=100\nny=100\nnz=100\n\nxmin, xmax = 0, deltax*nx\nymin, ymax = -1, -1+deltay*ny\nzmin, zmax = 0, deltaz*nz",
"_____no_output_____"
],
[
"#Creates query points and arranges their coordinates into the required (n,3)-type array\npoints = np.zeros((nx*ny*nz,3),dtype='float32')\n\nx=np.linspace(xmin,xmax,nx,dtype='float32')\ny=np.linspace(ymin,ymax,ny,dtype='float32')\nz=np.linspace(zmin,zmax,nz,dtype='float32')\n\ncount = 0\n\nfor ii in range(np.size(x)): \n for jj in range(np.size(y)):\n for kk in range(np.size(z)):\n points[count,0] = x[ii]\n points[count,1] = y[jj]\n points[count,2] = z[kk]\n count = count + 1 \n\nprint(np.shape(points))",
"(1000000, 3)\n"
],
[
"#Queries the velocity gradient from JHTDB\nstart = tt.time()\nvelgrad = lJHTDB.getData(\n time, point_coords=points,sinterp = FD4Lag4,\n data_set = 'channel',\n getFunction = 'getVelocityGradient')\n\nlJHTDB.finalize()\nend = tt.time()\nprint(end - start)\nprint(velgrad.shape)",
"223.8548023700714\n(1000000, 9)\n"
],
[
"#Calculates the q-criterion\nqc = np.zeros((np.size(velgrad[:,0]),1))\nqc[:,0] = -0.5*(velgrad[:,0]**2+velgrad[:,4]**2+velgrad[:,8]**2+2*(velgrad[:,1]*velgrad[:,3]+velgrad[:,2]*velgrad[:,6]+velgrad[:,5]*velgrad[:,7]))\n\ncount2 = 0\nqcriterion = np.zeros((nx,ny,nz))\nfor ii in range(np.size(x)):\n for jj in range(np.size(y)):\n for kk in range(np.size(z)):\n qcriterion[ii,jj,kk] = qc[count2]\n count2 = count2 + 1\n\nprint(qcriterion.shape)",
"(100, 100, 100)\n"
],
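For reference, the quantity computed above is the incompressible-flow Q-criterion,

Q = -\tfrac{1}{2} \frac{\partial u_i}{\partial x_j} \frac{\partial u_j}{\partial x_i} = \tfrac{1}{2}\left( \Omega_{ij}\Omega_{ij} - S_{ij}S_{ij} \right),

where S_{ij} and \Omega_{ij} are the symmetric and antisymmetric parts of the velocity gradient A_{ij} = \partial u_i / \partial x_j. The nine velgrad columns hold A row-major, and the sum A_{11}^2 + A_{22}^2 + A_{33}^2 + 2(A_{12}A_{21} + A_{13}A_{31} + A_{23}A_{32}) is exactly A_{ij}A_{ji}, so positive values mark rotation-dominated (vortical) regions.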
[
"#Creates a K3D-volume rendering object\n#In order to export the plot as html object, in the controls panel, click on \"Snapshot\"\nvol = k3d.volume(qcriterion, color_range=[2,100], color_map=np.array(k3d.basic_color_maps.Jet,dtype=np.float32), \n bounds=[xmin,xmax,ymin,ymax,zmin,zmax],\n alpha_coef=100,name=\"Channel Flow Q vizualization\")\nplt = k3d.plot()\nplt.camera_auto_fit = False\nplt.camera = [1.5,0.2,1.5,0,-1,-0.5,0,1,0]\nplt += vol \nplt.axes = ['x','y','z']\nplt.display()",
"_____no_output_____"
]
],
[
[
"# Forced Isotropic Turbulence Example",
"_____no_output_____"
]
],
[
[
"#Set domain to be queried\n#Generates a 3D plot of Q iso-surface with overlayed kinetic energy volume \n#rendering in a [0,0.5]^3 subcube in isotropic turbulence\n\ntime1 = 3.00\n\nnx1=80\nny1=80\nnz1=80\n\nxmin1, xmax1 = 0, 0.5\nymin1, ymax1 = 0, 0.5\nzmin1, zmax1 = 0, 0.5\n\n#Creates query points and arranges their coordinates into the required (n,3)-type array\npoints1 = np.zeros((nx1*ny1*nz1,3),dtype='float32')\n\nx1=np.linspace(xmin1,xmax1,nx1,dtype='float32')\ny1=np.linspace(ymin1,ymax1,ny1,dtype='float32')\nz1=np.linspace(zmin1,zmax1,nz1,dtype='float32')\ncount = 0\n\nfor ii in range(np.size(x1)): \n for jj in range(np.size(y1)): \n for kk in range(np.size(z1)): \n points1[count,0] = x1[ii]\n points1[count,1] = y1[jj]\n points1[count,2] = z1[kk]\n count = count + 1 ",
"_____no_output_____"
],
[
"#Queries the velocity from JHTDB\nlJHTDB.initialize()\nstart = tt.time()\n\nLag4 = 4\n\nvel1 = lJHTDB.getData(\n time1, point_coords=points1,sinterp = Lag4,\n data_set = 'isotropic1024coarse',\n getFunction = 'getVelocity')\n \nend = tt.time()\n\nprint(end - start)\nlJHTDB.finalize()\nprint(vel1.shape)",
"25.941040515899658\n(512000, 3)\n"
],
[
"#Queries the velocity gradient from JHTDB\nlJHTDB.initialize()\nstart = tt.time()\n\ngrad1 = lJHTDB.getData(\n time1, point_coords=points1,sinterp = FD4Lag4,\n data_set = 'isotropic1024coarse',\n getFunction = 'getVelocityGradient')\n \nend = tt.time()\n\nprint(end - start)\nlJHTDB.finalize()\nprint(grad1.shape)",
"_____no_output_____"
],
[
"#Calculates the q-criterion\nq1 = np.zeros((np.size(grad1[:,0]),1))\nq1[:,0] = -0.5*(grad1[:,0]**2+grad1[:,4]**2+grad1[:,8]**2+2*(grad1[:,1]*grad1[:,3]+grad1[:,2]*grad1[:,6]+grad1[:,5]*grad1[:,7]))\n\n#Calculates the kinetic energy\ne1 = np.zeros((np.size(vel1[:,0]),1))\ne1[:,0] = vel1[:,0]**2 + vel1[:,1]**2 + vel1[:,2]**2",
"_____no_output_____"
],
[
"#Arrange 1D arrays into 3D arrays\nqcrit = np.zeros((nx1,ny1,nz1))\nenerg = np.zeros((nx1,ny1,nz1))\n\ncount2 = 0\nfor ii in range(nx1):\n for jj in range(ny1):\n for kk in range(nz1):\n qcrit[ii,jj,kk] = q1[count2]\n energ[ii,jj,kk] = e1[count2]\n count2 += 1",
"_____no_output_____"
],
[
"#Creates a K3D isosurface object\nisosurface = k3d.marching_cubes(qcrit,xmin=xmin1,xmax=xmax1,ymin=ymin1,ymax=ymax1, zmin=zmin1, zmax=zmax1, \n level=250, color = 0xf4ea0e,name = 'isotropic: Q Isosurface')\n\n#Creates a K3D volume rendering object\nvolume = k3d.volume(energ, color_range=[0.1*np.max(energ),0.8*np.max(energ)], color_map=np.array(k3d.basic_color_maps.Jet,dtype=np.float32), \n bounds=[xmin1,xmax1,ymin1,ymax1,zmin1,zmax1]\n ,alpha_coef=15,name=\"isotropic: kinetic energy\")\n\nplot = k3d.plot()\nplot.camera_auto_fit = True\nplot += volume \nplot += isosurface\nplot.axes = ['x','y','z']\nplot.display()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e12db16db27d05725715e1403ba3d90d874486 | 28,172 | ipynb | Jupyter Notebook | src/summarization/2. tokenizer_sample.ipynb | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | 8 | 2021-05-16T05:40:29.000Z | 2022-03-14T08:32:54.000Z | src/summarization/2. tokenizer_sample.ipynb | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | null | null | null | src/summarization/2. tokenizer_sample.ipynb | youngerous/kobart-voice-summarization | bf48edea602c0661d638f0ed6f4a35c2ced4009f | [
"Apache-2.0"
] | 3 | 2021-08-09T08:06:24.000Z | 2021-11-29T05:04:56.000Z | 23.091803 | 82 | 0.259087 | [
[
[
"# Kobart tokenizer sample\n\nTokenizer를 간단하게 살펴봅니다.",
"_____no_output_____"
]
],
[
[
"from kobart import get_kobart_tokenizer",
"_____no_output_____"
]
],
[
[
"## tokenize",
"_____no_output_____"
]
],
[
[
"tok = get_kobart_tokenizer()",
"using cached model\n"
],
[
"# only tokenize\ntokenized = tok.tokenize('비정형데이터분석 팀 식사과정입니다. 무야호!')\ntokenized",
"_____no_output_____"
],
[
"# convert to indice\ntok.convert_tokens_to_ids(tokenized)",
"_____no_output_____"
],
[
"# encode = tokenize + convert_tokens_to_ids\ntok.encode('비정형데이터분석 팀 식사과정입니다. 무야호!')",
"_____no_output_____"
]
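Assuming the object returned by get_kobart_tokenizer exposes the usual Hugging Face decode method (an assumption, not verified here), decoding should roughly invert encode:

# Hedged sketch: round-trip through encode/decode on the same sample sentence.
ids = tok.encode('비정형데이터분석 팀 식사과정입니다. 무야호!')
print(tok.decode(ids, skip_special_tokens=True))   # expected to return roughly the original sentence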
],
[
[
"## check vocab",
"_____no_output_____"
]
],
[
[
"vocab = dict(sorted(tok.vocab.items(), key=lambda item: item[1]))\nlen(vocab)",
"_____no_output_____"
],
[
"vocab",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7e160600a2938e76df25bfae21049925c4891d4 | 11,887 | ipynb | Jupyter Notebook | _notebook/DraftKingsNFLConstraintSatisfaction.ipynb | levon003/ml-visualized | 2b1a565820d40c81e6b8f0e9feb5e6cb2bfa8abc | [
"MIT"
] | null | null | null | _notebook/DraftKingsNFLConstraintSatisfaction.ipynb | levon003/ml-visualized | 2b1a565820d40c81e6b8f0e9feb5e6cb2bfa8abc | [
"MIT"
] | null | null | null | _notebook/DraftKingsNFLConstraintSatisfaction.ipynb | levon003/ml-visualized | 2b1a565820d40c81e6b8f0e9feb5e6cb2bfa8abc | [
"MIT"
] | 2 | 2020-12-03T17:25:29.000Z | 2021-12-03T19:41:33.000Z | 26.832957 | 184 | 0.465046 | [
[
[
"DraftKings NFL Constraint Satisfaction\n===\n\nThis is the companion code to a [blog post](https://zwlevonian.medium.com/integer-linear-programming-with-pulp-optimizing-a-draftkings-nfl-lineup-5e7524dd42d3) I wrote on Medium.",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"import pulp",
"_____no_output_____"
]
],
[
[
"### Load in the weekly data",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv('DKSalaries.csv')\nlen(df)",
"_____no_output_____"
],
[
"df.sample(n=5)",
"_____no_output_____"
],
[
"# trim any postponed games, since those can't be included in a lineup\ndf = df[df['Game Info'] != 'Postponed']\nlen(df)",
"_____no_output_____"
],
[
"exclude_list = ['Dak Prescott']\ndf = df[~df['Name'].isin(exclude_list)]\nlen(df)",
"_____no_output_____"
],
[
"# this is equivalent to an extra constraint that requires playing only players with a minimum cost\n# does not apply to DST, since that's kind of a special category\ndf = df[(df.Salary >= 4000)|(df['Roster Position'] == 'DST')]\nlen(df)",
"_____no_output_____"
]
],
[
[
"### Create the constraint problem\n\nGoal: maximize AvgPointsPerGame\n\n - TotalPlayers = 9\n - TotalSalary <= 50000\n - TotalPosition_WR = 3\n - TotalPosition_RB = 2\n - TotalPosition_TE = 1\n - TotalPosition_QB = 1\n - TotalPosition_FLEX = 1\n - TotalPosition_DST = 1\n - Each player in only one position (relevant only for FLEX)\n ",
"_____no_output_____"
]
],
[
[
"prob = pulp.LpProblem('DK_NFL_weekly', pulp.LpMaximize)",
"_____no_output_____"
],
[
"player_vars = [pulp.LpVariable(f'player_{row.ID}', cat='Binary') for row in df.itertuples()]",
"_____no_output_____"
],
[
"# total assigned players constraint\nprob += pulp.lpSum(player_var for player_var in player_vars) == 9",
"_____no_output_____"
],
[
"# position constraints\n# TODO fix this, currently won't work\n# as it makes the problem infeasible\ndef get_position_sum(player_vars, df, position):\n return pulp.lpSum([player_vars[i] * (position in df['Roster Position'].iloc[i]) for i in range(len(df))])\n \nprob += get_position_sum(player_vars, df, 'QB') == 1\nprob += get_position_sum(player_vars, df, 'DST') == 1\n\n# to account for the FLEX position, we allow additional selections of the 3 FLEX-eligible roles\nprob += get_position_sum(player_vars, df, 'RB') >= 2\nprob += get_position_sum(player_vars, df, 'WR') >= 3\nprob += get_position_sum(player_vars, df, 'TE') >= 1",
"_____no_output_____"
],
[
"# total salary constraint\nprob += pulp.lpSum(df.Salary.iloc[i] * player_vars[i] for i in range(len(df))) <= 50000",
"_____no_output_____"
],
[
"# finally, specify the goal\nprob += pulp.lpSum([df.AvgPointsPerGame.iloc[i] * player_vars[i] for i in range(len(df))])",
"_____no_output_____"
],
[
"# solve and print the status\nprob.solve()\nprint(pulp.LpStatus[prob.status])",
"Optimal\n"
],
[
"# for each of the player variables, \ntotal_salary_used = 0\nmean_AvgPointsPerGame = 0\nfor i in range(len(df)):\n if player_vars[i].value() == 1:\n row = df.iloc[i]\n print(row['Roster Position'], row.Name, row.TeamAbbrev, row.Salary, row.AvgPointsPerGame)\n total_salary_used += row.Salary\n mean_AvgPointsPerGame += row.AvgPointsPerGame\n#mean_AvgPointsPerGame /= 9 # divide by total players in roster to get a mean\ntotal_salary_used, mean_AvgPointsPerGame",
"RB/FLEX Dalvin Cook MIN 8200 28.65\nQB Russell Wilson SEA 7600 32.01\nWR/FLEX Tyler Lockett SEA 6800 22.07\nWR/FLEX Corey Davis TEN 5900 17.98\nRB/FLEX Melvin Gordon III DEN 5300 15.72\nWR/FLEX CeeDee Lamb DAL 4900 14.21\nTE/FLEX Hunter Henry LAC 4000 9.63\nWR/FLEX Keelan Cole JAX 4000 12.37\nDST Colts IND 3300 11.71\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e160db1bb91553265ae04d2e35c3ed41043d4e | 3,787 | ipynb | Jupyter Notebook | data_notebooks/postulations.ipynb | platzimaster-wedeal/wescripts | 015a01bb6812f2b1c46b9a0e60df5d7ea582c35f | [
"MIT"
] | null | null | null | data_notebooks/postulations.ipynb | platzimaster-wedeal/wescripts | 015a01bb6812f2b1c46b9a0e60df5d7ea582c35f | [
"MIT"
] | null | null | null | data_notebooks/postulations.ipynb | platzimaster-wedeal/wescripts | 015a01bb6812f2b1c46b9a0e60df5d7ea582c35f | [
"MIT"
] | null | null | null | 23.521739 | 79 | 0.390547 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"i = [i for i in range(2000)]\ndf = pd.DataFrame(i)\nid_employers_job_offer = pd.Series(i)",
"_____no_output_____"
],
[
"df['id_employers_job_offer'] = id_employers_job_offer\ndf.rename(columns={0:'id_employee'}, inplace=True)",
"_____no_output_____"
],
[
"i=[0,1]\nimport random\nstatus = random.choices(i,k=2000)\ndf['hired'] = status",
"_____no_output_____"
],
[
"df.sample(5)",
"_____no_output_____"
],
[
"df.to_csv(index=False, path_or_buf = '../data_base/csv/postulations.csv')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e161f00f782f745eb1133600618425aecdd495 | 270,647 | ipynb | Jupyter Notebook | 02/homework_day2.ipynb | Py101/py101-assignments-andremarco | 2e211855a7f0f56c1e4d8c6d20233fad2b520faf | [
"MIT"
] | null | null | null | 02/homework_day2.ipynb | Py101/py101-assignments-andremarco | 2e211855a7f0f56c1e4d8c6d20233fad2b520faf | [
"MIT"
] | null | null | null | 02/homework_day2.ipynb | Py101/py101-assignments-andremarco | 2e211855a7f0f56c1e4d8c6d20233fad2b520faf | [
"MIT"
] | null | null | null | 485.901257 | 134,373 | 0.938407 | [
[
[
"<center>\n<hr>\n<h1>Python Crash Course</h1>\n<h2>Master in Data Science - Sapienza University</h2>\n<h2>Homework 2: Python Challenges</h2>\n<h3>A.A. 2017/18</h3>\n<h3>Tutor: Francesco Fabbri</h3>\n<hr>\n</center>\n\n",
"_____no_output_____"
],
[
"# Instructions\nSo guys, here we are! **Finally** you're facing your first **REAL** homework. Are you ready to fight?\nWe're going to apply all the Pythonic stuff seen before AND EVEN MORE...\n\n\n## Simple rules:\n\n1. Don't touch the instructions, you **just have to fill the blank rows**.\n\n\n2. This is supposed to be an exercise for improving your Pythonic Skills in a spirit of collaboration so...of course you can help your classmates and obviously get a really huge help as well from all the others (as the proverb says: \"I get help from you and then you help me\", right?!...)\n\n\n3. **RULE OF THUMB** for you during the homework:\n - *1st Step:* try to solve the problem alone\n - *2nd Step:* googling random the answer\n - *3rd Step:* ask to your colleagues\n - *3rd Step:* screaming and complaining about life \n - *4th Step:* ask to Tutors\n \n## And the Prize? The Beer?The glory?!:\nGuys the life is hard...in this Master it's even worse...\nSoooo, since that you seem so smart I want to test you before the start of all the courses.\n\n.\n\n.\n\n.\n\nBut not now.\n\nYou have to come prepared to the challenge, so right now solve these first 6 exercises, then it will be the time for **FIGHTING** and (for one of you) **DRINKING**.\n\n",
"_____no_output_____"
],
[
"# Warm-up...",
"_____no_output_____"
],
[
"### 1. 12! is equal to...",
"_____no_output_____"
]
],
[
[
"n=12\nx=1\nwhile n>1:\n x=x*n\n n=n-1\nprint(x)",
"479001600\n"
]
],
[
[
"### 2. More math...\nWrite a program which will find all such numbers which are divisible by 7 but are not a multiple of 5, between 0 and 1000 (both included). The numbers obtained should be printed in a comma-separated sequence on a single line. (range and CFS)",
"_____no_output_____"
]
],
[
[
"ris=[]\nfor x in range(0,1001):\n if x%7==0 and x%5!=0:\n ris.append(str(x))\n\nr2=','.join(ris)\nprint(r2)",
"7,14,21,28,42,49,56,63,77,84,91,98,112,119,126,133,147,154,161,168,182,189,196,203,217,224,231,238,252,259,266,273,287,294,301,308,322,329,336,343,357,364,371,378,392,399,406,413,427,434,441,448,462,469,476,483,497,504,511,518,532,539,546,553,567,574,581,588,602,609,616,623,637,644,651,658,672,679,686,693,707,714,721,728,742,749,756,763,777,784,791,798,812,819,826,833,847,854,861,868,882,889,896,903,917,924,931,938,952,959,966,973,987,994\n"
]
],
[
[
"### 2. Count capital letters\nIn this exercises you're going to deal with YOUR DATA. Indeed, in the list below there are stored your Favorite Tv Series. But, as you can see, there is something weird. There are too much CaPITal LeTTErs. Your task is to count the capital letters in all the strings and then print the total number of capital letters in all the list.",
"_____no_output_____"
]
],
[
[
"tv_series = ['Game of THRroneS',\n 'big bang tHeOrY',\n 'MR robot',\n 'WesTWorlD',\n 'fIRefLy',\n \"i haven't\",\n 'HOW I MET your mothER',\n 'friENds',\n 'bRon broen',\n 'gossip girl',\n 'prISon break',\n 'breaking BAD']",
"_____no_output_____"
],
[
"#alfab=[\"A\",\"B\",\"C\",\"D\",\"E\"\"F\",\"G\",\"H\",\"I\",\"L\",\"M\",\"N\",\"O\",\"P\",\"Q\",\"R\",\"S\",\"T\",\"U\",\"V\",\"Z\",\"X\",\"Y\",\"K\",\"W\"]\n#cont=0\nris=[]\nfor i in tv_series:\n k=0\n for a in i:\n if a.isupper():\n k=k+1\n ris.append(k)\n\nris\n",
"_____no_output_____"
]
],
[
[
"### 3. A remark\nUsing the list above, create a dictionary where the keys are Unique IDs and values the TV Series.\nYou have to do the exercise keeping in mind these 2 constraints: \n\n1. The order of the IDs has to be **dependent on the alphabetical order of the titles**, i.e. 0: first_title_in_alphabetical_order and so on...\n\n\n2. **Solve the mess** of the capital letter: we want them only at the start of the words (\"prISon break\" should be \"Prison Break\")\n",
"_____no_output_____"
]
],
[
[
"\n\nlista=[]\nfor i in tv_series:\n lista.append(i.title())\n \n\nidx=list(range(0+1,12+1))\ndic_one=dict(zip(sorted(lista),idx))\nprint(dic_one)\n",
"{'Big Bang Theory': 1, 'Breaking Bad': 2, 'Bron Broen': 3, 'Firefly': 4, 'Friends': 5, 'Game Of Thrrones': 6, 'Gossip Girl': 7, 'How I Met Your Mother': 8, \"I Haven'T\": 9, 'Mr Robot': 10, 'Prison Break': 11, 'Westworld': 12}\n"
]
],
[
[
"### 4. Dictionary to its maximum\nInvert the keys with the values in the dictionary built before. ",
"_____no_output_____"
]
],
[
[
"inv_dic={v: k for k, v in dic_one.items()}\ninv_dic",
"_____no_output_____"
]
],
[
[
"Have you done in **one line of code**? If not, try now!",
"_____no_output_____"
],
[
"### 4. Other boring math\nLet's talk about our beloved exams. Starting from the exams and CFU below, are you able to compute the weighted mean of them?\nLet's do it and print the result.\n\nDescription of the data:\n\nexams[1] = $(title_1, grade_1)$\n\ncfu[1] = $CFU_1$",
"_____no_output_____"
]
],
[
[
"exams = [('BIOINFORMATICS', 29),\n ('DATA MANAGEMENT FOR DATA SCIENCE', 30),\n ('DIGITAL EPIDEMIOLOGY', 26),\n ('NETWORKING FOR BIG DATA AND LABORATORY',28),\n ('QUANTITATIVE MODELS FOR ECONOMIC ANALYSIS AND MANAGEMENT','30 e lode'),\n ('DATA MINING TECHNOLOGY FOR BUSINESS AND SOCIETY', 30),\n ('STATISTICAL LEARNING',30),\n ('ALGORITHMIC METHODS OF DATA MINING AND LABORATORY',30),\n ('FUNDAMENTALS OF DATA SCIENCE AND LABORATORY', 29)]\n\ncfu = sum([6,6,6,9,6,6,6,9,9])",
"_____no_output_____"
],
[
"# create a list in which are stored the marks\nvoti=[]\nfor i in exams:\n voti.append(i[1])\n\ncrediti=[6,6,6,9,6,6,6,9,9] \n# must transform the \"30 e lode\" value in integer value\nprova=[]\nfor n,i in enumerate(voti):\n if i==\"30 e lode\":\n voti[n]=30\nfin=[]\n\nfor x in range(len(crediti)):\n c=0\n c=crediti[x]*voti[x]\n fin.append(c)\n\naverage_1=sum(fin)/sum(crediti)\nprint(average_1)\n",
"29.095238095238095\n"
]
],
[
[
"### 5. Palindromic numbers\nWrite a script which finds all the Palindromic numbers, in the range [0,**N**] (bounds included). The numbers obtained should be printed in a comma-separated sequence on a single line.\n\nWhat is **N**?\nLooking at the exercise before:\n**N** = (Total number of CFU) x (Sum of all the grades)\n\n(details: https://en.wikipedia.org/wiki/Palindromic_number)\n",
"_____no_output_____"
]
],
[
[
"top=cfu*sum(voti)\ntot_num=list(range(1,top))\ndef palindo(s):\n return str(s)==str(s)[::-1]\ntt=[]\nfor i in tot_num:\n c=palindo(i)\n if c==True:\n tt.append(str(i))\n\nr6=','.join(tt)\nprint(r6)",
"1,2,3,4,5,6,7,8,9,11,22,33,44,55,66,77,88,99,101,111,121,131,141,151,161,171,181,191,202,212,222,232,242,252,262,272,282,292,303,313,323,333,343,353,363,373,383,393,404,414,424,434,444,454,464,474,484,494,505,515,525,535,545,555,565,575,585,595,606,616,626,636,646,656,666,676,686,696,707,717,727,737,747,757,767,777,787,797,808,818,828,838,848,858,868,878,888,898,909,919,929,939,949,959,969,979,989,999,1001,1111,1221,1331,1441,1551,1661,1771,1881,1991,2002,2112,2222,2332,2442,2552,2662,2772,2882,2992,3003,3113,3223,3333,3443,3553,3663,3773,3883,3993,4004,4114,4224,4334,4444,4554,4664,4774,4884,4994,5005,5115,5225,5335,5445,5555,5665,5775,5885,5995,6006,6116,6226,6336,6446,6556,6666,6776,6886,6996,7007,7117,7227,7337,7447,7557,7667,7777,7887,7997,8008,8118,8228,8338,8448,8558,8668,8778,8888,8998,9009,9119,9229,9339,9449,9559,9669,9779,9889,9999,10001,10101,10201,10301,10401,10501,10601,10701,10801,10901,11011,11111,11211,11311,11411,11511,11611,11711,11811,11911,12021,12121,12221,12321,12421,12521,12621,12721,12821,12921,13031,13131,13231,13331,13431,13531,13631,13731,13831,13931,14041,14141,14241,14341,14441,14541,14641,14741,14841,14941,15051,15151,15251,15351,15451,15551,15651,15751,15851,15951,16061,16161,16261,16361,16461\n"
]
],
[
[
"### 6. StackOverflow",
"_____no_output_____"
],
[
"Let's start using your new best friend. Now I'm going to give other task, slightly more difficult BUT this time, just googling, you will find easily the answer on the www.stackoverflow.com. You can use the code there for solving the exercise BUT you have to understand the solution there **COMMENTING** the code, showing me you understood the thinking process behind the code.",
"_____no_output_____"
],
[
"### 6. A\nShow me an example of how to use **PROPERLY** the *Try - Except* statements",
"_____no_output_____"
]
],
[
[
"# you start with a try statement: if python can do this statement you will find \"Hello\". \ntry:\n print(\"HELLO\")\n# in the other case will be execute the except statement. In this case it will be execute only if there is an ImportError\nexcept ImportError:\n print (\"NO module found\")",
"HELLO\n"
]
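A slightly fuller sketch of the same pattern, using a hypothetical input string: catch only the specific exception you expect, and keep the success and cleanup paths in else/finally.

# Hypothetical example: parsing text that may not be an integer.
raw = "42a"
try:
    value = int(raw)            # may raise ValueError
except ValueError:
    print("not an integer:", raw)
else:
    print("parsed:", value)     # runs only when no exception was raised
finally:
    print("done")               # runs in every case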
],
[
[
"#### 6. B\nGiving this list of words below, after copying in a variable, explain and provide me a code for obtaining a **Bag of Words** from them.\n(Hint: use dictionaries and loops)",
"_____no_output_____"
],
[
"['theory', 'of', 'bron', 'firefly', 'thrones', 'break', 'bad', 'mother', 'firefly', \"haven't\", 'prison', 'big', 'friends', 'girl', 'westworld', 'bad', \"haven't\", 'gossip', 'thrones', 'your', 'big', 'how', 'friends', 'theory', 'your', 'bron', 'bad', 'bad', 'breaking', 'met', 'breaking', 'breaking', 'game', 'bron', 'your', 'breaking', 'met', 'bang', 'how', 'mother', 'bad', 'theory', 'how', 'i', 'friends', \"haven't\", 'of', 'of', 'gossip', 'i', 'robot', 'of', 'prison', 'bad', 'friends', 'friends', 'i', 'robot', 'bang', 'mother', 'bang', 'i', 'of', 'bad', 'friends', 'theory', 'i', 'friends', 'thrones', 'prison', 'theory', 'theory', 'big', 'of', 'bang', 'how', 'thrones', 'bang', 'theory', 'friends', 'game', 'bang', 'mother', 'broen', 'bad', 'game', 'break', 'break', 'bang', 'big', 'gossip', 'robot', 'met', 'i', 'game', 'your', 'met', 'bad', 'firefly', 'your']",
"_____no_output_____"
]
],
[
[
"list_6=['theory', 'of', 'bron', 'firefly', 'thrones', 'break', 'bad', 'mother', 'firefly', \"haven't\", 'prison', 'big', 'friends', 'girl', 'westworld', 'bad', \"haven't\", 'gossip', 'thrones', 'your', 'big', 'how', 'friends', 'theory', 'your', 'bron', 'bad', 'bad', 'breaking', 'met', 'breaking', 'breaking', 'game', 'bron', 'your', 'breaking', 'met', 'bang', 'how', 'mother', 'bad', 'theory', 'how', 'i', 'friends', \"haven't\", 'of', 'of', 'gossip', 'i', 'robot', 'of', 'prison', 'bad', 'friends', 'friends', 'i', 'robot', 'bang', 'mother', 'bang', 'i', 'of', 'bad', 'friends', 'theory', 'i', 'friends', 'thrones', 'prison', 'theory', 'theory', 'big', 'of', 'bang', 'how', 'thrones', 'bang', 'theory', 'friends', 'game', 'bang', 'mother', 'broen', 'bad', 'game', 'break', 'break', 'bang', 'big', 'gossip', 'robot', 'met', 'i', 'game', 'your', 'met', 'bad', 'firefly', 'your']\nindice=list(range(0+1,len(list_6)+1))\ndic_6=dict(zip(list_6,indice))\nprint(dic_6)\n ",
"{'theory': 79, 'of': 74, 'bron': 34, 'firefly': 99, 'thrones': 77, 'break': 88, 'bad': 98, 'mother': 83, \"haven't\": 46, 'prison': 70, 'big': 90, 'friends': 80, 'girl': 14, 'westworld': 15, 'gossip': 91, 'your': 100, 'how': 76, 'breaking': 36, 'met': 97, 'game': 95, 'bang': 89, 'i': 94, 'robot': 92, 'broen': 84}\n"
]
],
[
[
"#### 6. C\nAnd now, write down a code which computes the first 10 Fibonacci numbers\n\n(details: https://en.wikipedia.org/wiki/Fibonacci_number)",
"_____no_output_____"
]
],
[
[
"y=0\nz=1\nrr=[]\nfor count in range(1,11):\n v=0\n v=z\n z=y+z\n y=v\n count=count+1\n rr.append(z)\n\nprint(rr)",
"[1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e164b45824fc17802f0051a39a1e69f2457e0d | 27,271 | ipynb | Jupyter Notebook | trace_graph.ipynb | LecJackS/micrograd | df3107c662c15debfc02e014b3c6908504284baa | [
"MIT"
] | null | null | null | trace_graph.ipynb | LecJackS/micrograd | df3107c662c15debfc02e014b3c6908504284baa | [
"MIT"
] | null | null | null | trace_graph.ipynb | LecJackS/micrograd | df3107c662c15debfc02e014b3c6908504284baa | [
"MIT"
] | null | null | null | 54.871227 | 156 | 0.520553 | [
[
[
"# brew install graphviz\n# pip install graphviz\nfrom graphviz import Digraph",
"_____no_output_____"
],
[
"from micrograd.engine import Value",
"_____no_output_____"
],
[
"def trace(root):\n nodes, edges = set(), set()\n def build(v):\n if v not in nodes:\n nodes.add(v)\n for child in v._prev:\n edges.add((child, v))\n build(child)\n build(root)\n return nodes, edges\n\ndef draw_dot(root, format='svg', rankdir='LR'):\n \"\"\"\n format: png | svg | ...\n rankdir: TB (top to bottom graph) | LR (left to right)\n \"\"\"\n assert rankdir in ['LR', 'TB']\n nodes, edges = trace(root)\n dot = Digraph(format=format, graph_attr={'rankdir': rankdir}) #, node_attr={'rankdir': 'TB'})\n \n for n in nodes:\n dot.node(name=str(id(n)), label = \"{ data %.4f | grad %.4f }\" % (n.data, n.grad), shape='record')\n if n._op:\n dot.node(name=str(id(n)) + n._op, label=n._op)\n dot.edge(str(id(n)) + n._op, str(id(n)))\n \n for n1, n2 in edges:\n dot.edge(str(id(n1)), str(id(n2)) + n2._op)\n \n return dot",
"_____no_output_____"
],
[
"# a very simple example\nx = Value(1.0)\ny = (x * 2 + 1).relu()\ny.backward()\ndraw_dot(y)",
"_____no_output_____"
],
[
"# a simple 2D neuron\nimport random\nfrom micrograd import nn\n\nrandom.seed(1337)\nn = nn.Neuron(2)\nx = [Value(1.0), Value(-2.0)]\ny = n(x)\ny.backward()\n\ndot = draw_dot(y)\ndot",
"_____no_output_____"
],
[
"dot.render('gout')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e17693cfe7791db10fc08e1099f583b37f09c9 | 681,710 | ipynb | Jupyter Notebook | Diabetics Prediction (ML) CV=5.ipynb | AmitHasanShuvo/Prediction-of-Clinical-Risk-Factors-of-Diabetes-Using-ML-Resolving-Class-Imbalance | 19277aa809d002776c59cc4f141555b5cc657e35 | [
"MIT"
] | 3 | 2021-05-15T09:43:52.000Z | 2022-03-14T04:57:19.000Z | Diabetics Prediction (ML) CV=5.ipynb | zhaoliang0302/Prediction-of-Clinical-Risk-Factors-of-Diabetes-Using-ML-Resolving-Class-Imbalance | 19277aa809d002776c59cc4f141555b5cc657e35 | [
"MIT"
] | null | null | null | Diabetics Prediction (ML) CV=5.ipynb | zhaoliang0302/Prediction-of-Clinical-Risk-Factors-of-Diabetes-Using-ML-Resolving-Class-Imbalance | 19277aa809d002776c59cc4f141555b5cc657e35 | [
"MIT"
] | 5 | 2021-05-02T00:54:49.000Z | 2022-03-14T09:09:16.000Z | 221.11904 | 336,388 | 0.888037 | [
[
[
"import numpy as np\nimport copy\nfrom sklearn import preprocessing\nimport tensorflow as tf\nfrom tensorflow import keras\nimport os\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom numpy.random import seed\nnp.random.seed(2095)",
"_____no_output_____"
],
[
"data = pd.read_excel('CardiacPrediction.xlsx')",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data.columns",
"_____no_output_____"
],
[
"data.drop(['SEQN','Annual-Family-Income','Height','Ratio-Family-Income-Poverty','X60-sec-pulse',\n 'Health-Insurance','Glucose','Vigorous-work','Total-Cholesterol','CoronaryHeartDisease','Blood-Rel-Stroke','Red-Cell-Distribution-Width','Triglycerides','Mean-Platelet-Vol','Platelet-count','Lymphocyte','Monocyte','Eosinophils','Mean-cell-Hemoglobin','White-Blood-Cells','Red-Blood-Cells','Basophils','Mean-Cell-Vol','Mean-Cell-Hgb-Conc.','Hematocrit','Segmented-Neutrophils'], axis = 1, inplace=True)\n\n",
"_____no_output_____"
],
[
"#data['Diabetes'] = data['Diabetes'].replace('3','1')",
"_____no_output_____"
],
[
"#data = data.astype(float)",
"_____no_output_____"
],
[
"data['Diabetes'].loc[(data['Diabetes'] == 3 )] = 1",
"/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pandas/core/indexing.py:205: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self._setitem_with_indexer(indexer, value)\n"
],
[
"\n#data= data[\"Diabetes\"].replace({\"3\": \"1\"},inplace=True)",
"_____no_output_____"
],
[
"data[\"Diabetes\"].value_counts()",
"_____no_output_____"
],
[
"data[\"Diabetes\"].describe()",
"_____no_output_____"
],
[
"#del data['Basophils']",
"_____no_output_____"
],
[
"#del data['Health-Insurance']",
"_____no_output_____"
],
[
"#del data['Platelet-count']",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data.columns",
"_____no_output_____"
],
[
"data = data[['Gender', 'Age', 'Systolic', 'Diastolic', 'Weight', 'Body-Mass-Index',\n 'Hemoglobin', 'Albumin', 'ALP', 'AST', 'ALT', 'Cholesterol',\n 'Creatinine', 'GGT', 'Iron', 'LDH', 'Phosphorus',\n 'Bilirubin', 'Protein', 'Uric.Acid', 'HDL',\n 'Glycohemoglobin', 'Moderate-work',\n 'Blood-Rel-Diabetes', 'Diabetes']]\n",
"_____no_output_____"
],
[
"data.columns",
"_____no_output_____"
],
[
"data.isnull().sum()",
"_____no_output_____"
],
[
"data.describe()",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data['Diabetes'].describe()",
"_____no_output_____"
],
[
"data.columns",
"_____no_output_____"
],
[
"data[\"Diabetes\"].value_counts().sort_index().plot.barh()\n",
"_____no_output_____"
],
[
"#data[\"Gender\"].value_counts().sort_index().plot.barh()\n#balanced",
"_____no_output_____"
],
[
"#data.corr()",
"_____no_output_____"
],
[
"data.columns",
"_____no_output_____"
],
[
"data.shape",
"_____no_output_____"
],
[
"data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 37079 entries, 0 to 37078\nData columns (total 25 columns):\nGender 37079 non-null int64\nAge 37079 non-null int64\nSystolic 37079 non-null int64\nDiastolic 37079 non-null int64\nWeight 37079 non-null float64\nBody-Mass-Index 37079 non-null float64\nHemoglobin 37079 non-null float64\nAlbumin 37079 non-null int64\nALP 37079 non-null int64\nAST 37079 non-null int64\nALT 37079 non-null int64\nCholesterol 37079 non-null float64\nCreatinine 37079 non-null float64\nGGT 37079 non-null int64\nIron 37079 non-null float64\nLDH 37079 non-null int64\nPhosphorus 37079 non-null float64\nBilirubin 37079 non-null float64\nProtein 37079 non-null float64\nUric.Acid 37079 non-null float64\nHDL 37079 non-null float64\nGlycohemoglobin 37079 non-null float64\nModerate-work 37079 non-null int64\nBlood-Rel-Diabetes 37079 non-null int64\nDiabetes 37079 non-null int64\ndtypes: float64(12), int64(13)\nmemory usage: 7.1 MB\n"
],
[
"data = data.astype(float)\n",
"_____no_output_____"
],
[
"data.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 37079 entries, 0 to 37078\nData columns (total 25 columns):\nGender 37079 non-null float64\nAge 37079 non-null float64\nSystolic 37079 non-null float64\nDiastolic 37079 non-null float64\nWeight 37079 non-null float64\nBody-Mass-Index 37079 non-null float64\nHemoglobin 37079 non-null float64\nAlbumin 37079 non-null float64\nALP 37079 non-null float64\nAST 37079 non-null float64\nALT 37079 non-null float64\nCholesterol 37079 non-null float64\nCreatinine 37079 non-null float64\nGGT 37079 non-null float64\nIron 37079 non-null float64\nLDH 37079 non-null float64\nPhosphorus 37079 non-null float64\nBilirubin 37079 non-null float64\nProtein 37079 non-null float64\nUric.Acid 37079 non-null float64\nHDL 37079 non-null float64\nGlycohemoglobin 37079 non-null float64\nModerate-work 37079 non-null float64\nBlood-Rel-Diabetes 37079 non-null float64\nDiabetes 37079 non-null float64\ndtypes: float64(25)\nmemory usage: 7.1 MB\n"
],
[
"import seaborn as sns\nplt.subplots(figsize=(12,8))\nsns.heatmap(data.corr(),cmap='inferno', annot=True)",
"_____no_output_____"
],
[
"plt.subplots(figsize=(12,5))\ndata.boxplot(patch_artist=True, sym=\"k.\")\nplt.xlabel('Features')\nplt.ylabel('Data')\nplt.xticks(rotation=90)",
"_____no_output_____"
],
[
"minimum = 0\nmaximum = 0\n\ndef detect_outlier(feature):\n first_q = np.percentile(feature, 25)\n third_q = np.percentile(feature, 75) \n IQR = third_q - first_q\n IQR *= 1.5\n minimum = first_q - IQR \n maximum = third_q + IQR\n flag = False\n \n if(minimum > np.min(feature)):\n flag = True\n if(maximum < np.max(feature)):\n flag = True\n \n return flag",
"_____no_output_____"
],
[
"def remove_outlier(feature):\n first_q = np.percentile(X[feature], 25)\n third_q = np.percentile(X[feature], 75)\n IQR = third_q - first_q\n IQR *= 1.5\n \n minimum = first_q - IQR # the acceptable minimum value\n maximum = third_q + IQR # the acceptable maximum value\n \n median = X[feature].median()\n \n \"\"\"\n # any value beyond the acceptance range are considered\n as outliers. \n # we replace the outliers with the median value of that \n feature.\n \"\"\"\n \n X.loc[X[feature] < minimum, feature] = median \n X.loc[X[feature] > maximum, feature] = median\n\n# taking all the columns except the last one\n# last column is the label\n\nX = data.iloc[:, :-1]\nfor i in range(len(X.columns)): \n remove_outlier(X.columns[i])",
"/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/pandas/core/indexing.py:205: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self._setitem_with_indexer(indexer, value)\n<ipython-input-35-02173f10b8ff>:19: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n X.loc[X[feature] < minimum, feature] = median\n<ipython-input-35-02173f10b8ff>:20: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n X.loc[X[feature] > maximum, feature] = median\n"
],
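[
"# --- Editor's addition (not part of the original notebook): a small, self-contained\n# illustration of the IQR capping rule used by remove_outlier() above, on a toy Series.\nimport pandas as pd\nimport numpy as np\n\ntoy = pd.Series([10, 12, 11, 13, 12, 11, 95]) # 95 is an obvious outlier\nq1, q3 = np.percentile(toy, 25), np.percentile(toy, 75)\niqr = 1.5 * (q3 - q1)\nlow, high = q1 - iqr, q3 + iqr\ncapped = toy.where(toy.between(low, high), toy.median()) # replace outliers with the median\nprint(low, high)\nprint(capped.tolist())",
"_____no_output_____"
],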
[
"X = data.iloc[:, :-1]\n",
"_____no_output_____"
],
[
"for i in range(len(X.columns)):\n if(detect_outlier(X[X.columns[i]])):\n print(X.columns[i], \"Contains Outlier\")",
"Systolic Contains Outlier\nDiastolic Contains Outlier\nWeight Contains Outlier\nBody-Mass-Index Contains Outlier\nHemoglobin Contains Outlier\nAlbumin Contains Outlier\nALP Contains Outlier\nAST Contains Outlier\nALT Contains Outlier\nCholesterol Contains Outlier\nGGT Contains Outlier\nIron Contains Outlier\nLDH Contains Outlier\nHDL Contains Outlier\nGlycohemoglobin Contains Outlier\n"
],
[
"for i in range (50):\n for i in range(len(X.columns)):\n remove_outlier(X.columns[i])",
"<ipython-input-35-02173f10b8ff>:19: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n X.loc[X[feature] < minimum, feature] = median\n<ipython-input-35-02173f10b8ff>:20: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n X.loc[X[feature] > maximum, feature] = median\n"
],
[
"plt.subplots(figsize=(12,5))\nX.boxplot(patch_artist=True, sym=\"k.\")\nplt.xlabel('Features')\nplt.ylabel('Data')\nplt.xticks(rotation=90)",
"_____no_output_____"
],
[
"for i in range(len(X.columns)):\n if(detect_outlier(X[X.columns[i]])):\n print(X.columns[i], \"Contains Outlier\")",
"_____no_output_____"
],
[
"import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport seaborn as sns\nsns.set()\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\n#from xgboost import XGBClassifier, plot_importance\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score,confusion_matrix",
"_____no_output_____"
],
[
"scaler = StandardScaler()\nscaled_data = scaler.fit_transform(X)\nscaled_df = pd.DataFrame(data = scaled_data, columns = X.columns)\nscaled_df.head()\n",
"_____no_output_____"
],
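[
"# --- Editor's addition (not part of the original notebook) ---\n# The scaler above is fit on the full feature matrix before the train/test split, which lets\n# test-set statistics leak into training. A minimal, self-contained sketch of the leakage-free\n# pattern on toy data (in this notebook the same idea would use X_train / X_test):\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n\ntoy_X = np.random.RandomState(0).normal(size=(100, 3))\ntoy_train, toy_test = train_test_split(toy_X, test_size=0.2, random_state=0)\n\ntoy_scaler = StandardScaler().fit(toy_train) # fit on training rows only\ntoy_train_scaled = toy_scaler.transform(toy_train)\ntoy_test_scaled = toy_scaler.transform(toy_test) # reuse the training statistics\nprint(toy_train_scaled.mean(axis=0).round(2), toy_test_scaled.mean(axis=0).round(2))",
"_____no_output_____"
],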
[
"label = data[\"Diabetes\"]\n",
"_____no_output_____"
],
[
"encoder = LabelEncoder()\nlabel = encoder.fit_transform(label)",
"_____no_output_____"
],
[
"X = scaled_df\ny = label \n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=420)",
"_____no_output_____"
],
[
"print(X_train.shape, y_test.shape)\nprint(y_train.shape, y_test.shape)",
"(29663, 24) (7416,)\n(29663,) (7416,)\n"
],
[
"from sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import f_classif\n\nxnew2=SelectKBest(f_classif, k=20).fit_transform(X, y)",
"_____no_output_____"
],
[
" import sklearn.feature_selection as fs\n import matplotlib.pyplot as plt\n df2 = fs.SelectKBest(k='all')\n\n df2.fit(X, y)\n\n names = X.columns.values[df2.get_support()]\n\n scores = df2.scores_[df2.get_support()]\n\n names_scores = list(zip(names, scores))\n\n ns_df = pd.DataFrame(data = names_scores, columns=\n\n ['Features','F_Scores'])\n\n ns_df_sorted = ns_df.sort_values(['F_Scores','Features'], ascending =\n\n [False, True])\n\n print(ns_df_sorted)",
" Features F_Scores\n1 Age 2686.409705\n23 Blood-Rel-Diabetes 1536.785191\n5 Body-Mass-Index 1000.670115\n2 Systolic 766.368625\n4 Weight 526.900940\n7 Albumin 525.811066\n21 Glycohemoglobin 477.417483\n20 HDL 378.750379\n11 Cholesterol 334.923024\n13 GGT 305.472574\n6 Hemoglobin 270.428079\n14 Iron 211.216877\n22 Moderate-work 188.266414\n8 ALP 156.172300\n19 Uric.Acid 144.722845\n12 Creatinine 102.880413\n17 Bilirubin 80.463331\n3 Diastolic 63.316044\n15 LDH 49.826302\n10 ALT 27.645892\n9 AST 22.682305\n0 Gender 12.219556\n18 Protein 8.581358\n16 Phosphorus 0.000433\n"
],
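[
"# --- Editor's addition (not part of the original notebook): if you want to keep only the\n# top-k features ranked by the F-scores printed above, SelectKBest's boolean mask can be used\n# to recover the column names (k=10 is an arbitrary illustration, not a value used elsewhere).\nfrom sklearn.feature_selection import SelectKBest, f_classif\n\nselector = SelectKBest(f_classif, k=10).fit(X, y)\ntop_columns = X.columns[selector.get_support()]\nprint(list(top_columns))",
"_____no_output_____"
],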
[
"import statsmodels.api as sm\nimport pandas\nfrom patsy import dmatrices\nlogit_model = sm.OLS(y_train, X_train)\nresult = logit_model.fit()\nprint(result.summary2())",
" Results: Ordinary least squares\n================================================================================\nModel: OLS Adj. R-squared (uncentered): 0.020 \nDependent Variable: y AIC: 79421.8034\nDate: 2020-09-09 18:30 BIC: 79620.9471\nNo. Observations: 29663 Log-Likelihood: -39687. \nDf Model: 24 F-statistic: 26.24 \nDf Residuals: 29639 Prob (F-statistic): 3.01e-116 \nR-squared (uncentered): 0.021 Scale: 0.85111 \n----------------------------------------------------------------------------------\n Coef. Std.Err. t P>|t| [0.025 0.975]\n----------------------------------------------------------------------------------\nGender 0.0234 0.0083 2.8205 0.0048 0.0071 0.0397\nAge -0.0902 0.0069 -13.1157 0.0000 -0.1037 -0.0767\nSystolic -0.0140 0.0065 -2.1714 0.0299 -0.0267 -0.0014\nDiastolic 0.0255 0.0059 4.3003 0.0000 0.0139 0.0371\nWeight -0.0048 0.0097 -0.4899 0.6242 -0.0238 0.0143\nBody-Mass-Index -0.0266 0.0095 -2.8093 0.0050 -0.0452 -0.0081\nHemoglobin 0.0259 0.0074 3.5122 0.0004 0.0115 0.0404\nAlbumin 0.0099 0.0066 1.4935 0.1353 -0.0031 0.0228\nALP -0.0050 0.0057 -0.8701 0.3842 -0.0162 0.0062\nAST 0.0213 0.0066 3.2098 0.0013 0.0083 0.0344\nALT -0.0148 0.0069 -2.1589 0.0309 -0.0283 -0.0014\nCholesterol 0.0316 0.0057 5.5653 0.0000 0.0205 0.0427\nCreatinine 0.0059 0.0069 0.8481 0.3964 -0.0077 0.0195\nGGT -0.0098 0.0062 -1.5736 0.1156 -0.0219 0.0024\nIron -0.0005 0.0062 -0.0757 0.9396 -0.0125 0.0116\nLDH 0.0086 0.0058 1.4660 0.1427 -0.0029 0.0200\nPhosphorus -0.0144 0.0055 -2.6064 0.0092 -0.0252 -0.0036\nBilirubin 0.0080 0.0060 1.3330 0.1825 -0.0038 0.0197\nProtein -0.0132 0.0061 -2.1621 0.0306 -0.0251 -0.0012\nUric.Acid 0.0070 0.0068 1.0259 0.3049 -0.0063 0.0203\nHDL 0.0186 0.0061 3.0621 0.0022 0.0067 0.0306\nGlycohemoglobin 0.0077 0.0060 1.2892 0.1973 -0.0040 0.0195\nModerate-work -0.0089 0.0055 -1.6392 0.1012 -0.0196 0.0017\nBlood-Rel-Diabetes 0.0620 0.0054 11.4032 0.0000 0.0513 0.0726\n--------------------------------------------------------------------------------\nOmnibus: 8295.976 Durbin-Watson: 0.227 \nProb(Omnibus): 0.000 Jarque-Bera (JB): 18042.779\nSkew: -1.657 Prob(JB): 0.000 \nKurtosis: 4.902 Condition No.: 5 \n================================================================================\n\n"
],
[
"np.exp(result.params)\n",
"_____no_output_____"
],
[
"params = result.params\nconf = result.conf_int()\nconf['Odds Ratio'] = params\nconf.columns = ['5%', '95%', 'Odds Ratio']\nprint(np.exp(conf))",
" 5% 95% Odds Ratio\nGender 1.007169 1.040487 1.023693\nAge 0.901528 0.926160 0.913761\nSystolic 0.973684 0.998637 0.986081\nDiastolic 1.013955 1.037768 1.025792\nWeight 0.976520 1.014361 0.995260\nBody-Mass-Index 0.955784 0.991980 0.973713\nHemoglobin 1.011516 1.041195 1.026249\nAlbumin 0.996919 1.023104 1.009927\nALP 0.983931 1.006258 0.995032\nAST 1.008342 1.034961 1.021564\nALT 0.972104 0.998635 0.985280\nCholesterol 1.020663 1.043616 1.032076\nCreatinine 0.992305 1.019698 1.005909\nGGT 0.978338 1.002398 0.990295\nIron 0.987550 1.011664 0.999534\nLDH 0.997114 1.020242 1.008612\nPhosphorus 0.975092 0.996436 0.985706\nBilirubin 0.996248 1.019936 1.008023\nProtein 0.975190 0.998769 0.986909\nUric.Acid 0.993676 1.020485 1.006991\nHDL 1.006727 1.031023 1.018803\nGlycohemoglobin 0.995988 1.019659 1.007754\nModerate-work 0.980566 1.001751 0.991102\nBlood-Rel-Diabetes 1.052669 1.075340 1.063944\n"
],
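[
"# --- Editor's addition (not part of the original notebook) ---\n# The model fitted above is OLS (a linear probability model), so exponentiating its coefficients\n# does not give true odds ratios. A hedged sketch of the logistic-regression version, which is\n# what odds ratios normally come from (it may need more iterations or scaling tweaks to converge\n# on these data):\nimport statsmodels.api as sm\n\nlogit_res = sm.Logit(y_train, sm.add_constant(X_train)).fit(disp=0)\nodds_ratios = np.exp(logit_res.params)\nprint(odds_ratios.sort_values(ascending=False).head(10))",
"_____no_output_____"
],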
[
"result.pvalues.sort_values()\n",
"_____no_output_____"
],
[
"#from sklearn.utils import class_weight\n#class_weights = class_weight.compute_class_weight('balanced',\n # np.unique(y_train),\n # y_train)\n#model.fit(X_train, y_train, class_weight=class_weights)\n",
"_____no_output_____"
],
[
"from sklearn.model_selection import GridSearchCV\n\nweights = np.linspace(0.05, 0.95, 20)\n\ngsc = GridSearchCV(\n estimator=LogisticRegression(),\n param_grid={\n 'class_weight': [{0: x, 1: 1.0-x} for x in weights]\n },\n scoring='accuracy',\n cv=10\n)\ngrid_result = gsc.fit(X, y)\n\nprint(\"Best parameters : %s\" % grid_result.best_params_)\n\n# Plot the weights vs f1 score\ndataz = pd.DataFrame({ 'score': grid_result.cv_results_['mean_test_score'],\n 'weight': weights })\ndataz.plot(x='weight')",
"Best parameters : {'class_weight': {0: 0.5236842105263158, 1: 0.47631578947368425}}\n"
],
[
"class_weight = {0: 0.5236842105263158,\n 1: 0.47631578947368425}",
"_____no_output_____"
],
[
"#LR",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix, classification_report\nfrom mlxtend.plotting import plot_decision_regions, plot_confusion_matrix\nfrom matplotlib import pyplot as plt\nlr = LogisticRegression(class_weight='balanced',random_state=420)\n\n# Fit..\nlr.fit(X_train, y_train)\n\n# Predict..\ny_pred = lr.predict(X_test)\n\n# Evaluate the model\nprint(classification_report(y_test, y_pred))\nplot_confusion_matrix(confusion_matrix(y_test, y_pred))\nfrom sklearn.metrics import roc_curve, auc\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)\nroc_auc = auc(false_positive_rate, true_positive_rate)\nroc_auc",
" precision recall f1-score support\n\n 0 0.30 0.76 0.43 951\n 1 0.95 0.73 0.83 6465\n\n accuracy 0.74 7416\n macro avg 0.62 0.75 0.63 7416\nweighted avg 0.87 0.74 0.78 7416\n\n"
],
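[
"# --- Editor's addition (not part of the original notebook): the AUC above is computed from\n# hard 0/1 predictions; using the predicted probabilities usually gives a more informative ROC AUC.\nfrom sklearn.metrics import roc_auc_score\n\ny_score = lr.predict_proba(X_test)[:, 1] # probability of the positive class\nprint('probability-based ROC AUC:', roc_auc_score(y_test, y_score))",
"_____no_output_____"
],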
[
"from sklearn.svm import SVC\n\nclf_svc_rbf = SVC(kernel=\"rbf\",class_weight='balanced',random_state=4200)\nclf_svc_rbf.fit(X_train,y_train)\ny_pred_clf_svc_rbf = clf_svc_rbf.predict(X_test)\n\n\nimport matplotlib.pyplot as plt\ncm = confusion_matrix(y_test,y_pred_clf_svc_rbf)\n\n#plt.figure(figsize=(5,5))\n#sns.heatmap(cm,annot=True)\n#plt.show()\n\n#print(classification_report(y_test,y_pred_clf_svc_rbf))\n\nprint(classification_report(y_test, y_pred_clf_svc_rbf))\nplot_confusion_matrix(confusion_matrix(y_test, y_pred_clf_svc_rbf))\n\nfrom sklearn.metrics import roc_curve, auc\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_clf_svc_rbf)\nroc_auc = auc(false_positive_rate, true_positive_rate)\nroc_auc",
" precision recall f1-score support\n\n 0 0.35 0.76 0.48 951\n 1 0.96 0.80 0.87 6465\n\n accuracy 0.79 7416\n macro avg 0.65 0.78 0.67 7416\nweighted avg 0.88 0.79 0.82 7416\n\n"
],
[
"from sklearn.ensemble import RandomForestClassifier\n\nrd = RandomForestClassifier(class_weight='balanced',random_state=4200)\nrd.fit(X_train,y_train)\ny_pred_rd = rd.predict(X_test)\n\n\nimport matplotlib.pyplot as plt\ncm = confusion_matrix(y_test,y_pred_rd)\n\n#plt.figure(figsize=(5,5))\n#sns.heatmap(cm,annot=True,linewidths=.3)\n#plt.show()\n\nprint(classification_report(y_test,y_pred_rd))\nplot_confusion_matrix(confusion_matrix(y_test, y_pred_rd))\n\nfrom sklearn.metrics import roc_curve, auc\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_rd)\nroc_auc = auc(false_positive_rate, true_positive_rate)\nroc_auc",
" precision recall f1-score support\n\n 0 0.69 0.29 0.41 951\n 1 0.90 0.98 0.94 6465\n\n accuracy 0.89 7416\n macro avg 0.80 0.63 0.67 7416\nweighted avg 0.88 0.89 0.87 7416\n\n"
],
[
"#CV appraoach",
"_____no_output_____"
]
],
[
[
"## SVM",
"_____no_output_____"
]
],
[
[
"\n# evaluate a logistic regression model using k-fold cross-validation\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\n\nfrom sklearn.linear_model import LogisticRegression\n# create dataset\n#X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=1)\n# prepare the cross-validation procedure\n#cv = KFold(n_splits=5, test_size= 0.2, random_state=0)\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n# create model\nmodel = SVC(kernel='rbf', C=1, class_weight=class_weight)\n# evaluate model\nscores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n# report performance\nprint('Accuracy: %.4f (%.4f)' % (mean(scores), std(scores)))\nscores",
"Accuracy: 0.8799 (0.0030)\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.model_selection import StratifiedKFold\n\n\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\nclassifier = svm.SVC(kernel='rbf', probability=True, class_weight=class_weight,\n random_state=42)\n\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots()\nfor i, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X, y)\n viz = plot_roc_curve(classifier, X, y,\n name='ROC fold {}'.format(i),\n alpha=0.3, lw=1, ax=ax)\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\nax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic\")\nax.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## LR",
"_____no_output_____"
]
],
[
[
"from numpy import mean\nfrom numpy import std\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\n\nfrom sklearn.linear_model import LogisticRegression\n# create dataset\n#X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=1)\n# prepare the cross-validation procedure\n#cv = KFold(n_splits=5, test_size= 0.2, random_state=0)\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n# create model\nmodel = LogisticRegression(class_weight=class_weight)\n# evaluate model\nscores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n# report performance\nprint('Accuracy: %.4f (%.4f)' % (mean(scores), std(scores)))\nscores",
"Accuracy: 0.8746 (0.0018)\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.model_selection import StratifiedKFold\n\n# #############################################################################\n# Data IO and generation\n\n# Import some data to play with\n#iris = datasets.load_iris()\n#X = iris.data\n#y = iris.target\n#X, y = X[y != 2], y[y != 2]\n#n_samples, n_features = X.shape\n\n# Add noisy features\n#random_state = np.random.RandomState(0)\n#X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\nclassifier = LogisticRegression(class_weight=class_weight,random_state=42)\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots()\nfor i, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X, y)\n viz = plot_roc_curve(classifier, X, y,\n name='ROC fold {}'.format(i),\n alpha=0.3, lw=1, ax=ax)\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\nax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\nax.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## RF",
"_____no_output_____"
]
],
[
[
"from numpy import mean\nfrom numpy import std\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import ShuffleSplit\n\n# create dataset\n#X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=1)\n# prepare the cross-validation procedure\n#cv = KFold(n_splits=5, test_size= 0.2, random_state=0)\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\n# create model\nmodel = RandomForestClassifier(class_weight=class_weight)\n# evaluate model\nscores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n# report performance\nprint('Accuracy: %.4f (%.4f)' % (mean(scores), std(scores)))\nscores",
"Accuracy: 0.8955 (0.0019)\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.model_selection import StratifiedKFold\n\n# #############################################################################\n# Data IO and generation\n\n# Import some data to play with\n#iris = datasets.load_iris()\n#X = iris.data\n#y = iris.target\n#X, y = X[y != 2], y[y != 2]\n#n_samples, n_features = X.shape\n\n# Add noisy features\n#random_state = np.random.RandomState(0)\n#X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=42)\nclassifier = RandomForestClassifier(class_weight=class_weight,random_state=42)\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots()\nfor i, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X, y)\n #viz = plot_roc_curve(classifier, X, y,\n # name='ROC fold {}'.format(i),\n # alpha=0.3, lw=1, ax=ax)\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.4f $\\pm$ %0.4f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\nax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\nax.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## DT",
"_____no_output_____"
]
],
[
[
"from numpy import mean\nfrom numpy import std\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# create dataset\n#X, y = make_classification(n_samples=1000, n_features=20, n_informative=15, n_redundant=5, random_state=1)\n# prepare the cross-validation procedure\n#cv = KFold(n_splits=5, test_size= 0.2, random_state=0)\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=4200)\n# create model\nmodel = DecisionTreeClassifier(class_weight=class_weight)\n# evaluate model\nscores = cross_val_score(model, X, y, scoring='accuracy', cv=cv, n_jobs=-1)\n# report performance\nprint('Accuracy: %.4f (%.4f)' % (mean(scores), std(scores)))\nscores",
"Accuracy: 0.8520 (0.0019)\n"
],
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import svm, datasets\nfrom sklearn.metrics import auc\nfrom sklearn.metrics import plot_roc_curve\nfrom sklearn.model_selection import StratifiedKFold\n\n# #############################################################################\n# Data IO and generation\n\n# Import some data to play with\n#iris = datasets.load_iris()\n#X = iris.data\n#y = iris.target\n#X, y = X[y != 2], y[y != 2]\n#n_samples, n_features = X.shape\n\n# Add noisy features\n#random_state = np.random.RandomState(0)\n#X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]\n\n# #############################################################################\n# Classification and ROC analysis\n\n# Run classifier with cross-validation and plot ROC curves\ncv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=4200)\nclassifier = DecisionTreeClassifier(class_weight=class_weight,random_state=4200)\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\n\nfig, ax = plt.subplots()\nfor i, (train, test) in enumerate(cv.split(X, y)):\n classifier.fit(X, y)\n viz = plot_roc_curve(classifier, X, y,\n name='ROC fold {}'.format(i),\n alpha=0.3, lw=1, ax=ax)\n interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)\n interp_tpr[0] = 0.0\n tprs.append(interp_tpr)\n aucs.append(viz.roc_auc)\n\nax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Chance', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nax.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.4f $\\pm$ %0.4f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. dev.')\n\nax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\nax.legend(loc=\"lower right\")\nplt.show()",
"_____no_output_____"
],
[
"#from sklearn.model_selection import cross_val_score\n#from sklearn import svm\n#clf = svm.SVC(kernel='rbf', C=1, class_weight=class_weight)\n#scores = cross_val_score(clf, X, y, cv=5)\n#print(\"Accuracy: %0.4f (+/- %0.4f)\" % (scores.mean(), scores.std() * 2))\n\n#clf.score(X_test, y_test)",
"_____no_output_____"
]
],
[
[
"## ANN",
"_____no_output_____"
]
],
[
[
"import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense,Dropout",
"_____no_output_____"
],
[
"classifier=Sequential()\nclassifier.add(Dense(units=256, kernel_initializer='uniform',activation='relu',input_dim=24))\nclassifier.add(Dense(units=128, kernel_initializer='uniform',activation='relu'))\nclassifier.add(Dropout(p=0.1))\nclassifier.add(Dense(units=64, kernel_initializer='uniform',activation='relu'))\nclassifier.add(Dropout(p=0.3))\nclassifier.add(Dense(units=32, kernel_initializer='uniform',activation='relu'))\n\nclassifier.add(Dense(units=1, kernel_initializer='uniform',activation='sigmoid'))\nclassifier.compile(optimizer='adam',loss=\"binary_crossentropy\",metrics=['accuracy'])\nclassifier.fit(X_train,y_train,batch_size=10,epochs=100,class_weight=class_weight)",
"<ipython-input-99-d84fe8f2198c>:4: UserWarning: Update your `Dropout` call to the Keras 2 API: `Dropout(rate=0.1)`\n classifier.add(Dropout(p=0.1))\n<ipython-input-99-d84fe8f2198c>:6: UserWarning: Update your `Dropout` call to the Keras 2 API: `Dropout(rate=0.3)`\n classifier.add(Dropout(p=0.3))\n"
],
[
"#clf_svc_rbf.fit(X_train,y_train)\nfrom sklearn.metrics import confusion_matrix,classification_report,roc_auc_score,auc,f1_score\n\ny_pred = classifier.predict(X_test)>0.8\n\n\nimport matplotlib.pyplot as plt\ncm = confusion_matrix(y_test,y_pred)\n\n#plt.figure(figsize=(5,5))\n#sns.heatmap(cm,annot=True)\n#plt.show()\n\n#print(classification_report(y_test,y_pred_clf_svc_rbf))\n\nprint(classification_report(y_test, y_pred))\n#plot_confusion_matrix(confusion_matrix(y_test, y_pred))\n\nfrom sklearn.metrics import roc_curve, auc\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred)\nroc_auc = auc(false_positive_rate, true_positive_rate)\nroc_auc",
" precision recall f1-score support\n\n 0 0.46 0.48 0.47 951\n 1 0.92 0.92 0.92 6465\n\n accuracy 0.86 7416\n macro avg 0.69 0.70 0.70 7416\nweighted avg 0.86 0.86 0.86 7416\n\n"
],
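[
"# --- Editor's addition (not part of the original notebook): the 0.8 cut-off above is arbitrary.\n# A quick sketch that scans a few thresholds on the predicted probabilities so the trade-off\n# between the two classes is explicit before picking one.\nfrom sklearn.metrics import f1_score\n\nproba = classifier.predict(X_test).ravel()\nfor threshold in [0.3, 0.5, 0.7, 0.8]:\n    preds = (proba > threshold).astype(int)\n    print(threshold, 'macro F1:', round(f1_score(y_test, preds, average='macro'), 3))",
"_____no_output_____"
],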
[
"#from sklearn.tree import DecisionTreeClassifier\n\n\n#from sklearn.model_selection import cross_val_score\n\n#dt = DecisionTreeClassifier(class_weight=class_weight)\n#scores = cross_val_score(clf, X, y, cv=5)\n#print(\"Accuracy: %0.4f (+/- %0.4f)\" % (scores.mean(), scores.std() * 2))",
"_____no_output_____"
],
[
"'''\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import confusion_matrix,classification_report,roc_auc_score,auc,f1_score\n\nlr = LogisticRegression()\nlr.fit(X_train,y_train)\ny_pred_logistic = lr.predict(X_test)\n\nimport matplotlib.pyplot as plt\ncm = confusion_matrix(y_test,y_pred_logistic)\n\nplt.figure(figsize=(5,5))\nsns.heatmap(cm,annot=True,linewidths=.3)\nplt.show()\n\nprint(classification_report(y_test,y_pred_logistic))\n\n\nfrom sklearn.metrics import roc_curve, auc\nfalse_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_pred_logistic)\nroc_auc = auc(false_positive_rate, true_positive_rate)\nroc_auc\nprint(f1_score(y_test, y_pred_logistic,average=\"macro\"))\n'''",
"_____no_output_____"
],
[
"from sklearn import datasets\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import VotingClassifier",
"_____no_output_____"
],
[
"clf1 = SVC(kernel='rbf', C=1, class_weight=class_weight,random_state=42)\nclf2 = LogisticRegression(class_weight=class_weight,random_state=42)\nclf3 = RandomForestClassifier(class_weight=class_weight,random_state=42)\nclf4 = DecisionTreeClassifier(class_weight=class_weight,random_state=42)\n#clf5 = Sequential()",
"_____no_output_____"
],
[
"eclf = VotingClassifier( estimators=[('svm', clf1), ('lr', clf2), ('rf', clf3), ('dt',clf4)],\n voting='hard')",
"_____no_output_____"
],
[
"for clf, label in zip([clf1, clf2, clf3,clf4 ,eclf], ['SVM', 'LR', 'RF','DT', 'Ensemble']):\n scores = cross_val_score(clf, X, y, scoring='accuracy', cv=5)\nprint(\"Accuracy: %0.4f (+/- %0.4f) [%s]\" % (scores.mean(), scores.std(), label))\nscores",
"Accuracy: 0.8886 (+/- 0.0027) [Ensemble]\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e18c0157feb1b166ae1aa73a86e3af74add4e5 | 4,841 | ipynb | Jupyter Notebook | capstone.ipynb | jasonbossert/TDI_Capstone | 414d5d53812e87f42e6598810a5388155269ebe0 | [
"MIT"
] | null | null | null | capstone.ipynb | jasonbossert/TDI_Capstone | 414d5d53812e87f42e6598810a5388155269ebe0 | [
"MIT"
] | null | null | null | capstone.ipynb | jasonbossert/TDI_Capstone | 414d5d53812e87f42e6598810a5388155269ebe0 | [
"MIT"
] | null | null | null | 26.598901 | 127 | 0.528403 | [
[
[
"import pandas as pd\nimport seaborn as sns\nimport json\nimport itertools\nimport numpy as np\nimport os\nfrom functools import partial\nimport seaborn as sns\n\nimport nest_asyncio\nnest_asyncio.apply()\n\nimport stan",
"_____no_output_____"
]
],
[
[
"## Define data convert functions",
"_____no_output_____"
]
],
[
[
"def peek(iterable):\n try:\n first = next(iterable)\n except StopIteration:\n return None\n return first, itertools.chain([first], iterable)",
"_____no_output_____"
],
[
"def json_to_feather(filename, new_filename_base, records_per_file = 1000000, pipe_func = None):\n\n records = map(json.loads, open(filename))\n \n records_per_file = 1000000\n\n file_num = 0\n peek_res = peek(records)\n while peek_res is not None:\n _, records = peek_res\n data = pd.DataFrame.from_records(records, nrows = records_per_file)\n data.to_feather(f\"{new_filename_base}_tmp_{file_num}.feather\")\n peek_res = peek(records)\n file_num += 1\n \n dfs = list()\n for read_num in range(file_num):\n tmp_filename = f\"{new_filename_base}_tmp_{read_num}.feather\"\n small_df = pd.read_feather(tmp_filename)\n if pipe_func is not None:\n small_df = small_df.pipe(pipe_func)\n \n dfs.append(small_df)\n os.remove(tmp_filename)\n \n data = pd.concat(dfs, axis = 0).reset_index()\n data.to_feather(f\"{new_filename_base}.feather\")",
"_____no_output_____"
],
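[
"# --- Editor's addition (not part of the original notebook) ---\n# pandas can also stream a line-delimited JSON file in chunks directly, which is an alternative\n# to the peek()-based loop above. A minimal sketch (the chunk size and output name are arbitrary\n# illustrations, not values used elsewhere in this notebook):\nimport pandas as pd\n\ndef json_to_feather_chunked(filename, new_filename_base, chunk_rows=100000, pipe_func=None):\n    parts = []\n    for chunk in pd.read_json(filename, lines=True, chunksize=chunk_rows):\n        if pipe_func is not None:\n            chunk = chunk.pipe(pipe_func)\n        parts.append(chunk)\n    out = pd.concat(parts, axis=0).reset_index(drop=True)\n    out.to_feather(f\"{new_filename_base}.feather\")",
"_____no_output_____"
],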
[
"def starts_with(df, start_str):\n mask = df.columns.str.startswith(start_str)\n columns = list(df.columns[mask])\n return(columns)",
"_____no_output_____"
],
[
"def pipeable_drop(df, labels):\n return(df.drop(columns = labels))\n\ndef pipeable_drop_startswith(df, labels, start):\n new_df = (df.drop(columns = labels)\n .pipe(lambda x: x.drop(columns = starts_with(x, start)))\n )\n return(new_df)",
"_____no_output_____"
]
],
[
[
"## Convert Data to Feather",
"_____no_output_____"
]
],
[
[
"filename = \"yelp_academic_dataset_business.json\"\nnew_filename_base = \"yelp_business\"\n\nbusiness_drop = partial(pipeable_drop, labels = [\"address\", \"is_open\", \"attributes\", \"hours\"])\n\njson_to_feather(filename, new_filename_base, pipe_func = business_drop)",
"_____no_output_____"
],
[
"filename = \"yelp_academic_dataset_user.json\"\nnew_filename_base = \"yelp_user\"\n\nusers_drop = partial(pipeable_drop_startswith, \n labels = [\"name\", \"useful\", \"funny\", \"cool\", \"elite\", \"friends\", \"fans\"],\n start = \"compliment\")\n\njson_to_feather(filename, new_filename_base, pipe_func = users_drop)",
"_____no_output_____"
],
[
"filename = \"yelp_academic_dataset_review.json\"\nnew_filename_base = \"yelp_review\"\n\nreview_drop = partial(pipeable_drop, labels = [\"text\"])\n\njson_to_feather(filename, new_filename_base, pipe_func = review_drop)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7e1effdf4d65cbd9930c6313a2e3ccffa3d7c19 | 7,313 | ipynb | Jupyter Notebook | src/Prediction/salary/presalary.ipynb | chenshihang/Analysis-of-College-Graduates-Employment-Orientation | 5f666130c64d8e87f1ee5760b491a9647240fa76 | [
"MIT"
] | null | null | null | src/Prediction/salary/presalary.ipynb | chenshihang/Analysis-of-College-Graduates-Employment-Orientation | 5f666130c64d8e87f1ee5760b491a9647240fa76 | [
"MIT"
] | null | null | null | src/Prediction/salary/presalary.ipynb | chenshihang/Analysis-of-College-Graduates-Employment-Orientation | 5f666130c64d8e87f1ee5760b491a9647240fa76 | [
"MIT"
] | 1 | 2019-09-30T03:05:56.000Z | 2019-09-30T03:05:56.000Z | 27.700758 | 235 | 0.558868 | [
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"train = pd.read_csv('train.csv',index_col=None,header=None)\n# test = pd.read_csv('test.csv',index_col=None,header=None)",
"_____no_output_____"
],
[
"Y_trainmin = train.drop([0,1,2,4],axis=1)\nY_trainmax = train.drop([0,1,2,3],axis=1)\nX_train = train.drop([3,4],axis=1)\n\n# Y_testmin = test.drop([0,1,2,4],axis=1)\n# Y_testmax = test.drop([0,1,2,3],axis=1)\n# X_test = test.drop([3,4],axis=1)",
"_____no_output_____"
],
[
"# print(\"Number transactions train dataset: \", len(X_train))\n# print(\"Number transactions test dataset: \", len(X_test))\n# print(\"Total number of transactions: \", len(X_train)+len(X_test))",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor\nimport sklearn.metrics\nfrom sklearn.datasets import make_regression\nimport seaborn as sns",
"_____no_output_____"
],
[
"# print(Y_trainmax.max(axis=0))\n# print(Y_trainmin.max(axis=0))\n# print(Y_testmax.max(axis=0))\n# print(Y_testmin.max(axis=0))",
"_____no_output_____"
]
],
[
[
"### GBDT ",
"_____no_output_____"
]
],
[
[
"regr = GradientBoostingRegressor(max_depth=20, random_state=0,max_features=2,n_estimators=333)\nregr.fit(X_train, Y_trainmin/90000)\n\nregr2 = GradientBoostingRegressor(max_depth=20, random_state=0,max_features=2,n_estimators=333)\nregr2.fit(X_train, Y_trainmax/100000)\n\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\ny_pred = regr.predict(X_train.values)\nprint('最小薪酬训练集均方根误差',np.sqrt(mean_squared_error(Y_trainmin.values.T[0], y_pred*90000)))\n# y_pred = regr.predict(X_test.values)\n# print('最小薪酬测试集均方根误差',np.sqrt(mean_squared_error(Y_testmin.values.T[0], y_pred*90000)))\n# print('最小薪酬R square',r2_score(Y_testmin.values.T[0], y_pred*90000))#衡量正确率\n\ny_pred2 = regr2.predict(X_train.values)\nprint('最大薪酬训练集均方根误差',np.sqrt(mean_squared_error(Y_trainmax.values.T[0], y_pred2*100000)))\n# y_pred2 = regr.predict(X_test.values)\n# print('最大薪酬测试集均方根误差',np.sqrt(mean_squared_error(Y_testmax.values.T[0], y_pred2*100000)))\n# print('最大薪酬R square',r2_score(Y_testmax.values.T[0], y_pred2*100000))#衡量正确率\n",
"D:\\Anaconda\\lib\\site-packages\\sklearn\\utils\\validation.py:578: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().\n y = column_or_1d(y, warn=True)\n"
],
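[
"# --- Editor's addition (not part of the original notebook): the RMSE above is measured on the\n# training data only (the test block is commented out), so it says little about generalisation.\n# A minimal held-out check using the objects already defined in this notebook (variable names\n# Xtr/Xval/val_model are illustrative additions):\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\nXtr, Xval, ytr, yval = train_test_split(X_train, Y_trainmin, test_size=0.2, random_state=0)\nval_model = GradientBoostingRegressor(max_depth=20, random_state=0, max_features=2, n_estimators=333)\nval_model.fit(Xtr, ytr.values.ravel() / 90000)\nval_pred = val_model.predict(Xval) * 90000\nprint('held-out RMSE (min salary):', np.sqrt(mean_squared_error(yval.values.ravel(), val_pred)))",
"_____no_output_____"
],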
[
"minsalary = y_pred*90000\nmaxsalary= y_pred2*100000",
"_____no_output_____"
],
[
"Y_trainmax.columns =['最高工资']\nY_trainmin.columns =['最低工资']\nY_trainmax.to_csv('trainmaxsalary.csv',index=None)\nY_trainmin.to_csv('trainminsalary.csv',index=None)",
"_____no_output_____"
],
[
"#预测值\nfile = open('pretrainmaxsalary.csv','a')\nfor i in range(len(maxsalary)):\n s = str(maxsalary[i]).replace('[','').replace(']','')#去除[],这两行按数据不同,可以选择\n s = s.replace(\"'\",'').replace(',','') +'\\n' #去除单引号,逗号,每行末尾追加换行符\n file.write(s)\nfile.close()\nfile = open('pretrainminsalary.csv','a')\nfor i in range(len(minsalary)):\n s = str(minsalary[i]).replace('[','').replace(']','')#去除[],这两行按数据不同,可以选择\n s = s.replace(\"'\",'').replace(',','') +'\\n' #去除单引号,逗号,每行末尾追加换行符\n file.write(s)\nfile.close()\n\n",
"_____no_output_____"
],
[
"maxsalary = pd.read_csv('pretrainmaxsalary.csv',header=None)\nminsalary = pd.read_csv('pretrainminsalary.csv',header=None)",
"_____no_output_____"
],
[
"# minsalary.columns =['预测最高工资']\n# minsalary.columns =['预测最低工资']",
"_____no_output_____"
],
[
"X_train.columns =['职位','城市','经验']",
"_____no_output_____"
],
[
"X_train['预测最低工资']=minsalary\nX_train['预测最高工资']=maxsalary\nX_train.to_csv('result/salaryresult.csv',index=None,encoding=\"utf_8_sig\")",
"_____no_output_____"
],
[
"#将职位城市经验从字典中转换出来\ncity = pd.read_pickle('dict/city.pkl')\nexperience = pd.read_pickle('dict/experience.pkl')\noccupation = pd.read_pickle('dict/occupation.pkl')\nsalary = pd.read_csv('result/salaryresult.csv')\n\nprecity =[]\nfor i in salary['城市']:\n for key,values in city.items():\n if values == i:\n precity.append(key)\nsalary['城市'] = precity\n\npreexperience =[]\nfor i in salary['经验']:\n for key,values in experience.items():\n if values == i:\n preexperience.append(key)\n#print(preexperience)\nsalary['经验'] = preexperience\n\npreoccupation =[]\nfor i in salary['职位']:\n for key,values in occupation.items():\n if values == i:\n preoccupation.append(key)\n#print(preexperience)\nsalary['职位'] = preoccupation\n\nsalary.to_csv('result/Resultsalary.csv',index=None,encoding=\"utf_8_sig\")",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e1f031420a4d0714fedb77513ce9399e13abed | 4,670 | ipynb | Jupyter Notebook | playbook/tactics/privilege-escalation/T1053.002.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 8 | 2021-05-25T15:25:31.000Z | 2021-11-08T07:14:45.000Z | playbook/tactics/privilege-escalation/T1053.002.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 1 | 2021-08-23T17:38:02.000Z | 2021-10-12T06:58:19.000Z | playbook/tactics/privilege-escalation/T1053.002.ipynb | haresudhan/The-AtomicPlaybook | 447b1d6bca7c3750c5a58112634f6bac31aff436 | [
"MIT"
] | 2 | 2021-05-29T20:24:24.000Z | 2021-08-05T23:44:12.000Z | 66.714286 | 2,025 | 0.726338 | [
[
[
"# T1053.002 - Scheduled Task/Job: At (Windows)\nAdversaries may abuse the <code>at.exe</code> utility to perform task scheduling for initial or recurring execution of malicious code. The [at](https://attack.mitre.org/software/S0110) utility exists as an executable within Windows for scheduling tasks at a specified time and date. Using [at](https://attack.mitre.org/software/S0110) requires that the Task Scheduler service be running, and the user to be logged on as a member of the local Administrators group. \n\nAn adversary may use <code>at.exe</code> in Windows environments to execute programs at system startup or on a scheduled basis for persistence. [at](https://attack.mitre.org/software/S0110) can also be abused to conduct remote Execution as part of Lateral Movement and or to run a process under the context of a specified account (such as SYSTEM).\n\nNote: The <code>at.exe</code> command line utility has been deprecated in current versions of Windows in favor of <code>schtasks</code>.",
"_____no_output_____"
],
[
"## Atomic Tests",
"_____no_output_____"
]
],
[
[
"#Import the Module before running the tests.\n# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.\nImport-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 - Force",
"_____no_output_____"
]
],
[
[
"### Atomic Test #1 - At.exe Scheduled task\nExecutes cmd.exe\nNote: deprecated in Windows 8+\n\nUpon successful execution, cmd.exe will spawn at.exe and create a scheduled task that will spawn cmd at a specific time.\n\n**Supported Platforms:** windows\n#### Attack Commands: Run with `command_prompt`\n```command_prompt\nat 13:20 /interactive cmd\n```",
"_____no_output_____"
]
],
[
[
"Invoke-AtomicTest T1053.002 -TestNumbers 1",
"_____no_output_____"
]
],
[
[
"## Detection\nMonitor process execution from the svchost.exe in Windows 10 and the Windows Task Scheduler taskeng.exe for older versions of Windows. (Citation: Twitter Leoloobeek Scheduled Task) If scheduled tasks are not used for persistence, then the adversary is likely to remove the task when the action is complete. Monitor Windows Task Scheduler stores in %systemroot%\\System32\\Tasks for change entries related to scheduled tasks that do not correlate with known software, patch cycles, etc.\n\nConfigure event logging for scheduled task creation and changes by enabling the \"Microsoft-Windows-TaskScheduler/Operational\" setting within the event logging service. (Citation: TechNet Forum Scheduled Task Operational Setting) Several events will then be logged on scheduled task activity, including: (Citation: TechNet Scheduled Task Events)(Citation: Microsoft Scheduled Task Events Win10)\n\n* Event ID 106 on Windows 7, Server 2008 R2 - Scheduled task registered\n* Event ID 140 on Windows 7, Server 2008 R2 / 4702 on Windows 10, Server 2016 - Scheduled task updated\n* Event ID 141 on Windows 7, Server 2008 R2 / 4699 on Windows 10, Server 2016 - Scheduled task deleted\n* Event ID 4698 on Windows 10, Server 2016 - Scheduled task created\n* Event ID 4700 on Windows 10, Server 2016 - Scheduled task enabled\n* Event ID 4701 on Windows 10, Server 2016 - Scheduled task disabled\n\nTools such as Sysinternals Autoruns may also be used to detect system changes that could be attempts at persistence, including listing current scheduled tasks. (Citation: TechNet Autoruns)\n\nRemote access tools with built-in features may interact directly with the Windows API to perform these functions outside of typical system utilities. Tasks may also be created through Windows system management tools such as [Windows Management Instrumentation](https://attack.mitre.org/techniques/T1047) and [PowerShell](https://attack.mitre.org/techniques/T1059/001), so additional logging may need to be configured to gather the appropriate data.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7e1f2230902b201db1b5e07838d14233cfd5f04 | 120,395 | ipynb | Jupyter Notebook | image-classification/dlnd_image_classification.ipynb | cfcdavidchan/Deep-Learning-Foundation-Nanodegree | f388c655800c07999158745b19b8566b6d642e03 | [
"MIT"
] | null | null | null | image-classification/dlnd_image_classification.ipynb | cfcdavidchan/Deep-Learning-Foundation-Nanodegree | f388c655800c07999158745b19b8566b6d642e03 | [
"MIT"
] | null | null | null | image-classification/dlnd_image_classification.ipynb | cfcdavidchan/Deep-Learning-Foundation-Nanodegree | f388c655800c07999158745b19b8566b6d642e03 | [
"MIT"
] | null | null | null | 107.880824 | 63,462 | 0.814577 | [
[
[
"# Image Classification\nIn this project, you'll classify images from the [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html). The dataset consists of airplanes, dogs, cats, and other objects. You'll preprocess the images, then train a convolutional neural network on all the samples. The images need to be normalized and the labels need to be one-hot encoded. You'll get to apply what you learned and build a convolutional, max pooling, dropout, and fully connected layers. At the end, you'll get to see your neural network's predictions on the sample images.\n## Get the Data\nRun the following cell to download the [CIFAR-10 dataset for python](https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz).",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\nfrom urllib.request import urlretrieve\nfrom os.path import isfile, isdir\nfrom tqdm import tqdm\nimport problem_unittests as tests\nimport tarfile\n\ncifar10_dataset_folder_path = 'cifar-10-batches-py'\n\n# Use Floyd's cifar-10 dataset if present\nfloyd_cifar10_location = '/input/cifar-10/python.tar.gz'\nif isfile(floyd_cifar10_location):\n tar_gz_path = floyd_cifar10_location\nelse:\n tar_gz_path = 'cifar-10-python.tar.gz'\n\nclass DLProgress(tqdm):\n last_block = 0\n\n def hook(self, block_num=1, block_size=1, total_size=None):\n self.total = total_size\n self.update((block_num - self.last_block) * block_size)\n self.last_block = block_num\n\nif not isfile(tar_gz_path):\n with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:\n urlretrieve(\n 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',\n tar_gz_path,\n pbar.hook)\n\nif not isdir(cifar10_dataset_folder_path):\n with tarfile.open(tar_gz_path) as tar:\n tar.extractall()\n tar.close()\n\n\ntests.test_folder_path(cifar10_dataset_folder_path)",
"All files found!\n"
]
],
[
[
"## Explore the Data\nThe dataset is broken into batches to prevent your machine from running out of memory. The CIFAR-10 dataset consists of 5 batches, named `data_batch_1`, `data_batch_2`, etc.. Each batch contains the labels and images that are one of the following:\n* airplane\n* automobile\n* bird\n* cat\n* deer\n* dog\n* frog\n* horse\n* ship\n* truck\n\nUnderstanding a dataset is part of making predictions on the data. Play around with the code cell below by changing the `batch_id` and `sample_id`. The `batch_id` is the id for a batch (1-5). The `sample_id` is the id for a image and label pair in the batch.\n\nAsk yourself \"What are all possible labels?\", \"What is the range of values for the image data?\", \"Are the labels in order or random?\". Answers to questions like these will help you preprocess the data and end up with better predictions.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport helper\nimport numpy as np\n\n# Explore the dataset\nbatch_id = 1\nsample_id = 5\nhelper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)",
"\nStats of batch 1:\nSamples: 10000\nLabel Counts: {0: 1005, 1: 974, 2: 1032, 3: 1016, 4: 999, 5: 937, 6: 1030, 7: 1001, 8: 1025, 9: 981}\nFirst 20 Labels: [6, 9, 9, 4, 1, 1, 2, 7, 8, 3, 4, 7, 7, 2, 9, 9, 9, 3, 2, 6]\n\nExample of Image 5:\nImage - Min Value: 0 Max Value: 252\nImage - Shape: (32, 32, 3)\nLabel - Label Id: 1 Name: automobile\n"
]
],
[
[
"## Implement Preprocess Functions\n### Normalize\nIn the cell below, implement the `normalize` function to take in image data, `x`, and return it as a normalized Numpy array. The values should be in the range of 0 to 1, inclusive. The return object should be the same shape as `x`.",
"_____no_output_____"
]
],
[
[
"def normalize(x):\n \"\"\"\n Normalize a list of sample image data in the range of 0 to 1\n : x: List of image data. The image shape is (32, 32, 3)\n : return: Numpy array of normalize data\n \"\"\"\n x_norm = x.reshape(x.size)\n x_norm = (x_norm - min(x_norm))/(max(x_norm)-min(x_norm)) \n \n return x_norm.reshape(x.shape)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_normalize(normalize)",
"Tests Passed\n"
]
],
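[
[
"# --- Editor's addition (not part of the original notebook): a quick sanity check that\n# normalize() maps the smallest pixel value to 0 and the largest to 1 on random uint8 data.\nsample = np.random.randint(0, 256, size=(4, 32, 32, 3), dtype=np.uint8)\nnormed = normalize(sample)\nprint(normed.min(), normed.max(), normed.shape)",
"_____no_output_____"
]
],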
[
[
"### One-hot encode\nJust like the previous code cell, you'll be implementing a function for preprocessing. This time, you'll implement the `one_hot_encode` function. The input, `x`, are a list of labels. Implement the function to return the list of labels as One-Hot encoded Numpy array. The possible values for labels are 0 to 9. The one-hot encoding function should return the same encoding for each value between each call to `one_hot_encode`. Make sure to save the map of encodings outside the function.\n\nHint: Don't reinvent the wheel.",
"_____no_output_____"
]
],
[
[
"def one_hot_encode(x):\n \"\"\"\n One hot encode a list of sample labels. Return a one-hot encoded vector for each label.\n : x: List of sample Labels\n : return: Numpy array of one-hot encoded labels\n \"\"\"\n\n one_hot_array = np.zeros((len(x), 10))\n for index in range(len(x)):\n val_index = x[index]\n one_hot_array[index][val_index] = 1\n\n return one_hot_array\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_one_hot_encode(one_hot_encode)",
"Tests Passed\n"
]
],
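[
[
"# --- Editor's addition (not part of the original notebook): a tiny check of one_hot_encode(),\n# plus the equivalent NumPy one-liner for comparison (np.eye(10)[labels]).\nlabels = [0, 3, 9, 3]\nprint(one_hot_encode(labels))\nprint(np.array_equal(one_hot_encode(labels), np.eye(10)[labels]))",
"_____no_output_____"
]
],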
[
[
"### Randomize Data\nAs you saw from exploring the data above, the order of the samples are randomized. It doesn't hurt to randomize it again, but you don't need to for this dataset.",
"_____no_output_____"
],
[
"## Preprocess all the data and save it\nRunning the code cell below will preprocess all the CIFAR-10 data and save it to file. The code below also uses 10% of the training data for validation.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Preprocess Training, Validation, and Testing Data\nhelper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)",
"_____no_output_____"
]
],
[
[
"# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport pickle\nimport problem_unittests as tests\nimport helper\n\n# Load the Preprocessed Validation data\nvalid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))",
"_____no_output_____"
]
],
[
[
"## Build the network\nFor the neural network, you'll build each layer into a function. Most of the code you've seen has been outside of functions. To test your code more thoroughly, we require that you put each layer in a function. This allows us to give you better feedback and test for simple mistakes using our unittests before you submit your project.\n\n>**Note:** If you're finding it hard to dedicate enough time for this course each week, we've provided a small shortcut to this part of the project. In the next couple of problems, you'll have the option to use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages to build each layer, except the layers you build in the \"Convolutional and Max Pooling Layer\" section. TF Layers is similar to Keras's and TFLearn's abstraction to layers, so it's easy to pickup.\n\n>However, if you would like to get the most out of this course, try to solve all the problems _without_ using anything from the TF Layers packages. You **can** still use classes from other packages that happen to have the same name as ones you find in TF Layers! For example, instead of using the TF Layers version of the `conv2d` class, [tf.layers.conv2d](https://www.tensorflow.org/api_docs/python/tf/layers/conv2d), you would want to use the TF Neural Network version of `conv2d`, [tf.nn.conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d). \n\nLet's begin!\n\n### Input\nThe neural network needs to read the image data, one-hot encoded labels, and dropout keep probability. Implement the following functions\n* Implement `neural_net_image_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * Set the shape using `image_shape` with batch size set to `None`.\n * Name the TensorFlow placeholder \"x\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n* Implement `neural_net_label_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder)\n * Set the shape using `n_classes` with batch size set to `None`.\n * Name the TensorFlow placeholder \"y\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n* Implement `neural_net_keep_prob_input`\n * Return a [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder) for dropout keep probability.\n * Name the TensorFlow placeholder \"keep_prob\" using the TensorFlow `name` parameter in the [TF Placeholder](https://www.tensorflow.org/api_docs/python/tf/placeholder).\n\nThese names will be used at the end of the project to load your saved model.\n\nNote: `None` for shapes in TensorFlow allow for a dynamic size.",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\ndef neural_net_image_input(image_shape):\n \"\"\"\n Return a Tensor for a batch of image input\n : image_shape: Shape of the images\n : return: Tensor for image input.\n \"\"\"\n return tf.placeholder(tf.float32, shape=(None, image_shape[0], image_shape[1], image_shape[2]), name='x')\n\n\n\ndef neural_net_label_input(n_classes):\n \"\"\"\n Return a Tensor for a batch of label input\n : n_classes: Number of classes\n : return: Tensor for label input.\n \"\"\"\n return tf.placeholder(tf.uint8, (None, n_classes), name='y')\n\n\ndef neural_net_keep_prob_input():\n \"\"\"\n Return a Tensor for keep probability\n : return: Tensor for keep probability.\n \"\"\"\n return tf.placeholder(tf.float32, None, name='keep_prob')\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntf.reset_default_graph()\ntests.test_nn_image_inputs(neural_net_image_input)\ntests.test_nn_label_inputs(neural_net_label_input)\ntests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)",
"Image Input Tests Passed.\nLabel Input Tests Passed.\nKeep Prob Tests Passed.\n"
]
],
[
[
"### Convolution and Max Pooling Layer\nConvolution layers have a lot of success with images. For this code cell, you should implement the function `conv2d_maxpool` to apply convolution then max pooling:\n* Create the weight and bias using `conv_ksize`, `conv_num_outputs` and the shape of `x_tensor`.\n* Apply a convolution to `x_tensor` using weight and `conv_strides`.\n * We recommend you use same padding, but you're welcome to use any padding.\n* Add bias\n* Add a nonlinear activation to the convolution.\n* Apply Max Pooling using `pool_ksize` and `pool_strides`.\n * We recommend you use same padding, but you're welcome to use any padding.\n\n**Note:** You **can't** use [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) for **this** layer, but you can still use TensorFlow's [Neural Network](https://www.tensorflow.org/api_docs/python/tf/nn) package. You may still use the shortcut option for all the **other** layers.",
"_____no_output_____"
]
],
[
[
"def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):\n \"\"\"\n Apply convolution then max pooling to x_tensor\n :param x_tensor: TensorFlow Tensor\n :param conv_num_outputs: Number of outputs for the convolutional layer\n :param conv_ksize: kernal size 2-D Tuple for the convolutional layer\n :param conv_strides: Stride 2-D Tuple for convolution\n :param pool_ksize: kernal size 2-D Tuple for pool\n :param pool_strides: Stride 2-D Tuple for pool\n : return: A tensor that represents convolution and max pooling of x_tensor\n \"\"\"\n x_tensor_dims = x_tensor._shape.ndims\n channel_num = x_tensor._shape.dims[x_tensor_dims - 1].value\n \n mu = 0\n sigma = 0.1\n \n conv_weight = tf.Variable(tf.truncated_normal(shape=(conv_ksize[0], conv_ksize[1], channel_num, conv_num_outputs), mean=mu, stddev=sigma))\n conv_bias = tf.Variable(tf.zeros(conv_num_outputs))\n \n conv = tf.nn.conv2d(x_tensor, conv_weight, strides=[1, conv_strides[0], conv_strides[1], 1], padding='SAME') + conv_bias\n conv = tf.nn.relu(conv)\n \n return tf.nn.max_pool(conv, ksize=[1, pool_ksize[1], pool_ksize[1], 1], strides=[1, pool_strides[0], pool_strides[1], 1], padding='SAME') \n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_con_pool(conv2d_maxpool)",
"Tests Passed\n"
]
],
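A quick sanity check on the shapes the cell above produces, a minimal sketch in plain Python that only assumes the 32x32x3 CIFAR-10 input, SAME padding, conv stride 1 and a 2x2 pool with stride 2:

```python
import math

def same_padding_output(size, stride):
    # with SAME padding the spatial output is ceil(input / stride)
    return math.ceil(size / stride)

height = width = 32                          # CIFAR-10 image size
conv_out = same_padding_output(height, 1)    # conv, stride 1 -> 32
pool_out = same_padding_output(conv_out, 2)  # 2x2 max pool, stride 2 -> 16

print(conv_out, pool_out)  # 32 16
```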
[
[
"### Flatten Layer\nImplement the `flatten` function to change the dimension of `x_tensor` from a 4-D tensor to a 2-D tensor. The output should be the shape (*Batch Size*, *Flattened Image Size*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.",
"_____no_output_____"
]
],
[
[
"def flatten(x_tensor):\n \"\"\"\n Flatten x_tensor to (Batch Size, Flattened Image Size)\n : x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.\n : return: A tensor of size (Batch Size, Flattened Image Size).\n \"\"\"\n shaped = x_tensor.get_shape().as_list()\n reshaped = tf.reshape(x_tensor, [-1, shaped[1] * shaped[2] * shaped[3]])\n \n return reshaped\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_flatten(flatten)",
"Tests Passed\n"
]
],
[
[
"### Fully-Connected Layer\nImplement the `fully_conn` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.",
"_____no_output_____"
]
],
[
[
"def fully_conn(x_tensor, num_outputs):\n \"\"\"\n Apply a fully connected layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n weight = tf.Variable(tf.truncated_normal(shape=[x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.1)) \n bias = tf.Variable(tf.zeros(shape=num_outputs))\n \n return tf.nn.relu(tf.matmul(x_tensor, weight) + bias)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_fully_conn(fully_conn)",
"Tests Passed\n"
]
],
[
[
"### Output Layer\nImplement the `output` function to apply a fully connected layer to `x_tensor` with the shape (*Batch Size*, *num_outputs*). Shortcut option: you can use classes from the [TensorFlow Layers](https://www.tensorflow.org/api_docs/python/tf/layers) or [TensorFlow Layers (contrib)](https://www.tensorflow.org/api_guides/python/contrib.layers) packages for this layer. For more of a challenge, only use other TensorFlow packages.\n\n**Note:** Activation, softmax, or cross entropy should **not** be applied to this.",
"_____no_output_____"
]
],
[
[
"def output(x_tensor, num_outputs):\n \"\"\"\n Apply a output layer to x_tensor using weight and bias\n : x_tensor: A 2-D tensor where the first dimension is batch size.\n : num_outputs: The number of output that the new tensor should be.\n : return: A 2-D tensor where the second dimension is num_outputs.\n \"\"\"\n weight = tf.Variable(tf.truncated_normal(shape=[x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.1)) \n bias = tf.Variable(tf.zeros(shape=num_outputs))\n \n return tf.matmul(x_tensor, weight) + bias\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_output(output)",
"Tests Passed\n"
]
],
[
[
"### Create Convolutional Model\nImplement the function `conv_net` to create a convolutional neural network model. The function takes in a batch of images, `x`, and outputs logits. Use the layers you created above to create this model:\n\n* Apply 1, 2, or 3 Convolution and Max Pool layers\n* Apply a Flatten Layer\n* Apply 1, 2, or 3 Fully Connected Layers\n* Apply an Output Layer\n* Return the output\n* Apply [TensorFlow's Dropout](https://www.tensorflow.org/api_docs/python/tf/nn/dropout) to one or more layers in the model using `keep_prob`. ",
"_____no_output_____"
]
],
[
[
"def conv_net(x, keep_prob):\n \"\"\"\n Create a convolutional neural network model\n : x: Placeholder tensor that holds image data.\n : keep_prob: Placeholder tensor that hold dropout keep probability.\n : return: Tensor that represents logits\n \"\"\"\n # Play around with different number of outputs, kernel size and stride\n # Function Definition from Above:\n # conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n \n conv_num_outputs = 10\n conv_ksize = (3, 3)\n conv_strides = (1, 1)\n pool_ksize = (2, 2)\n pool_strides = (2, 2)\n x_tensor = conv2d_maxpool(x, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)\n # Function Definition from Above:\n # flatten(x_tensor)\n \n x_tensor = flatten(x_tensor)\n # Play around with different number of outputs\n # Function Definition from Above:\n # fully_conn(x_tensor, num_outputs)\n num_outputs = 100\n x_tensor = fully_conn(x_tensor, num_outputs)\n x_tensor = tf.nn.dropout(x_tensor, keep_prob)\n\n # Set this to the number of classes\n # Function Definition from Above:\n # output(x_tensor, num_outputs)\n num_outputs = 10\n model = output(x_tensor, num_outputs)\n \n # TODO: return output\n return model\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\n\n##############################\n## Build the Neural Network ##\n##############################\n\n# Remove previous weights, bias, inputs, etc..\ntf.reset_default_graph()\n\n# Inputs\nx = neural_net_image_input((32, 32, 3))\ny = neural_net_label_input(10)\nkeep_prob = neural_net_keep_prob_input()\n\n# Model\nlogits = conv_net(x, keep_prob)\n\n# Name logits Tensor, so that is can be loaded from disk after training\nlogits = tf.identity(logits, name='logits')\n\n# Loss and Optimizer\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\noptimizer = tf.train.AdamOptimizer().minimize(cost)\n\n# Accuracy\ncorrect_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')\n\ntests.test_conv_net(conv_net)",
"Neural Network Built!\n"
]
],
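As a rough size check on the network built above (3x3 conv with 10 outputs on a 32x32x3 input, 2x2/2 pooling, a 100-unit fully connected layer, 10 classes), the trainable parameter count can be worked out by hand; this is only a back-of-the-envelope sketch, not part of the graded cells:

```python
# conv layer: 3x3 kernel, 3 input channels, 10 filters, plus 10 biases
conv_params = 3 * 3 * 3 * 10 + 10            # 280
# SAME conv keeps 32x32, the 2x2/2 pool halves it to 16x16x10 = 2560 features
flat_size = 16 * 16 * 10
# fully connected layer: 2560 -> 100, plus 100 biases
fc_params = flat_size * 100 + 100            # 256100
# output layer: 100 -> 10, plus 10 biases
out_params = 100 * 10 + 10                   # 1010
print(conv_params + fc_params + out_params)  # 257390
```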
[
[
"## Train the Neural Network\n### Single Optimization\nImplement the function `train_neural_network` to do a single optimization. The optimization should use `optimizer` to optimize in `session` with a `feed_dict` of the following:\n* `x` for image input\n* `y` for labels\n* `keep_prob` for keep probability for dropout\n\nThis function will be called for each batch, so `tf.global_variables_initializer()` has already been called.\n\nNote: Nothing needs to be returned. This function is only optimizing the neural network.",
"_____no_output_____"
]
],
[
[
"def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):\n \"\"\"\n Optimize the session on a batch of images and labels\n : session: Current TensorFlow session\n : optimizer: TensorFlow optimizer function\n : keep_probability: keep probability\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n \"\"\"\n feed_dict = {\n x: feature_batch, \n y: label_batch, \n keep_prob: keep_probability}\n session.run(optimizer, feed_dict=feed_dict)\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_train_nn(train_neural_network)",
"Tests Passed\n"
]
],
[
[
"### Show Stats\nImplement the function `print_stats` to print loss and validation accuracy. Use the global variables `valid_features` and `valid_labels` to calculate validation accuracy. Use a keep probability of `1.0` to calculate the loss and validation accuracy.",
"_____no_output_____"
]
],
[
[
"def print_stats(session, feature_batch, label_batch, cost, accuracy):\n \"\"\"\n Print information about loss and validation accuracy\n : session: Current TensorFlow session\n : feature_batch: Batch of Numpy image data\n : label_batch: Batch of Numpy label data\n : cost: TensorFlow cost function\n : accuracy: TensorFlow accuracy function\n \"\"\"\n current_cost = session.run(cost,feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.})\n valid_accuracy = session.run(accuracy,feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.})\n \n print('Loss: {:<8.3} Valid Accuracy: {:<5.3}'.format(current_cost,valid_accuracy))",
"_____no_output_____"
]
],
[
[
"### Hyperparameters\nTune the following parameters:\n* Set `epochs` to the number of iterations until the network stops learning or start overfitting\n* Set `batch_size` to the highest number that your machine has memory for. Most people set them to common sizes of memory:\n * 64\n * 128\n * 256\n * ...\n* Set `keep_probability` to the probability of keeping a node using dropout",
"_____no_output_____"
]
],
[
[
"# TODO: Tune Parameters\nepochs = 20\nbatch_size = 128\nkeep_probability = 0.5",
"_____no_output_____"
]
],
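One way to pick these values is to sweep a small grid and keep the combination with the best validation accuracy; the sketch below only builds the candidate combinations (the value lists are placeholders) and assumes the single-batch training loop from the next section would be re-run for each one:

```python
from itertools import product

epoch_options = [10, 20, 30]
batch_options = [64, 128, 256]
keep_prob_options = [0.5, 0.7]

# every combination of the three hyperparameters
for epochs, batch_size, keep_probability in product(epoch_options, batch_options, keep_prob_options):
    print(epochs, batch_size, keep_probability)
    # re-run the training cell here and record the final validation accuracy
```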
[
[
"### Train on a Single CIFAR-10 Batch\nInstead of training the neural network on all the CIFAR-10 batches of data, let's use a single batch. This should save time while you iterate on the model to get a better accuracy. Once the final validation accuracy is 50% or greater, run the model on all the data in the next section.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nprint('Checking the Training on a Single Batch...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n batch_i = 1\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)",
"Checking the Training on a Single Batch...\nEpoch 1, CIFAR-10 Batch 1: Loss: 2.09 Valid Accuracy: 0.321\nEpoch 2, CIFAR-10 Batch 1: Loss: 1.93 Valid Accuracy: 0.397\nEpoch 3, CIFAR-10 Batch 1: Loss: 1.81 Valid Accuracy: 0.432\nEpoch 4, CIFAR-10 Batch 1: Loss: 1.68 Valid Accuracy: 0.458\nEpoch 5, CIFAR-10 Batch 1: Loss: 1.58 Valid Accuracy: 0.461\nEpoch 6, CIFAR-10 Batch 1: Loss: 1.48 Valid Accuracy: 0.478\nEpoch 7, CIFAR-10 Batch 1: Loss: 1.38 Valid Accuracy: 0.484\nEpoch 8, CIFAR-10 Batch 1: Loss: 1.3 Valid Accuracy: 0.495\nEpoch 9, CIFAR-10 Batch 1: Loss: 1.25 Valid Accuracy: 0.498\nEpoch 10, CIFAR-10 Batch 1: Loss: 1.14 Valid Accuracy: 0.504\nEpoch 11, CIFAR-10 Batch 1: Loss: 1.1 Valid Accuracy: 0.512\nEpoch 12, CIFAR-10 Batch 1: Loss: 1.04 Valid Accuracy: 0.513\nEpoch 13, CIFAR-10 Batch 1: Loss: 0.968 Valid Accuracy: 0.526\nEpoch 14, CIFAR-10 Batch 1: Loss: 0.916 Valid Accuracy: 0.525\nEpoch 15, CIFAR-10 Batch 1: Loss: 0.92 Valid Accuracy: 0.522\nEpoch 16, CIFAR-10 Batch 1: Loss: 0.854 Valid Accuracy: 0.526\nEpoch 17, CIFAR-10 Batch 1: Loss: 0.794 Valid Accuracy: 0.525\nEpoch 18, CIFAR-10 Batch 1: Loss: 0.773 Valid Accuracy: 0.528\nEpoch 19, CIFAR-10 Batch 1: Loss: 0.723 Valid Accuracy: 0.532\nEpoch 20, CIFAR-10 Batch 1: Loss: 0.684 Valid Accuracy: 0.536\n"
]
],
[
[
"### Fully Train the Model\nNow that you got a good accuracy with a single CIFAR-10 batch, try it with all five batches.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_model_path = './image_classification'\n\nprint('Training...')\nwith tf.Session() as sess:\n # Initializing the variables\n sess.run(tf.global_variables_initializer())\n \n # Training cycle\n for epoch in range(epochs):\n # Loop over all batches\n n_batches = 5\n for batch_i in range(1, n_batches + 1):\n for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):\n train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)\n print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')\n print_stats(sess, batch_features, batch_labels, cost, accuracy)\n \n # Save Model\n saver = tf.train.Saver()\n save_path = saver.save(sess, save_model_path)",
"Training...\nEpoch 1, CIFAR-10 Batch 1: Loss: 2.1 Valid Accuracy: 0.31 \nEpoch 1, CIFAR-10 Batch 2: Loss: 1.8 Valid Accuracy: 0.391\nEpoch 1, CIFAR-10 Batch 3: Loss: 1.64 Valid Accuracy: 0.411\nEpoch 1, CIFAR-10 Batch 4: Loss: 1.61 Valid Accuracy: 0.438\nEpoch 1, CIFAR-10 Batch 5: Loss: 1.65 Valid Accuracy: 0.467\nEpoch 2, CIFAR-10 Batch 1: Loss: 1.74 Valid Accuracy: 0.468\nEpoch 2, CIFAR-10 Batch 2: Loss: 1.39 Valid Accuracy: 0.491\nEpoch 2, CIFAR-10 Batch 3: Loss: 1.29 Valid Accuracy: 0.499\nEpoch 2, CIFAR-10 Batch 4: Loss: 1.33 Valid Accuracy: 0.514\nEpoch 2, CIFAR-10 Batch 5: Loss: 1.49 Valid Accuracy: 0.525\nEpoch 3, CIFAR-10 Batch 1: Loss: 1.55 Valid Accuracy: 0.517\nEpoch 3, CIFAR-10 Batch 2: Loss: 1.2 Valid Accuracy: 0.529\nEpoch 3, CIFAR-10 Batch 3: Loss: 1.15 Valid Accuracy: 0.531\nEpoch 3, CIFAR-10 Batch 4: Loss: 1.27 Valid Accuracy: 0.533\nEpoch 3, CIFAR-10 Batch 5: Loss: 1.38 Valid Accuracy: 0.55 \nEpoch 4, CIFAR-10 Batch 1: Loss: 1.41 Valid Accuracy: 0.545\nEpoch 4, CIFAR-10 Batch 2: Loss: 1.09 Valid Accuracy: 0.556\nEpoch 4, CIFAR-10 Batch 3: Loss: 1.09 Valid Accuracy: 0.546\nEpoch 4, CIFAR-10 Batch 4: Loss: 1.15 Valid Accuracy: 0.556\nEpoch 4, CIFAR-10 Batch 5: Loss: 1.31 Valid Accuracy: 0.556\nEpoch 5, CIFAR-10 Batch 1: Loss: 1.28 Valid Accuracy: 0.558\nEpoch 5, CIFAR-10 Batch 2: Loss: 1.03 Valid Accuracy: 0.566\nEpoch 5, CIFAR-10 Batch 3: Loss: 0.96 Valid Accuracy: 0.565\nEpoch 5, CIFAR-10 Batch 4: Loss: 1.08 Valid Accuracy: 0.566\nEpoch 5, CIFAR-10 Batch 5: Loss: 1.23 Valid Accuracy: 0.581\nEpoch 6, CIFAR-10 Batch 1: Loss: 1.18 Valid Accuracy: 0.576\nEpoch 6, CIFAR-10 Batch 2: Loss: 0.946 Valid Accuracy: 0.574\nEpoch 6, CIFAR-10 Batch 3: Loss: 0.943 Valid Accuracy: 0.576\nEpoch 6, CIFAR-10 Batch 4: Loss: 0.993 Valid Accuracy: 0.581\nEpoch 6, CIFAR-10 Batch 5: Loss: 1.14 Valid Accuracy: 0.581\nEpoch 7, CIFAR-10 Batch 1: Loss: 1.15 Valid Accuracy: 0.587\nEpoch 7, CIFAR-10 Batch 2: Loss: 0.884 Valid Accuracy: 0.59 \nEpoch 7, CIFAR-10 Batch 3: Loss: 0.855 Valid Accuracy: 0.589\nEpoch 7, CIFAR-10 Batch 4: Loss: 0.966 Valid Accuracy: 0.586\nEpoch 7, CIFAR-10 Batch 5: Loss: 1.07 Valid Accuracy: 0.596\nEpoch 8, CIFAR-10 Batch 1: Loss: 1.02 Valid Accuracy: 0.596\nEpoch 8, CIFAR-10 Batch 2: Loss: 0.827 Valid Accuracy: 0.597\nEpoch 8, CIFAR-10 Batch 3: Loss: 0.826 Valid Accuracy: 0.593\nEpoch 8, CIFAR-10 Batch 4: Loss: 0.896 Valid Accuracy: 0.594\nEpoch 8, CIFAR-10 Batch 5: Loss: 1.02 Valid Accuracy: 0.596\nEpoch 9, CIFAR-10 Batch 1: Loss: 0.968 Valid Accuracy: 0.598\nEpoch 9, CIFAR-10 Batch 2: Loss: 0.782 Valid Accuracy: 0.601\nEpoch 9, CIFAR-10 Batch 3: Loss: 0.746 Valid Accuracy: 0.602\nEpoch 9, CIFAR-10 Batch 4: Loss: 0.849 Valid Accuracy: 0.596\nEpoch 9, CIFAR-10 Batch 5: Loss: 0.958 Valid Accuracy: 0.597\nEpoch 10, CIFAR-10 Batch 1: Loss: 0.984 Valid Accuracy: 0.591\nEpoch 10, CIFAR-10 Batch 2: Loss: 0.736 Valid Accuracy: 0.603\nEpoch 10, CIFAR-10 Batch 3: Loss: 0.672 Valid Accuracy: 0.608\nEpoch 10, CIFAR-10 Batch 4: Loss: 0.794 Valid Accuracy: 0.601\nEpoch 10, CIFAR-10 Batch 5: Loss: 0.92 Valid Accuracy: 0.607\nEpoch 11, CIFAR-10 Batch 1: Loss: 0.895 Valid Accuracy: 0.606\nEpoch 11, CIFAR-10 Batch 2: Loss: 0.711 Valid Accuracy: 0.61 \nEpoch 11, CIFAR-10 Batch 3: Loss: 0.663 Valid Accuracy: 0.61 \nEpoch 11, CIFAR-10 Batch 4: Loss: 0.762 Valid Accuracy: 0.608\nEpoch 11, CIFAR-10 Batch 5: Loss: 0.887 Valid Accuracy: 0.603\nEpoch 12, CIFAR-10 Batch 1: Loss: 0.902 Valid Accuracy: 0.613\nEpoch 12, CIFAR-10 Batch 2: Loss: 0.671 Valid Accuracy: 0.606\nEpoch 12, CIFAR-10 Batch 3: 
Loss: 0.568 Valid Accuracy: 0.615\nEpoch 12, CIFAR-10 Batch 4: Loss: 0.736 Valid Accuracy: 0.604\nEpoch 12, CIFAR-10 Batch 5: Loss: 0.855 Valid Accuracy: 0.611\nEpoch 13, CIFAR-10 Batch 1: Loss: 0.899 Valid Accuracy: 0.615\nEpoch 13, CIFAR-10 Batch 2: Loss: 0.648 Valid Accuracy: 0.616\nEpoch 13, CIFAR-10 Batch 3: Loss: 0.587 Valid Accuracy: 0.614\nEpoch 13, CIFAR-10 Batch 4: Loss: 0.694 Valid Accuracy: 0.613\nEpoch 13, CIFAR-10 Batch 5: Loss: 0.818 Valid Accuracy: 0.611\nEpoch 14, CIFAR-10 Batch 1: Loss: 0.865 Valid Accuracy: 0.619\nEpoch 14, CIFAR-10 Batch 2: Loss: 0.581 Valid Accuracy: 0.618\nEpoch 14, CIFAR-10 Batch 3: Loss: 0.565 Valid Accuracy: 0.618\nEpoch 14, CIFAR-10 Batch 4: Loss: 0.651 Valid Accuracy: 0.615\nEpoch 14, CIFAR-10 Batch 5: Loss: 0.761 Valid Accuracy: 0.621\nEpoch 15, CIFAR-10 Batch 1: Loss: 0.852 Valid Accuracy: 0.62 \nEpoch 15, CIFAR-10 Batch 2: Loss: 0.575 Valid Accuracy: 0.624\nEpoch 15, CIFAR-10 Batch 3: Loss: 0.533 Valid Accuracy: 0.619\nEpoch 15, CIFAR-10 Batch 4: Loss: 0.604 Valid Accuracy: 0.608\nEpoch 15, CIFAR-10 Batch 5: Loss: 0.783 Valid Accuracy: 0.605\nEpoch 16, CIFAR-10 Batch 1: Loss: 0.826 Valid Accuracy: 0.618\nEpoch 16, CIFAR-10 Batch 2: Loss: 0.607 Valid Accuracy: 0.623\nEpoch 16, CIFAR-10 Batch 3: Loss: 0.536 Valid Accuracy: 0.609\nEpoch 16, CIFAR-10 Batch 4: Loss: 0.546 Valid Accuracy: 0.617\nEpoch 16, CIFAR-10 Batch 5: Loss: 0.72 Valid Accuracy: 0.608\nEpoch 17, CIFAR-10 Batch 1: Loss: 0.791 Valid Accuracy: 0.622\nEpoch 17, CIFAR-10 Batch 2: Loss: 0.538 Valid Accuracy: 0.621\nEpoch 17, CIFAR-10 Batch 3: Loss: 0.486 Valid Accuracy: 0.621\nEpoch 17, CIFAR-10 Batch 4: Loss: 0.571 Valid Accuracy: 0.623\nEpoch 17, CIFAR-10 Batch 5: Loss: 0.68 Valid Accuracy: 0.616\nEpoch 18, CIFAR-10 Batch 1: Loss: 0.783 Valid Accuracy: 0.621\nEpoch 18, CIFAR-10 Batch 2: Loss: 0.536 Valid Accuracy: 0.624\nEpoch 18, CIFAR-10 Batch 3: Loss: 0.462 Valid Accuracy: 0.625\nEpoch 18, CIFAR-10 Batch 4: Loss: 0.555 Valid Accuracy: 0.622\nEpoch 18, CIFAR-10 Batch 5: Loss: 0.642 Valid Accuracy: 0.617\nEpoch 19, CIFAR-10 Batch 1: Loss: 0.728 Valid Accuracy: 0.622\nEpoch 19, CIFAR-10 Batch 2: Loss: 0.507 Valid Accuracy: 0.625\nEpoch 19, CIFAR-10 Batch 3: Loss: 0.442 Valid Accuracy: 0.631\nEpoch 19, CIFAR-10 Batch 4: Loss: 0.495 Valid Accuracy: 0.624\nEpoch 19, CIFAR-10 Batch 5: Loss: 0.606 Valid Accuracy: 0.62 \nEpoch 20, CIFAR-10 Batch 1: Loss: 0.726 Valid Accuracy: 0.624\nEpoch 20, CIFAR-10 Batch 2: Loss: 0.493 Valid Accuracy: 0.626\nEpoch 20, CIFAR-10 Batch 3: Loss: 0.466 Valid Accuracy: 0.623\nEpoch 20, CIFAR-10 Batch 4: Loss: 0.487 Valid Accuracy: 0.616\nEpoch 20, CIFAR-10 Batch 5: Loss: 0.592 Valid Accuracy: 0.622\n"
]
],
[
[
"# Checkpoint\nThe model has been saved to disk.\n## Test Model\nTest your model against the test dataset. This will be your final accuracy. You should have an accuracy greater than 50%. If you don't, keep tweaking the model architecture and parameters.",
"_____no_output_____"
]
],
[
[
"\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport tensorflow as tf\nimport pickle\nimport helper\nimport random\n\n# Set batch size if not already set\ntry:\n if batch_size:\n pass\nexcept NameError:\n batch_size = 64\n\nsave_model_path = './image_classification'\nn_samples = 4\ntop_n_predictions = 3\n\ndef test_model():\n \"\"\"\n Test the saved model against the test dataset\n \"\"\"\n\n test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))\n loaded_graph = tf.Graph()\n\n with tf.Session(graph=loaded_graph) as sess:\n # Load model\n loader = tf.train.import_meta_graph(save_model_path + '.meta')\n loader.restore(sess, save_model_path)\n\n # Get Tensors from loaded model\n loaded_x = loaded_graph.get_tensor_by_name('x:0')\n loaded_y = loaded_graph.get_tensor_by_name('y:0')\n loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n loaded_logits = loaded_graph.get_tensor_by_name('logits:0')\n loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')\n \n # Get accuracy in batches for memory limitations\n test_batch_acc_total = 0\n test_batch_count = 0\n \n for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):\n test_batch_acc_total += sess.run(\n loaded_acc,\n feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})\n test_batch_count += 1\n\n print('Testing Accuracy: {}\\n'.format(test_batch_acc_total/test_batch_count))\n\n # Print Random Samples\n random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))\n random_test_predictions = sess.run(\n tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),\n feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})\n helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)\n\n\ntest_model()",
"INFO:tensorflow:Restoring parameters from ./image_classification\nTesting Accuracy: 0.6132318037974683\n\n"
]
],
[
[
"## Why 50-80% Accuracy?\nYou might be wondering why you can't get an accuracy any higher. First things first, 50% isn't bad for a simple CNN. Pure guessing would get you 10% accuracy. However, you might notice people are getting scores [well above 80%](http://rodrigob.github.io/are_we_there_yet/build/classification_datasets_results.html#43494641522d3130). That's because we haven't taught you all there is to know about neural networks. We still need to cover a few more techniques.\n## Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_image_classification.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7e1f2abbba3dbbcf8ee18ebfc9e8288a11dfcf1 | 8,464 | ipynb | Jupyter Notebook | notebooks/09-Extras.ipynb | jbwhit/WSP-312-Tips-and-Tricks | e1582f6daf8b555c6c13eb0f1f4eb7edadfa1017 | [
"MIT"
] | 4 | 2017-05-06T16:16:06.000Z | 2021-07-26T04:40:46.000Z | notebooks/09-Extras.ipynb | jbwhit/WSP-312-Tips-and-Tricks | e1582f6daf8b555c6c13eb0f1f4eb7edadfa1017 | [
"MIT"
] | null | null | null | notebooks/09-Extras.ipynb | jbwhit/WSP-312-Tips-and-Tricks | e1582f6daf8b555c6c13eb0f1f4eb7edadfa1017 | [
"MIT"
] | 1 | 2018-04-04T21:24:31.000Z | 2018-04-04T21:24:31.000Z | 19.962264 | 97 | 0.497755 | [
[
[
"from __future__ import absolute_import, division, print_function",
"_____no_output_____"
]
],
[
[
"## Learn the standard library to at least know what's there\n\n### itertools and collections have very useful features\n\n - chain\n - product\n - permutations\n - combinations\n - izip",
"_____no_output_____"
]
],
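A few of the features named above in action; a small illustrative sketch (in Python 3, `izip` is simply the built-in `zip`):

```python
from itertools import chain, product, permutations, combinations

print(list(chain([1, 2], [3, 4])))    # [1, 2, 3, 4]
print(list(product('ab', [0, 1])))    # [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
print(list(permutations('abc', 2)))   # 6 ordered pairs
print(list(combinations('abc', 2)))   # [('a', 'b'), ('a', 'c'), ('b', 'c')]
```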
[
[
"%matplotlib inline\n%config InlineBackend.figure_format='retina'\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context('talk')\nsns.set_style('darkgrid') \nplt.rcParams['figure.figsize'] = 12, 8 # plotsize \n\nimport numpy as np\nimport pandas as pd",
"_____no_output_____"
],
[
"# plot residuals",
"_____no_output_____"
],
[
"from itertools import groupby # NOT REGULAR GROUPBY\nfrom itertools import product, cycle, izip\nimport re # regular expressions",
"_____no_output_____"
]
],
[
[
"## Challenge (Easy)\n\nWrite a function to return the total number of digits in a given string, and those digits. ",
"_____no_output_____"
]
],
[
[
"test_string = \"\"\"de3456yghj87654edfghuio908ujhgyuY^YHJUi8ytgh gtyujnh y7\"\"\"",
"_____no_output_____"
],
[
"count = 0\ndigits = []\nfor x in test_string:\n try: \n int(x)\n count += 1\n digits.append(int(x))\n except:\n pass\n \nprint(\"Number of digits:\", str(count) + \";\")\nprint(\"They are:\", digits)",
"_____no_output_____"
]
],
[
[
"## Challenge (Tricky)\n\nSame as above -- but were consecutive digits are available, return as a single number. \n\nEx. \"2a78b123\" returns \"3 numbers, they are: 2, 78, 123\"",
"_____no_output_____"
]
],
[
[
"test_string",
"_____no_output_____"
],
[
"groups = []\nuniquekeys = []\nfor k, g in groupby(test_string, lambda x: x.isdigit()):\n groups.append(list(g))\n uniquekeys.append(k) ",
"_____no_output_____"
],
[
"print(groups)\nprint(uniquekeys)",
"_____no_output_____"
],
[
"numbers = []\nfor x, y in izip(groups, uniquekeys):\n if y:\n numbers.append(int(''.join([j for j in x])))\nprint(\"Number:\", np.sum(uniquekeys))\nprint(\"They are:\", numbers)",
"_____no_output_____"
],
[
"# In one cell\n\ndef solution_2(test_string):\n groups = []\n uniquekeys = []\n for k, g in groupby(test_string, lambda x: x.isdigit()):\n if k:\n groups.append(int(''.join([j for j in g])))\n\n return len(groups), groups\n \nprint(solution_2(test_string))",
"_____no_output_____"
]
],
[
[
"## Challenge (Tricky)\n\nSame as above, but do it a second way.\n",
"_____no_output_____"
]
],
[
[
"def solution_3(test_string):\n \"\"\"Regular expressions can be a very powerful and useful tool.\"\"\"\n groups = [int(j) for j in re.findall(r'\\d+', test_string)]\n return len(groups), groups\n\nsolution_3(test_string)",
"_____no_output_____"
]
],
[
[
"## Challenge (Hard)\n\nSame as above, but all valid numbers expressed in digits, commas, and decimal points. \n\nEx. \"a23.42dx9,331nm87,55\" -> 4; 23.42, 9331, 87, 55\n\nLeft as an exercise :) \n\nDon't spend much time on this one.",
"_____no_output_____"
],
[
"## Generators",
"_____no_output_____"
]
],
[
[
"def ex1(num):\n \"\"\"A stupid example generator to prove a point.\"\"\"\n while num > 1:\n num += 1\n yield num ",
"_____no_output_____"
],
[
"hey = ex1(5)",
"_____no_output_____"
],
[
"hey.next()",
"_____no_output_____"
],
[
"hey.next()",
"_____no_output_____"
]
],
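Because the generator above never terminates on its own, it is safest to consume it with an explicit bound; a minimal sketch using `itertools.islice` (the generator is redefined here so the snippet stands alone):

```python
from itertools import islice

def ex1(num):
    # counts upward forever, same shape as the generator above
    while num > 1:
        num += 1
        yield num

print(list(islice(ex1(5), 4)))  # [6, 7, 8, 9]
```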
[
[
"# Gotchas\n\nModifying a dictionary's keys while iterating over it. \n\n```python\nfor key in dictionary:\n if key == \"bat\":\n del dictionary[key]\n```\n\nIf you have to do someeven_better_name like this: \n\n```python\nlist_of_keys = dictionary.keys()\nfor key in list_of_keys:\n if key == \"bat\":\n del dictionary[key]\n```",
"_____no_output_____"
]
],
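The same gotcha as a runnable snippet; in Python 3 the unsafe loop raises `RuntimeError: dictionary changed size during iteration`, and copying the keys first avoids it (the dictionary contents here are made up):

```python
animals = {"bat": 1, "cat": 2, "rat": 3}

# safe: iterate over a snapshot of the keys, then mutate the dict
for key in list(animals.keys()):
    if key == "bat":
        del animals[key]

print(animals)  # {'cat': 2, 'rat': 3}
```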
[
[
"even_better_name = 5",
"_____no_output_____"
],
[
"even_better_name = 5",
"_____no_output_____"
],
[
"even_better_name = 5",
"_____no_output_____"
],
[
"even_better_name = 5",
"_____no_output_____"
],
[
"even_better_name = 5",
"_____no_output_____"
],
[
"even_better_name = 5",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e1f3df79d73a2afa318d24a77feca633dde6ab | 94,000 | ipynb | Jupyter Notebook | model.ipynb | akshatgurnani/Admission-Prediction-Model | e592133125cf726b582e49b6a5bf5bd3fd7fc36f | [
"MIT"
] | null | null | null | model.ipynb | akshatgurnani/Admission-Prediction-Model | e592133125cf726b582e49b6a5bf5bd3fd7fc36f | [
"MIT"
] | null | null | null | model.ipynb | akshatgurnani/Admission-Prediction-Model | e592133125cf726b582e49b6a5bf5bd3fd7fc36f | [
"MIT"
] | null | null | null | 137.426901 | 19,044 | 0.858787 | [
[
[
"# necessary Imports\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\n%matplotlib inline",
"_____no_output_____"
],
[
"df= pd.read_csv('Admission_Prediction.csv')\ndf.head()",
"_____no_output_____"
],
[
"df.isnull().sum()",
"_____no_output_____"
],
[
"sns.heatmap(df.isnull())",
"_____no_output_____"
],
[
"df['GRE Score'].fillna(df['GRE Score'].mode()[0],inplace=True)\ndf['TOEFL Score'].fillna(df['TOEFL Score'].mode()[0],inplace=True)\ndf['University Rating'].fillna(df['University Rating'].mean(),inplace=True)",
"_____no_output_____"
],
[
"x=df.drop(['Chance of Admit','Serial No.'],axis=1)\ny=df['Chance of Admit']",
"_____no_output_____"
],
[
"x.head()",
"_____no_output_____"
],
[
"plt.scatter(df['GRE Score'],y)",
"_____no_output_____"
],
[
"plt.scatter(df['TOEFL Score'],y)",
"_____no_output_____"
],
[
"plt.scatter(df['CGPA'],y)",
"_____no_output_____"
],
[
"plt.scatter(df['University Rating'],y)",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\ntrain_x,test_x,train_y,test_y=train_test_split(x,y,test_size=0.30, random_state=102)",
"_____no_output_____"
],
[
"train_x.head()",
"_____no_output_____"
],
[
"from sklearn.linear_model import LinearRegression\nmodel = LinearRegression()\nmodel.fit(train_x, train_y)",
"_____no_output_____"
],
[
"from sklearn.metrics import r2_score\nscore= r2_score(model.predict(test_x),test_y)\nscore",
"_____no_output_____"
],
[
"filename = 'finalized_model.pickle'\npickle.dump(model, open(filename, 'wb'))",
"_____no_output_____"
],
[
"loaded_model = pickle.load(open(filename, 'rb'))\nprediction=loaded_model.predict(([[320,120,5,5,5,10,1]]))\nprint(prediction[0])",
"0.981906319606733\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e1f545200786596e636624af4277e1b89425ab | 5,778 | ipynb | Jupyter Notebook | csv_to_html.ipynb | abgondin/Web-Design-Challenge | 27de1172def3529301fb4e3a3196ef3a9feead04 | [
"ADSL"
] | null | null | null | csv_to_html.ipynb | abgondin/Web-Design-Challenge | 27de1172def3529301fb4e3a3196ef3a9feead04 | [
"ADSL"
] | null | null | null | csv_to_html.ipynb | abgondin/Web-Design-Challenge | 27de1172def3529301fb4e3a3196ef3a9feead04 | [
"ADSL"
] | null | null | null | 26.027027 | 89 | 0.36639 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"cities = pd.read_csv(\"Resources/cities.csv\")",
"_____no_output_____"
],
[
"cities.head()",
"_____no_output_____"
],
[
"cities.to_html(\"cities.html\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7e1f8615c1585f3cf7a21564f49ab6768680585 | 199,163 | ipynb | Jupyter Notebook | ETL_Cities_Health_Data_Project.ipynb | erikayi/Project-2-ETL-Cities-Health-Data | b19924299735a1a490062941b19dcf5b343332af | [
"MIT"
] | 2 | 2020-12-23T22:27:18.000Z | 2021-04-28T22:47:59.000Z | ETL_Cities_Health_Data_Project.ipynb | erikayi/miniProject-1.5-ETL-Cities-Health-Data | b19924299735a1a490062941b19dcf5b343332af | [
"MIT"
] | null | null | null | ETL_Cities_Health_Data_Project.ipynb | erikayi/miniProject-1.5-ETL-Cities-Health-Data | b19924299735a1a490062941b19dcf5b343332af | [
"MIT"
] | null | null | null | 33.338299 | 325 | 0.380437 | [
[
[
"import numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine",
"_____no_output_____"
]
],
[
[
"# 1. Store .csv files into dataframe individually ",
"_____no_output_____"
]
],
[
[
"# import .csv into dataframe of big cities health data\n# organized the big cities health data in the excel sheet \nbig_cities_csv_file = \"data/Big_Cities_Health_Data_Inventory.csv\"\nbig_cities_health_df = pd.read_csv(big_cities_csv_file)\nbig_cities_health_df.head()",
"_____no_output_____"
],
[
"# import 2010 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2010_csv_file = \"data/2010_hospital_1000population_beds_ownership_type.csv\"\ndf_2010 = pd.read_csv(hospital_beds_2010_csv_file)\ndf_2010.head()",
"_____no_output_____"
],
[
"# import 2011 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2011_csv_file = \"data/2011_hospital_1000population_beds_ownership_type.csv\"\ndf_2011 = pd.read_csv(hospital_beds_2011_csv_file)\ndf_2011.head()",
"_____no_output_____"
],
[
"# import 2012 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2012_csv_file = \"data/2012_hospital_1000population_beds_ownership_type.csv\"\ndf_2012 = pd.read_csv(hospital_beds_2012_csv_file)\ndf_2012.head()",
"_____no_output_____"
],
[
"# import 2013 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2013_csv_file = \"data/2013_hospital_1000population_beds_ownership_type.csv\"\ndf_2013 = pd.read_csv(hospital_beds_2013_csv_file)\ndf_2013.head()",
"_____no_output_____"
],
[
"# import 2014 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2014_csv_file = \"data/2014_hospital_1000population_beds_ownership_type.csv\"\ndf_2014 = pd.read_csv(hospital_beds_2014_csv_file)\ndf_2014.head()",
"_____no_output_____"
],
[
"# import 2015 hospital beds by ownership types; test results in 1,000 populations\n# added a year for each hospital dataset manually in the excel worksheet, since these are pretty small data\nhospital_beds_2015_csv_file = \"data/2015_hospital_1000population_beds_ownership_type.csv\"\ndf_2015 = pd.read_csv(hospital_beds_2015_csv_file)\ndf_2015.head()",
"_____no_output_____"
],
[
"# check the rows of the hospital data for year 2010, 2011, 2012, 2013, 2014, 2015 dataframe, they should have 51 rows for each state.\ndf_2015.shape",
"_____no_output_____"
],
[
"df_2014.shape",
"_____no_output_____"
],
[
"df_2013.shape",
"_____no_output_____"
],
[
"df_2012.shape",
"_____no_output_____"
],
[
"df_2011.shape",
"_____no_output_____"
],
[
"df_2010.shape",
"_____no_output_____"
],
[
"# combine all hospital data by years\nhosp_df=pd.concat([df_2010,df_2011,df_2012,df_2013,df_2014,df_2015], axis=0)\nhosp_df.head()",
"_____no_output_____"
],
[
"# check the rows again to be make sure we get the combined data of the year and state of hospital data.\nhosp_df.shape",
"_____no_output_____"
]
],
[
[
"# 2. Extract the data sources",
"_____no_output_____"
],
[
"## A. cleaning the Big Cities Health Data for year and state; plus, any information that needs to be cleaned out before loading into the sql database",
"_____no_output_____"
]
],
[
[
"# for references of created dataframe for big cities health data: \n\n# big_cities_csv_file = \"data/Big_Cities_Health_Data_Inventory.csv\"\n# big_cities_health_df = pd.read_csv(big_cities_csv_file)\n# big_cities_health_df.head()\n\n# Extract information of the cities data by year, category, indicator, gender, race/ethnicity, value, place\nhealth_cities = big_cities_health_df[['Year', 'Indicator Category', 'Indicator', 'Gender', 'Race/ Ethnicity', 'Value', 'Place']]\n# health_cities.head()\n\n# rename the big health data in cities and correct them in relation to the information they given \nnew_health_cities = health_cities.rename(columns={'Year':'year',\n 'Indicator Category':'category',\n 'Indicator':'cause_of_death',\n 'Gender':'gender',\n 'Race/ Ethnicity':'race_ethnicity',\n 'Value':'death_rate',\n 'Place':'city_state'})\nnew_health_cities.head()",
"_____no_output_____"
],
[
"# split the city and the state\nnew_health_cities[['city','state']] = new_health_cities.city_state.str.split(expand=True, pat=\",\")\nnew_health_cities.head()",
"_____no_output_____"
],
[
"# drop the city_state and city columns for cleaner look\nnew_health_cities_df = new_health_cities.drop(columns=['city_state', 'city'])\nnew_health_cities_df.head()",
"_____no_output_____"
],
[
"ordered_health_data = new_health_cities_df.sort_values(\"year\", ascending=True)\nordered_health_data.head()",
"_____no_output_____"
],
[
"ordered_health_data.shape",
"_____no_output_____"
],
[
"# split the year min.year and max.year of the data for the year included '-' \n\n# previous city health dataframe working on\n# new_health_cities[['year']] = new_health_cities.year.str.split(expand=True, pat=\"-\")\n\n# new city dataframe we worked on\nordered_health_data[['Year1','Year2']] = ordered_health_data['year'].str.split('-',expand=True)\nordered_health_data.head()",
"_____no_output_____"
],
[
"# end of the city data set \nordered_health_data.tail()",
"_____no_output_____"
],
[
"# check the city data rows to make sure we don't lose any information \nordered_health_data.shape",
"_____no_output_____"
],
[
"# store the extracted years in previous steps into max year\nordered_health_data['Max_Year'] = np.where((ordered_health_data['Year2'].isnull()), \n ordered_health_data['Year1'], ordered_health_data['Year2'])\nordered_health_data.head()",
"_____no_output_____"
],
[
"ordered_health_data.tail()",
"_____no_output_____"
],
[
"# drop the unnecessary year data in the city dataframe\nnew_ordered_health_data = ordered_health_data.drop(columns=['year', 'Year1', 'Year2'])\nnew_ordered_health_data.head()",
"_____no_output_____"
],
[
"# check rows once again\nnew_ordered_health_data.shape",
"_____no_output_____"
],
[
"# rename the max year into year to be constant with other dataframe which it will be joined together at the end\ncity_data = new_ordered_health_data.rename(columns={'Max_Year':'year'})\ncity_data.head()",
"_____no_output_____"
],
[
"city_data.tail()",
"_____no_output_____"
],
[
"# sort by year in ascending order in city data\nsort_city_data = city_data.sort_values(by='year',ascending=True, inplace=False)\nsort_city_data",
"_____no_output_____"
],
[
"# save this data in csv file as 'new_health_data.csv'\nsort_city_data.to_csv(r'D:\\Github\\Project-2-ETL-Cities-Health-Data\\data\\new_health_data.csv', index=True)",
"_____no_output_____"
]
],
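The same max-year extraction can also be written in one line with a split on '-'; a sketch on a few made-up values rather than the project data:

```python
import pandas as pd

sample = pd.DataFrame({'year': ['2010', '2012-2014', '2013']})

# the last piece after splitting on '-' is the max year of the range
sample['max_year'] = sample['year'].str.split('-').str[-1].astype(int)
print(sample)
```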
[
[
"## B. cleaning the hospital beds data for year and state; plus any necessary data that needs to be cleaned, such as null values ",
"_____no_output_____"
]
],
[
[
"# show hospital bed rate in dataframe structure that we made earlier \nhosp_df.head()",
"_____no_output_____"
],
[
"hosp_df.tail()",
"_____no_output_____"
],
[
"# check the rows if it's matching same as earlier after joining them together\nhosp_df.shape",
"_____no_output_____"
],
[
"# rename each columns for hospital bed data for each year and state, state local gov, non-profit, for-profit, and total\nnew_hosp_df = hosp_df.rename(columns={'Year':'year', 'Location':'state',\n 'State/Local Government':'state_local_gov',\n 'Non-Profit':'non_profit',\n 'For-Profit':'profit',\n 'Total':'total'})\nnew_hosp_df.head()",
"_____no_output_____"
],
[
"new_hosp_df.tail()",
"_____no_output_____"
],
[
"new_hosp_df.shape",
"_____no_output_____"
],
[
"# drop the null values if it exists, and they happened to have some null values after I joined them together, \n# so I went back to this part to fix them.\nhospital_df = new_hosp_df.dropna()\nhospital_df.head()",
"_____no_output_____"
],
[
"hospital_df.tail()",
"_____no_output_____"
],
[
"# as you can see here, the number of rows decreased from 306 to 253. This indicates there are some null values within the data.\nhospital_df.shape",
"_____no_output_____"
],
[
"# make a dictionary of the state for converting state name into abbreviation \n# so, it will match with the other data set of cities \n# and so, we can join both of the tables by each state and a year, which this was our goal \nus_state_abbrev = {\n'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR', 'California': 'CA', 'Colorado': 'CO',\n'Connecticut': 'CT', 'Delaware': 'DE', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI', 'Idaho': 'ID',\n'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA', 'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA',\n'Maine': 'ME', 'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN', 'Mississippi': 'MS',\n'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE', 'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ',\n'New Mexico': 'NM', 'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH', 'Oklahoma': 'OK',\n'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI', 'South Carolina': 'SC', 'South Dakota': 'SD',\n'Tennessee': 'TN', 'Texas': 'TX', 'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA',\n'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY'}",
"_____no_output_____"
],
[
"# replace the name of the state into abbreviation \nhospital_df['state'] = hospital_df['state'].map(us_state_abbrev).fillna(hospital_df['state'])\nhospital_df.head()",
"D:\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:2: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n \n"
],
[
"hospital_df.tail()",
"_____no_output_____"
],
[
"# check the rows using .shape function.\n# it is same as previous one. \n# so there is no errors on adding extra values into the dataframe table. whew!\nhospital_df.shape",
"_____no_output_____"
]
],
[
[
"# C. Cleaning the data before joining",
"_____no_output_____"
]
],
[
[
"# look into the data where the columns type \nsort_city_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 13512 entries, 639 to 13354\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 category 13512 non-null object \n 1 cause_of_death 13512 non-null object \n 2 gender 13512 non-null object \n 3 race_ethnicity 13512 non-null object \n 4 death_rate 13499 non-null float64\n 5 state 12899 non-null object \n 6 year 13512 non-null object \ndtypes: float64(1), object(6)\nmemory usage: 844.5+ KB\n"
],
[
"# convert the year object into integers \nsort_city_data['year'] = sort_city_data['year'].astype(int)",
"_____no_output_____"
],
[
"# check if the year turned into integer\nsort_city_data.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 13512 entries, 639 to 13354\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 category 13512 non-null object \n 1 cause_of_death 13512 non-null object \n 2 gender 13512 non-null object \n 3 race_ethnicity 13512 non-null object \n 4 death_rate 13499 non-null float64\n 5 state 12899 non-null object \n 6 year 13512 non-null int32 \ndtypes: float64(1), int32(1), object(5)\nmemory usage: 791.7+ KB\n"
],
[
"# check hospital bed data column type\nhospital_df.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 253 entries, 0 to 50\nData columns (total 6 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 year 253 non-null int64 \n 1 state 253 non-null object \n 2 state_local_gov 253 non-null float64\n 3 non_profit 253 non-null float64\n 4 profit 253 non-null float64\n 5 total 253 non-null float64\ndtypes: float64(4), int64(1), object(1)\nmemory usage: 13.8+ KB\n"
],
[
"# convert the column type into string for both of the data set\nsort_city_data['state']=sort_city_data['state'].str.strip()\nhospital_df['state']=hospital_df['state'].str.strip()",
"D:\\Anaconda\\lib\\site-packages\\ipykernel_launcher.py:3: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n This is separate from the ipykernel package so we can avoid doing imports until\n"
],
[
"# drop the duplicates of the city data to be make sure if they have anything repeated to avoid errors\nsort_city_data.drop_duplicates(keep='first')",
"_____no_output_____"
],
[
"# drop any null values left if exists\ncleaned_city = sort_city_data.dropna()\ncleaned_city.head()",
"_____no_output_____"
],
[
"# check the shape (rows) of the dataframe\n# 13512 rows matches with the cleaned dataframe on the previous part of the project \ncleaned_city.shape",
"_____no_output_____"
],
[
"# merge the both dataframees together at 'state' and 'year' on both left and right join \n# save it as joined_df \njoined_df= pd.merge(cleaned_city,hospital_df, how='inner', left_on=['state','year'], right_on=['state','year'])\njoined_df.head()",
"_____no_output_____"
],
[
"joined_df.tail()",
"_____no_output_____"
],
[
"joined_df",
"_____no_output_____"
],
[
"# check if the null values dropped\njoined_df.shape",
"_____no_output_____"
],
[
"# save the csv as new hospital data\njoined_df.to_csv(r'D:\\Github\\Project-2-ETL-Cities-Health-Data\\data\\new_hospital_data.csv', index=True)",
"_____no_output_____"
],
[
"# below are the work that I saved each dataframe for hospital beds data by the year.\n# I commented out the rest of the individual dataframe of hospital beds data by the year because\n# it was unnecessary to use, since we already have joined dataframe for hospital beds data ",
"_____no_output_____"
],
[
"# beds_2010 = hospital_beds_2010_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# beds_2010.head()\n\n# # rename each labels with year 2010\n# new_beds_2010 = beds_2010.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2010',\n# 'Non-Profit':'nonprofit_2010',\n# 'For-Profit':'profit_2010'})\n# new_beds_2010.head()",
"_____no_output_____"
],
[
"# beds_2011 = hospital_beds_2011_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# # beds_2010.head()\n\n# # rename each labels with year 2011\n# new_beds_2011 = beds_2011.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2011',\n# 'Non-Profit':'nonprofit_2011',\n# 'For-Profit':'profit_2011'})\n# new_beds_2011.head()",
"_____no_output_____"
],
[
"# beds_2012 = hospital_beds_2012_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# # beds_2010.head()\n\n# # rename each labels with year 2012\n# new_beds_2012 = beds_2012.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2012',\n# 'Non-Profit':'nonprofit_2012',\n# 'For-Profit':'profit_2012'})\n# new_beds_2012.head()",
"_____no_output_____"
],
[
"# beds_2013 = hospital_beds_2013_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# # beds_2010.head()\n\n# # rename each labels with year 2013\n# new_beds_2013 = beds_2013.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2013',\n# 'Non-Profit':'nonprofit_2013',\n# 'For-Profit':'profit_2013'})\n# new_beds_2013.head()",
"_____no_output_____"
],
[
"# beds_2014 = hospital_beds_2014_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# # beds_2010.head()\n\n# # rename each labels with year 2013\n# new_beds_2014 = beds_2014.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2014',\n# 'Non-Profit':'nonprofit_2014',\n# 'For-Profit':'profit_2014'})\n# new_beds_2014.head()",
"_____no_output_____"
],
[
"# beds_2015 = hospital_beds_2015_csv_file_df[['Year','Location', 'State/Local Government', 'Non-Profit', 'For-Profit']].copy()\n# # beds_2010.head()\n\n# # rename each labels with year 2015\n# new_beds_2015 = beds_2015.rename(columns={'Year':'year',\n# 'Location':'state',\n# 'State/Local Government':'state_local_2015',\n# 'Non-Profit':'nonprofit_2015',\n# 'For-Profit':'profit_2015'})\n# new_beds_2015.head()",
"_____no_output_____"
]
],
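Before trusting an inner join like the one above, it can help to see how many keys match on each side; a small sketch with toy frames (the column names mirror the real ones, the values are invented):

```python
import pandas as pd

left = pd.DataFrame({'state': ['CA', 'TX'], 'year': [2012, 2013], 'death_rate': [1.2, 3.4]})
right = pd.DataFrame({'state': ['CA', 'NY'], 'year': [2012, 2013], 'total': [2.5, 2.7]})

# indicator=True labels every row as 'both', 'left_only' or 'right_only'
check = pd.merge(left, right, on=['state', 'year'], how='outer', indicator=True)
print(check['_merge'].value_counts())
```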
[
[
"# Loading the extracted data into SQL database",
"_____no_output_____"
]
],
[
[
"# import dependencies \nfrom pin import username, password\n\n# make a connection string for the database on localhost, and create engine for the database we made \nrds_connection_string = (f\"{username}:{password}@localhost:5432/healthcities_db\")\nengine = create_engine(f'postgresql://{rds_connection_string}')",
"_____no_output_____"
]
],
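The `pin` module above only exists to keep the username and password out of the notebook; an equivalent pattern reads them from environment variables instead (a sketch, the variable names `DB_USER`/`DB_PASS` are invented):

```python
import os
from sqlalchemy import create_engine

# read credentials from the environment rather than a local module
user = os.environ.get('DB_USER', 'postgres')
pwd = os.environ.get('DB_PASS', '')
engine = create_engine(f'postgresql://{user}:{pwd}@localhost:5432/healthcities_db')
```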
[
[
"# Check the table names",
"_____no_output_____"
]
],
[
[
"engine.table_names()",
"_____no_output_____"
]
],
[
[
"# Use Pandas to load csv converted DataFrame into SQL database",
"_____no_output_____"
]
],
[
[
"cleaned_city.to_sql(name='health_cities', con=engine, if_exists='append', index=False)",
"_____no_output_____"
],
[
"cleaned_city.shape",
"_____no_output_____"
],
[
"hospital_df.to_sql(name='hospital_data', con=engine, if_exists='append', index=False)",
"_____no_output_____"
],
[
"hospital_df.shape",
"_____no_output_____"
],
[
"# this is where I previously loaded each converted DataFrame into SQL database, and tried to join them using SQL database\n# instead of joining in pandas dataframe. However, it went through with errors and ran some troubleshooting. \n# so, my collegue helped me out, and suggest them to join in pandas dataframe first just to make a life an ease. \n# and, whew! that worked through without having errors and went through succeessfully. so, think simple and fail fast! \n\n# new_beds_2010.to_sql(name='hospital_data_2010', con=engine, if_exists='append', index=False)\n# new_beds_2011.to_sql(name='hospital_data_2011', con=engine, if_exists='append', index=False)\n# new_beds_2012.to_sql(name='hospital_data_2012', con=engine, if_exists='append', index=False)\n# new_beds_2013.to_sql(name='hospital_data_2013', con=engine, if_exists='append', index=False)\n# new_beds_2014.to_sql(name='hospital_data_2014', con=engine, if_exists='append', index=False)\n# new_beds_2015.to_sql(name='hospital_data_2015', con=engine, if_exists='append', index=False)",
"_____no_output_____"
]
],
[
[
"# Confirm if the data has been added into SQL database query successfully for health_cities database.",
"_____no_output_____"
]
],
[
[
"cities_df = pd.read_sql_query('SELECT * FROM health_cities', con=engine)\ncities_df.head()",
"_____no_output_____"
],
[
"cities_df.shape",
"_____no_output_____"
],
[
"cities_df.tail()",
"_____no_output_____"
],
[
"cleaned_hospital_df = pd.read_sql_query('SELECT * FROM hospital_data', con=engine)\ncleaned_hospital_df.head()",
"_____no_output_____"
],
[
"cleaned_hospital_df.tail()",
"_____no_output_____"
],
[
"cleaned_hospital_df.shape",
"_____no_output_____"
],
[
"# This is just individual query that I worked with previous dataframe and tried to load each hospital beds data in each query.\n# However, these queries made the life more complicated with a full of errors and running into some troubleshooting.\n# So, I commented out just to show where I have made a mistake. \n\n# hospitals_2010 = pd.read_sql_query('SELECT * FROM hospital_data_2010', con=engine)\n# hospitals_2010.head()\n\n# hospitals_2011 = pd.read_sql_query('SELECT * FROM hospital_data_2011', con=engine)\n# hospitals_2011.head()\n\n# hospitals_2012 = pd.read_sql_query('SELECT * FROM hospital_data_2012', con=engine)\n# hospitals_2012.head()\n\n# hospitals_2013 = pd.read_sql_query('SELECT * FROM hospital_data_2013', con=engine)\n# hospitals_2013.head()\n\n# hospitals_2014 = pd.read_sql_query('SELECT * FROM hospital_data_2014', con=engine)\n# hospitals_2014.head()\n\n# hospitals_2015 = pd.read_sql_query('SELECT * FROM hospital_data_2015', con=engine)\n# hospitals_2015.head()",
"_____no_output_____"
]
],
[
[
"# Merging the data together using SQL database",
"_____no_output_____"
]
],
[
[
"merged_health_data = pd.read_sql_query('SELECT hc.category, hc.cause_of_death, hc.gender, hc.race_ethnicity, hc.death_rate, ho.state, hc.year, ho.state_local_gov, ho.non_profit, ho.profit, ho.total FROM health_cities hc INNER JOIN hospital_data ho ON hc.year = ho.year AND hc.state = ho.state ORDER BY hc.year ASC', \n con=engine)\nmerged_health_data.head()",
"_____no_output_____"
],
[
"merged_health_data.tail()",
"_____no_output_____"
],
[
"# always check for shape for correct number of rows \n# I've ran into problem where I have more rows that I have after I join these two data tables \n# and my colleague helped me that I should check for number of rows count just to make sure I am inserting the tables correctly.\nmerged_health_data.shape",
"_____no_output_____"
]
],
[
[
"# Source we used ",
"_____no_output_____"
]
],
[
[
"# Data Source 1- Health Status across US urban cities\n# https://data.world/health/big-cities-health\n\n# Data Source 2 - Hospital Data\n# https://www.kff.org/other/state-indicator/beds-by-ownership/?currentTimeframe=10&sortModel=%7B%22colId%22:%22Location%22,%22sort%22:%22asc%22%7D",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e205fed20d81c6126a0e2cc9ca9270981889bd | 21,227 | ipynb | Jupyter Notebook | DataCamp/Give Life: Predict Blood Donations/notebook.ipynb | lukzmu/data-courses | 9d49bd6d0bb01bcee966fc52833b8e3aa9241432 | [
"MIT"
] | null | null | null | DataCamp/Give Life: Predict Blood Donations/notebook.ipynb | lukzmu/data-courses | 9d49bd6d0bb01bcee966fc52833b8e3aa9241432 | [
"MIT"
] | 52 | 2021-04-06T10:57:31.000Z | 2022-01-18T13:21:57.000Z | DataCamp/Give Life: Predict Blood Donations/notebook.ipynb | lukzmu/data-science | 806ae9caa635b486a81fc835218c04e340f1f186 | [
"MIT"
] | null | null | null | 21,227 | 21,227 | 0.660856 | [
[
[
"## 1. Inspecting transfusion.data file\n<p><img src=\"https://assets.datacamp.com/production/project_646/img/blood_donation.png\" style=\"float: right;\" alt=\"A pictogram of a blood bag with blood donation written in it\" width=\"200\"></p>\n<p>Blood transfusion saves lives - from replacing lost blood during major surgery or a serious injury to treating various illnesses and blood disorders. Ensuring that there's enough blood in supply whenever needed is a serious challenge for the health professionals. According to <a href=\"https://www.webmd.com/a-to-z-guides/blood-transfusion-what-to-know#1\">WebMD</a>, \"about 5 million Americans need a blood transfusion every year\".</p>\n<p>Our dataset is from a mobile blood donation vehicle in Taiwan. The Blood Transfusion Service Center drives to different universities and collects blood as part of a blood drive. We want to predict whether or not a donor will give blood the next time the vehicle comes to campus.</p>\n<p>The data is stored in <code>datasets/transfusion.data</code> and it is structured according to RFMTC marketing model (a variation of RFM). We'll explore what that means later in this notebook. First, let's inspect the data.</p>",
"_____no_output_____"
]
],
[
[
"# Print out the first 5 lines from the transfusion.data file\n!head -n 5 datasets/transfusion.data",
"Recency (months),Frequency (times),Monetary (c.c. blood),Time (months),\"whether he/she donated blood in March 2007\"\r\r\n2 ,50,12500,98 ,1\r\r\n0 ,13,3250,28 ,1\r\r\n1 ,16,4000,35 ,1\r\r\n2 ,20,5000,45 ,1\r\r\n"
]
],
[
[
"## 2. Loading the blood donations data\n<p>We now know that we are working with a typical CSV file (i.e., the delimiter is <code>,</code>, etc.). We proceed to loading the data into memory.</p>",
"_____no_output_____"
]
],
[
[
"# Import pandas\nimport pandas as pd\n\n# Read in dataset\ntransfusion = pd.read_csv('datasets/transfusion.data')\n\n# Print out the first rows of our dataset\ntransfusion.head()",
"_____no_output_____"
]
],
[
[
"## 3. Inspecting transfusion DataFrame\n<p>Let's briefly return to our discussion of RFM model. RFM stands for Recency, Frequency and Monetary Value and it is commonly used in marketing for identifying your best customers. In our case, our customers are blood donors.</p>\n<p>RFMTC is a variation of the RFM model. Below is a description of what each column means in our dataset:</p>\n<ul>\n<li>R (Recency - months since the last donation)</li>\n<li>F (Frequency - total number of donation)</li>\n<li>M (Monetary - total blood donated in c.c.)</li>\n<li>T (Time - months since the first donation)</li>\n<li>a binary variable representing whether he/she donated blood in March 2007 (1 stands for donating blood; 0 stands for not donating blood)</li>\n</ul>\n<p>It looks like every column in our DataFrame has the numeric type, which is exactly what we want when building a machine learning model. Let's verify our hypothesis.</p>",
"_____no_output_____"
]
],
[
[
"# Print a concise summary of transfusion DataFrame\ntransfusion.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 748 entries, 0 to 747\nData columns (total 5 columns):\nRecency (months) 748 non-null int64\nFrequency (times) 748 non-null int64\nMonetary (c.c. blood) 748 non-null int64\nTime (months) 748 non-null int64\nwhether he/she donated blood in March 2007 748 non-null int64\ndtypes: int64(5)\nmemory usage: 29.3 KB\n"
]
],
[
[
"## 4. Creating target column\n<p>We are aiming to predict the value in <code>whether he/she donated blood in March 2007</code> column. Let's rename this it to <code>target</code> so that it's more convenient to work with.</p>",
"_____no_output_____"
]
],
[
[
"# Rename target column as 'target' for brevity \ntransfusion.rename(\n columns={'whether he/she donated blood in March 2007': 'target'},\n inplace=True\n)\n\n# Print out the first 2 rows\ntransfusion.head(2)",
"_____no_output_____"
]
],
[
[
"## 5. Checking target incidence\n<p>We want to predict whether or not the same donor will give blood the next time the vehicle comes to campus. The model for this is a binary classifier, meaning that there are only 2 possible outcomes:</p>\n<ul>\n<li><code>0</code> - the donor will not give blood</li>\n<li><code>1</code> - the donor will give blood</li>\n</ul>\n<p>Target incidence is defined as the number of cases of each individual target value in a dataset. That is, how many 0s in the target column compared to how many 1s? Target incidence gives us an idea of how balanced (or imbalanced) is our dataset.</p>",
"_____no_output_____"
]
],
[
[
"# Print target incidence proportions, rounding output to 3 decimal places\ntransfusion.target.value_counts(normalize=True).round(3)",
"_____no_output_____"
]
],
[
[
"## 6. Splitting transfusion into train and test datasets\n<p>We'll now use <code>train_test_split()</code> method to split <code>transfusion</code> DataFrame.</p>\n<p>Target incidence informed us that in our dataset <code>0</code>s appear 76% of the time. We want to keep the same structure in train and test datasets, i.e., both datasets must have 0 target incidence of 76%. This is very easy to do using the <code>train_test_split()</code> method from the <code>scikit learn</code> library - all we need to do is specify the <code>stratify</code> parameter. In our case, we'll stratify on the <code>target</code> column.</p>",
"_____no_output_____"
]
],
[
[
"# Import train_test_split method\nfrom sklearn.model_selection import train_test_split\n\n# Split transfusion DataFrame into\n# X_train, X_test, y_train and y_test datasets,\n# stratifying on the `target` column\nX_train, X_test, y_train, y_test = train_test_split(\n transfusion.drop(columns='target'),\n transfusion.target,\n test_size=0.25,\n random_state=42,\n stratify=transfusion['target'],\n)\n\n# Print out the first 2 rows of X_train\nX_train.head(2)",
"_____no_output_____"
]
],
[
[
"## 7. Selecting model using TPOT\n<p><a href=\"https://github.com/EpistasisLab/tpot\">TPOT</a> is a Python Automated Machine Learning tool that optimizes machine learning pipelines using genetic programming.</p>\n<p><img src=\"https://assets.datacamp.com/production/project_646/img/tpot-ml-pipeline.png\" alt=\"TPOT Machine Learning Pipeline\"></p>\n<p>TPOT will automatically explore hundreds of possible pipelines to find the best one for our dataset. Note, the outcome of this search will be a <a href=\"https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html\">scikit-learn pipeline</a>, meaning it will include any pre-processing steps as well as the model.</p>\n<p>We are using TPOT to help us zero in on one model that we can then explore and optimize further.</p>",
"_____no_output_____"
]
],
[
[
"# Import TPOTClassifier and roc_auc_score\nfrom tpot import TPOTClassifier\nfrom sklearn.metrics import roc_auc_score\n\n# Instantiate TPOTClassifier\ntpot = TPOTClassifier(\n generations=5,\n population_size=20,\n verbosity=2,\n scoring='roc_auc',\n random_state=42,\n disable_update_check=True,\n config_dict='TPOT light'\n)\ntpot.fit(X_train, y_train)\n\n# AUC score for tpot model\ntpot_auc_score = roc_auc_score(y_test, tpot.predict_proba(X_test)[:, 1])\nprint(f'\\nAUC score: {tpot_auc_score:.4f}')\n\n# Print best pipeline steps\nprint('\\nBest pipeline steps:', end='\\n')\nfor idx, (name, transform) in enumerate(tpot.fitted_pipeline_.steps, start=1):\n # Print idx and transform\n print(f'{idx}. {transform}')",
"_____no_output_____"
]
],
[
[
"## 8. Checking the variance\n<p>TPOT picked <code>LogisticRegression</code> as the best model for our dataset with no pre-processing steps, giving us the AUC score of 0.7850. This is a great starting point. Let's see if we can make it better.</p>\n<p>One of the assumptions for linear regression models is that the data and the features we are giving it are related in a linear fashion, or can be measured with a linear distance metric. If a feature in our dataset has a high variance that's an order of magnitude or more greater than the other features, this could impact the model's ability to learn from other features in the dataset.</p>\n<p>Correcting for high variance is called normalization. It is one of the possible transformations you do before training a model. Let's check the variance to see if such transformation is needed.</p>",
"_____no_output_____"
]
],
[
[
"# X_train's variance, rounding the output to 3 decimal places\npd.DataFrame.var(X_train).round(3)",
"_____no_output_____"
]
],
[
[
"## 9. Log normalization\n<p><code>Monetary (c.c. blood)</code>'s variance is very high in comparison to any other column in the dataset. This means that, unless accounted for, this feature may get more weight by the model (i.e., be seen as more important) than any other feature.</p>\n<p>One way to correct for high variance is to use log normalization.</p>",
"_____no_output_____"
]
],
[
[
"# Import numpy\nimport numpy as np\n\n# Copy X_train and X_test into X_train_normed and X_test_normed\nX_train_normed, X_test_normed = X_train.copy(), X_test.copy()\n\n# Specify which column to normalize\ncol_to_normalize = 'Monetary (c.c. blood)'\n\n# Log normalization\nfor df_ in [X_train_normed, X_test_normed]:\n # Add log normalized column\n df_['monetary_log'] = np.log(df_[col_to_normalize])\n # Drop the original column\n df_.drop(columns=[col_to_normalize], inplace=True)\n\n# Check the variance for X_train_normed\nX_train_normed.var().round(3)",
"_____no_output_____"
]
],
[
[
"## 10. Training the linear regression model\n<p>The variance looks much better now. Notice that now <code>Time (months)</code> has the largest variance, but it's not the <a href=\"https://en.wikipedia.org/wiki/Order_of_magnitude\">orders of magnitude</a> higher than the rest of the variables, so we'll leave it as is.</p>\n<p>We are now ready to train the linear regression model.</p>",
"_____no_output_____"
]
],
[
[
"# Importing modules\nfrom sklearn import linear_model\n\n# Instantiate LogisticRegression\nlogreg = linear_model.LogisticRegression(\n solver='liblinear',\n random_state=42\n)\n\n# Train the model\nlogreg.fit(X_train_normed, y_train)\n\n# AUC score for tpot model\nlogreg_auc_score = roc_auc_score(y_test, logreg.predict_proba(X_test_normed)[:, 1])\nprint(f'\\nAUC score: {logreg_auc_score:.4f}')",
"\nAUC score: 0.7891\n"
]
],
[
[
"## 11. Conclusion\n<p>The demand for blood fluctuates throughout the year. As one <a href=\"https://www.kjrh.com/news/local-news/red-cross-in-blood-donation-crisis\">prominent</a> example, blood donations slow down during busy holiday seasons. An accurate forecast for the future supply of blood allows for an appropriate action to be taken ahead of time and therefore saving more lives.</p>\n<p>In this notebook, we explored automatic model selection using TPOT and AUC score we got was 0.7850. This is better than simply choosing <code>0</code> all the time (the target incidence suggests that such a model would have 76% success rate). We then log normalized our training data and improved the AUC score by 0.5%. In the field of machine learning, even small improvements in accuracy can be important, depending on the purpose.</p>\n<p>Another benefit of using logistic regression model is that it is interpretable. We can analyze how much of the variance in the response variable (<code>target</code>) can be explained by other variables in our dataset.</p>",
"_____no_output_____"
]
],
[
[
"# Importing itemgetter\nfrom operator import itemgetter\n\n# Sort models based on their AUC score from highest to lowest\nsorted(\n [('tpot', tpot_auc_score), ('logreg', logreg_auc_score)],\n key=itemgetter(1),\n reverse=True,\n)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e2296d21c33e166f04d1b83fceea05b60eedce | 23,353 | ipynb | Jupyter Notebook | notebooks/05-casestudies/01-gameofthrones.ipynb | khanin-th/Network-Analysis-Made-Simple | c287510a329363820b288e83bedd9217c5e3bea3 | [
"MIT"
] | null | null | null | notebooks/05-casestudies/01-gameofthrones.ipynb | khanin-th/Network-Analysis-Made-Simple | c287510a329363820b288e83bedd9217c5e3bea3 | [
"MIT"
] | null | null | null | notebooks/05-casestudies/01-gameofthrones.ipynb | khanin-th/Network-Analysis-Made-Simple | c287510a329363820b288e83bedd9217c5e3bea3 | [
"MIT"
] | null | null | null | 30.687254 | 723 | 0.609943 | [
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nimport pandas as pd\nimport networkx as nx\nimport community\nimport numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Introduction\n\nIn this chapter, we will use Game of Thrones as a case study to practice our newly learnt skills of network analysis.\n\nIt is suprising right? What is the relationship between a fatansy TV show/novel and network science or Python(not dragons).\n\nIf you haven't heard of Game of Thrones, then you must be really good at hiding. Game of Thrones is a hugely popular television series by HBO based on the (also) hugely popular book series A Song of Ice and Fire by George R.R. Martin. In this notebook, we will analyze the co-occurrence network of the characters in the Game of Thrones books. Here, two characters are considered to co-occur if their names appear in the vicinity of 15 words from one another in the books.\n\nThe figure below is a precusor of what we will analyse in this chapter.\n\n\n\n\nThe dataset is publicly avaiable for the 5 books at https://github.com/mathbeveridge/asoiaf. This is an interaction network and were created by connecting two characters whenever their names (or nicknames) appeared within 15 words of one another in one of the books. The edge weight corresponds to the number of interactions. \n\n\nBlog: https://networkofthrones.wordpress.com",
"_____no_output_____"
]
],
[
[
"from nams import load_data as cf\nbooks = cf.load_game_of_thrones_data()",
"_____no_output_____"
]
],
[
[
"The resulting DataFrame books has 5 columns: Source, Target, Type, weight, and book. Source and target are the two nodes that are linked by an edge. As we know a network can have directed or undirected edges and in this network all the edges are undirected. The weight attribute of every edge tells us the number of interactions that the characters have had over the book, and the book column tells us the book number.\n\nLet's have a look at the data.",
"_____no_output_____"
]
],
[
[
"# We also add this weight_inv to our dataset.\n# Why? we will discuss it in a later section.\nbooks['weight_inv'] = 1/books.weight",
"_____no_output_____"
],
[
"books.head()",
"_____no_output_____"
]
],
[
[
"From the above data we can see that the characters Addam Marbrand and Tywin Lannister have interacted 6 times in the first book.\n\nWe can investigate this data by using the pandas DataFrame. Let's find all the interactions of Robb Stark in the third book.",
"_____no_output_____"
]
],
[
[
"robbstark = (\n books.query(\"book == 3\")\n .query(\"Source == 'Robb-Stark' or Target == 'Robb-Stark'\")\n)",
"_____no_output_____"
],
[
"robbstark.head()",
"_____no_output_____"
]
],
[
[
"As you can see this data easily translates to a network problem. Now it's time to create a network.\nWe create a graph for each book. It's possible to create one `MultiGraph`(Graph with multiple edges between nodes) instead of 5 graphs, but it is easier to analyse and manipulate individual `Graph` objects rather than a `MultiGraph`.",
"_____no_output_____"
]
],
[
[
"# example of creating a MultiGraph\n\n# all_books_multigraph = nx.from_pandas_edgelist(\n# books, source='Source', target='Target',\n# edge_attr=['weight', 'book'],\n# create_using=nx.MultiGraph)",
"_____no_output_____"
],
[
"# we create a list of graph objects using\n# nx.from_pandas_edgelist and specifying\n# the edge attributes.\n\ngraphs = [nx.from_pandas_edgelist(\n books[books.book==i],\n source='Source', target='Target',\n edge_attr=['weight', 'weight_inv'])\n for i in range(1, 6)]",
"_____no_output_____"
],
[
"# The Graph object associated with the first book.\ngraphs[0]",
"_____no_output_____"
],
[
"# To access the relationship edges in the graph with\n# the edge attribute weight data (data=True)\nrelationships = list(graphs[0].edges(data=True))",
"_____no_output_____"
],
[
"relationships[0:3]",
"_____no_output_____"
]
],
[
[
"## Finding the most important node i.e character in these networks.\n\nLet's use our network analysis knowledge to decrypt these Graphs that we have just created.\n\nIs it Jon Snow, Tyrion, Daenerys, or someone else? Let's see! Network Science offers us many different metrics to measure the importance of a node in a network as we saw in the first part of the tutorial. Note that there is no \"correct\" way of calculating the most important node in a network, every metric has a different meaning.\n\nFirst, let's measure the importance of a node in a network by looking at the number of neighbors it has, that is, the number of nodes it is connected to. For example, an influential account on Twitter, where the follower-followee relationship forms the network, is an account which has a high number of followers. This measure of importance is called degree centrality.\n\nUsing this measure, let's extract the top ten important characters from the first book (`graphs[0]`) and the fifth book (`graphs[4]`).\n\nNOTE: We are using zero-indexing and that's why the graph of the first book is acceseed by `graphs[0]`.",
"_____no_output_____"
]
],
[
[
"# We use the in-built degree_centrality method\ndeg_cen_book1 = nx.degree_centrality(graphs[0])\ndeg_cen_book5 = nx.degree_centrality(graphs[4])",
"_____no_output_____"
]
],
[
[
"`degree_centrality` returns a dictionary and to access the results we can directly use the name of the character.",
"_____no_output_____"
]
],
[
[
"deg_cen_book1['Daenerys-Targaryen']",
"_____no_output_____"
]
],
[
[
"Top 5 important characters in the first book according to degree centrality.",
"_____no_output_____"
]
],
[
[
"# The following expression sorts the dictionary by\n# degree centrality and returns the top 5 from a graph\n\nsorted(deg_cen_book1.items(),\n key=lambda x:x[1],\n reverse=True)[0:5]",
"_____no_output_____"
]
],
[
[
"Top 5 important characters in the fifth book according to degree centrality.",
"_____no_output_____"
]
],
[
[
"sorted(deg_cen_book5.items(),\n key=lambda x:x[1],\n reverse=True)[0:5]",
"_____no_output_____"
]
],
[
[
"To visualize the distribution of degree centrality let's plot a histogram of degree centrality.",
"_____no_output_____"
]
],
[
[
"plt.hist(deg_cen_book1.values(), bins=30)\nplt.show()",
"_____no_output_____"
]
],
[
[
"The above plot shows something that is expected, a high portion of characters aren't connected to lot of other characters while some characters are highly connected all through the network. A close real world example of this is a social network like Twitter where a few people have millions of connections(followers) but majority of users aren't connected to that many other users. This exponential decay like property resembles power law in real life networks.",
"_____no_output_____"
]
],
[
[
"# A log-log plot to show the \"signature\" of power law in graphs.\nfrom collections import Counter\nhist = Counter(deg_cen_book1.values())\nplt.scatter(np.log2(list(hist.keys())),\n np.log2(list(hist.values())),\n alpha=0.9)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nCreate a new centrality measure, weighted_degree(Graph, weight) which takes in Graph and the weight attribute and returns a weighted degree dictionary. Weighted degree is calculated by summing the weight of the all edges of a node and find the top five characters according to this measure.",
"_____no_output_____"
]
],
[
[
"from nams.solutions.got import weighted_degree\n\nplt.hist(list(weighted_degree(graphs[0], 'weight').values()), bins=30)\nplt.show()",
"_____no_output_____"
],
[
"sorted(weighted_degree(graphs[0], 'weight').items(), key=lambda x:x[1], reverse=True)[0:5]",
"_____no_output_____"
]
],
[
[
"## Betweeness centrality\n\nLet's do this for Betweeness centrality and check if this makes any difference. As different centrality method use different measures underneath, they find nodes which are important in the network. A centrality method like Betweeness centrality finds nodes which are structurally important to the network, which binds the network together and densely.",
"_____no_output_____"
]
],
[
[
"# First check unweighted (just the structure)\n\nsorted(nx.betweenness_centrality(graphs[0]).items(),\n key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
],
[
"# Let's care about interactions now\n\nsorted(nx.betweenness_centrality(graphs[0],\n weight='weight_inv').items(),\n key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
]
],
[
[
"We can see there are some differences between the unweighted and weighted centrality measures. Another thing to note is that we are using the weight_inv attribute instead of weight(the number of interactions between characters). This decision is based on the way we want to assign the notion of \"importance\" of a character. The basic idea behind betweenness centrality is to find nodes which are essential to the structure of the network. As betweenness centrality computes shortest paths underneath, in the case of weighted betweenness centrality it will end up penalising characters with high number of interactions. By using weight_inv we will prop up the characters with high interactions with other characters.",
"_____no_output_____"
],
[
"## PageRank\nThe billion dollar algorithm, PageRank works by counting the number and quality of links to a page to determine a rough estimate of how important the website is. The underlying assumption is that more important websites are likely to receive more links from other websites.\n\nNOTE: We don't need to worry about weight and weight_inv in PageRank as the algorithm uses weights in the opposite sense (larger weights are better). This may seem confusing as different centrality measures have different definition of weights. So it is always better to have a look at documentation before using weights in a centrality measure.",
"_____no_output_____"
]
],
[
[
"# by default weight attribute in PageRank is weight\n# so we use weight=None to find the unweighted results\nsorted(nx.pagerank_numpy(graphs[0],\n weight=None).items(),\n key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
],
[
"sorted(nx.pagerank_numpy(\n graphs[0], weight='weight').items(),\n key=lambda x:x[1], reverse=True)[0:10]",
"_____no_output_____"
]
],
[
[
"### Exercise\n\n#### Is there a correlation between these techniques?\n\n\nFind the correlation between these four techniques.\n\n- pagerank (weight = 'weight')\n- betweenness_centrality (weight = 'weight_inv')\n- weighted_degree\n- degree centrality\n\nHINT: Use pandas correlation ",
"_____no_output_____"
]
],
[
[
"from nams.solutions.got import correlation_centrality\n\ncorrelation_centrality(graphs[0])",
"_____no_output_____"
]
],
[
[
"## Evolution of importance of characters over the books\n\nAccording to degree centrality the most important character in the first book is Eddard Stark but he is not even in the top 10 of the fifth book. The importance changes over the course of five books, because you know stuff happens ;)\n\nLet's look at the evolution of degree centrality of a couple of characters like Eddard Stark, Jon Snow, Tyrion which showed up in the top 10 of degree centrality in first book.\n\nWe create a dataframe with character columns and index as books where every entry is the degree centrality of the character in that particular book and plot the evolution of degree centrality Eddard Stark, Jon Snow and Tyrion.\nWe can see that the importance of Eddard Stark in the network dies off and with Jon Snow there is a drop in the fourth book but a sudden rise in the fifth book",
"_____no_output_____"
]
],
[
[
"evol = [nx.degree_centrality(graph)\n for graph in graphs]\nevol_df = pd.DataFrame.from_records(evol).fillna(0)\nevol_df[['Eddard-Stark',\n 'Tyrion-Lannister',\n 'Jon-Snow']].plot()\nplt.show()",
"_____no_output_____"
],
[
"set_of_char = set()\nfor i in range(5):\n set_of_char |= set(list(\n evol_df.T[i].sort_values(\n ascending=False)[0:5].index))\nset_of_char",
"_____no_output_____"
]
],
[
[
"### Exercise\n\nPlot the evolution of betweenness centrality of the above mentioned characters over the 5 books.",
"_____no_output_____"
]
],
[
[
"from nams.solutions.got import evol_betweenness",
"_____no_output_____"
],
[
"evol_betweenness(graphs)",
"_____no_output_____"
]
],
[
[
"## So what's up with Stannis Baratheon?",
"_____no_output_____"
]
],
[
[
"sorted(nx.degree_centrality(graphs[4]).items(),\n key=lambda x:x[1], reverse=True)[:5]",
"_____no_output_____"
],
[
"sorted(nx.betweenness_centrality(graphs[4]).items(),\n key=lambda x:x[1], reverse=True)[:5]",
"_____no_output_____"
],
[
"nx.draw(nx.barbell_graph(5, 1), with_labels=True)",
"_____no_output_____"
]
],
[
[
"As we know the a higher betweenness centrality means that the node is crucial for the structure of the network, and in the case of Stannis Baratheon in the fifth book it seems like Stannis Baratheon has characterstics similar to that of node 5 in the above example as it seems to be the holding the network together.\n\nAs evident from the betweenness centrality scores of the above example of barbell graph, node 5 is the most important node in this network.",
"_____no_output_____"
]
],
[
[
"nx.betweenness_centrality(nx.barbell_graph(5, 1))",
"_____no_output_____"
]
],
[
[
"## Community detection in Networks\nA network is said to have community structure if the nodes of the network can be easily grouped into (potentially overlapping) sets of nodes such that each set of nodes is densely connected internally. There are multiple algorithms and definitions to calculate these communites in a network.\n\nWe will use louvain community detection algorithm to find the modules in our graph.",
"_____no_output_____"
]
],
[
[
"import nxviz as nv\nfrom nxviz import annotate\nplt.figure(figsize=(8, 8))\n\npartition = community.best_partition(graphs[0], randomize=False)\n\n# Annotate nodes' partitions\nfor n in graphs[0].nodes():\n graphs[0].nodes[n][\"partition\"] = partition[n]\n graphs[0].nodes[n][\"degree\"] = graphs[0].degree(n)\n \nnv.matrix(graphs[0], group_by=\"partition\", sort_by=\"degree\", node_color_by=\"partition\")\nannotate.matrix_block(graphs[0], group_by=\"partition\", color_by=\"partition\")\nannotate.matrix_group(graphs[0], group_by=\"partition\", offset=-8)",
"_____no_output_____"
]
],
[
[
"A common defining quality of a community is that\nthe within-community edges are denser than the between-community edges.",
"_____no_output_____"
]
],
[
[
"# louvain community detection find us 8 different set of communities\npartition_dict = {}\nfor character, par in partition.items():\n if par in partition_dict:\n partition_dict[par].append(character)\n else:\n partition_dict[par] = [character]",
"_____no_output_____"
],
[
"len(partition_dict)",
"_____no_output_____"
],
[
"partition_dict[2]",
"_____no_output_____"
]
],
[
[
"If we plot these communities of the network we see a denser network as compared to the original network which contains all the characters.",
"_____no_output_____"
]
],
[
[
"nx.draw(nx.subgraph(graphs[0], partition_dict[3]))",
"_____no_output_____"
],
[
"nx.draw(nx.subgraph(graphs[0],partition_dict[1]))",
"_____no_output_____"
]
],
[
[
"We can test this by calculating the density of the network and the community.\n\nLike in the following example the network between characters in a community is 5 times more dense than the original network.",
"_____no_output_____"
]
],
[
[
"nx.density(nx.subgraph(\n graphs[0], partition_dict[4])\n )/nx.density(graphs[0])",
"_____no_output_____"
]
],
[
[
"### Exercise \n\nFind the most important node in the partitions according to degree centrality of the nodes using the partition_dict we have already created.",
"_____no_output_____"
]
],
[
[
"from nams.solutions.got import most_important_node_in_partition",
"_____no_output_____"
],
[
"most_important_node_in_partition(graphs[0], partition_dict)",
"_____no_output_____"
]
],
[
[
"## Solutions\n\nHere are the solutions to the exercises above.",
"_____no_output_____"
]
],
[
[
"from nams.solutions import got\nimport inspect\n\nprint(inspect.getsource(got))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e22b71d61c997f08f931a49175e06a35089cb4 | 1,689 | ipynb | Jupyter Notebook | cn/.ipynb_checkpoints/sicp-2-08-checkpoint.ipynb | DamonDeng/sicp_exercise | 189bb880c1205fc43394eaf67a1fa01dbc34c1b0 | [
"MIT"
] | null | null | null | cn/.ipynb_checkpoints/sicp-2-08-checkpoint.ipynb | DamonDeng/sicp_exercise | 189bb880c1205fc43394eaf67a1fa01dbc34c1b0 | [
"MIT"
] | null | null | null | cn/.ipynb_checkpoints/sicp-2-08-checkpoint.ipynb | DamonDeng/sicp_exercise | 189bb880c1205fc43394eaf67a1fa01dbc34c1b0 | [
"MIT"
] | 1 | 2021-12-17T09:49:17.000Z | 2021-12-17T09:49:17.000Z | 18.977528 | 69 | 0.510361 | [
[
[
"## SICP 习题 (2.8) 解题总结:区间的减法",
"_____no_output_____"
],
[
"\nSICP 习题 2.8 需要我们完成区间运算的减法,区间运算的加法书中已经有了,代码如下:",
"_____no_output_____"
]
],
[
[
"(define (add-interval x y)\n (make-interval (+ (lower-bound x) (lower-bound y))\n\t\t (+ (upper-bound x) (upper-bound y))))",
"_____no_output_____"
]
],
[
[
"以上代码很简单,就是计算区间的加法时将两个区间的起点相加,称为新区间的起点,然后将两个区间的终点相加,成为新区间的终点。\n\n减法时加法的逆运算,我们看着加法的代码照猫画虎一番就可以了,代码如下:",
"_____no_output_____"
]
],
[
[
"(define (sub-interval x y)\n (make-interval (- (lower-bound x) (lower-bound y))\n\t\t (- (upper-bound x) (upper-bound y))))",
"_____no_output_____"
]
],
[
[
"这是一道完全不符合SICP难度级别的题目。。。。。。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7e22eab40964ccf1ebe73c101c11e3ad7e6c1a3 | 7,973 | ipynb | Jupyter Notebook | data visualizing/Data Cleaning and Preparation.ipynb | wangbinyq/my-jupyter-notebook | 417a992015e6c6bf7faeb55b441aca62eae5e76d | [
"MIT"
] | 1 | 2018-07-19T02:19:16.000Z | 2018-07-19T02:19:16.000Z | data visualizing/Data Cleaning and Preparation.ipynb | wangbinyq/my-jupyter-notebook | 417a992015e6c6bf7faeb55b441aca62eae5e76d | [
"MIT"
] | null | null | null | data visualizing/Data Cleaning and Preparation.ipynb | wangbinyq/my-jupyter-notebook | 417a992015e6c6bf7faeb55b441aca62eae5e76d | [
"MIT"
] | null | null | null | 23.244898 | 118 | 0.399473 | [
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"## handling missing data\n\nstring_data = pd.Series(['aardvark', 'artichoke', np.nan, 'avocado'])\n\n## missing data stands for data not avaliable (na)\n\n## na data handling function\n## dropna\n## fillna\n## isnull\n## notnull",
"_____no_output_____"
],
[
"string_data.dropna()",
"_____no_output_____"
],
[
"string_data.fillna('artichoke')",
"_____no_output_____"
],
[
"## Data transformation\n\n## duplicated\n## drop_duplicates\n## map",
"_____no_output_____"
],
[
"## Discretization and Binning\n\nages = [20, 22, 25, 27, 21, 23, 37, 31, 61, 45, 41, 32]\nbins = [18, 25, 35, 60, 100]\n\ncats = pd.cut(ages, bins)\ncats",
"_____no_output_____"
],
[
"cats.codes",
"_____no_output_____"
],
[
"cats.categories",
"_____no_output_____"
],
[
"pd.value_counts(cats)",
"_____no_output_____"
],
[
"## Detecting and Filtering Outliers\n\ndata = pd.DataFrame(np.random.randn(1000, 4))\ndata.describe()",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e22f2d5e52da1ee0188fef6595b2d3d4ac1604 | 34,547 | ipynb | Jupyter Notebook | examples/Structure Learning with Chow-Liu.ipynb | vbob/pgmpy | eaec44be4f5210f647261ed1f6501a618e6039a1 | [
"MIT"
] | 2,144 | 2015-01-05T21:25:04.000Z | 2022-03-31T08:24:15.000Z | examples/Structure Learning with Chow-Liu.ipynb | vbob/pgmpy | eaec44be4f5210f647261ed1f6501a618e6039a1 | [
"MIT"
] | 1,181 | 2015-01-04T18:19:44.000Z | 2022-03-30T17:21:19.000Z | examples/Structure Learning with Chow-Liu.ipynb | vbob/pgmpy | eaec44be4f5210f647261ed1f6501a618e6039a1 | [
"MIT"
] | 777 | 2015-01-01T11:13:27.000Z | 2022-03-28T12:31:57.000Z | 147.636752 | 14,196 | 0.882711 | [
[
[
"# Learning Tree Structure from Data using the Chow-Liu Algorithm ",
"_____no_output_____"
],
[
"In this notebook, we show an example for learning the structure of a Bayesian Network using the Chow-Liu algorithm. We will first build a model to generate some data and then attempt to learn the model's graph structure back from the generated data.",
"_____no_output_____"
],
[
"## First, create a tree graph",
"_____no_output_____"
]
],
[
[
"import networkx as nx\nimport matplotlib.pyplot as plt\n\nfrom pgmpy.models import BayesianNetwork\n\n# construct the tree graph structure\nmodel = BayesianNetwork([('A', 'B'), ('A', 'C'), ('B', 'D'), ('B', 'E'), ('C', 'F')])\nnx.draw_circular(model, with_labels=True, arrowsize=30, node_size=800, alpha=0.3, font_weight='bold')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## Then, add CPDs to our tree to create a Bayesian network",
"_____no_output_____"
]
],
[
[
"from pgmpy.factors.discrete import TabularCPD\n\n# add CPD to each edge\ncpd_a = TabularCPD('A', 2, [[0.4], [0.6]])\ncpd_b = TabularCPD('B', 3, [[0.6,0.2],[0.3,0.5],[0.1,0.3]], evidence=['A'], evidence_card=[2])\ncpd_c = TabularCPD('C', 2, [[0.3,0.4],[0.7,0.6]], evidence=['A'], evidence_card=[2])\ncpd_d = TabularCPD('D', 3, [[0.5,0.3,0.1],[0.4,0.4,0.8],[0.1,0.3,0.1]], evidence=['B'], evidence_card=[3])\ncpd_e = TabularCPD('E', 2, [[0.3,0.5,0.2],[0.7,0.5,0.8]], evidence=['B'], evidence_card=[3])\ncpd_f = TabularCPD('F', 3, [[0.3,0.6],[0.5,0.2],[0.2,0.2]], evidence=['C'], evidence_card=[2])\nmodel.add_cpds(cpd_a, cpd_b, cpd_c, cpd_d, cpd_e, cpd_f)\n",
"_____no_output_____"
]
],
[
[
"## Next, generate sample data from our tree Bayesian network",
"_____no_output_____"
]
],
[
[
"from pgmpy.sampling import BayesianModelSampling\n\n# sample data from BN\ninference = BayesianModelSampling(model)\ndf_data = inference.forward_sample(size=10000)\nprint(df_data)\n",
"Generating for node: D: 100%|██████████| 6/6 [00:00<00:00, 275.41it/s]"
]
],
[
[
"## Finally, apply the Chow-Liu algorithm to learn the tree graph from sample data",
"_____no_output_____"
]
],
[
[
"from pgmpy.estimators import TreeSearch\n\n# learn graph structure \nest = TreeSearch(df_data, root_node=\"A\")\ndag = est.estimate(estimator_type=\"chow-liu\")\nnx.draw_circular(dag, with_labels=True, arrowsize=30, node_size=800, alpha=0.3, font_weight='bold')\nplt.show()\n",
"Building tree: 100%|██████████| 15/15.0 [00:00<00:00, 4518.10it/s]\n"
]
],
[
[
"## To parameterize the learned graph from data, check out the other tutorials for more info",
"_____no_output_____"
]
],
[
[
"from pgmpy.estimators import BayesianEstimator\n\n# there are many choices of parametrization, here is one example\nmodel = BayesianNetwork(dag.edges())\nmodel.fit(df_data, estimator=BayesianEstimator, prior_type='dirichlet', pseudo_counts=0.1)\nmodel.get_cpds()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e244ce6785e2ce1c4be3cb7cf6ed2cb1d5f6c6 | 15,711 | ipynb | Jupyter Notebook | onnxruntime/python/tools/bert/notebooks/Inference_GPT2_with_OnnxRuntime_on_CPU.ipynb | lienching/onnxruntime | 034f01b2845dc53d52681247760be40b86338868 | [
"MIT"
] | null | null | null | onnxruntime/python/tools/bert/notebooks/Inference_GPT2_with_OnnxRuntime_on_CPU.ipynb | lienching/onnxruntime | 034f01b2845dc53d52681247760be40b86338868 | [
"MIT"
] | null | null | null | onnxruntime/python/tools/bert/notebooks/Inference_GPT2_with_OnnxRuntime_on_CPU.ipynb | lienching/onnxruntime | 034f01b2845dc53d52681247760be40b86338868 | [
"MIT"
] | 2 | 2020-05-21T20:08:25.000Z | 2021-04-19T10:39:13.000Z | 36.537209 | 310 | 0.605754 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved. \nLicensed under the MIT License.",
"_____no_output_____"
],
[
"# Inference PyTorch GPT2 Model with ONNX Runtime on CPU\n\nIn this tutorial, you'll be introduced to how to load a GPT2 model from PyTorch, convert it to ONNX, and inference it using ONNX Runtime.\n\n**Note: this work is still in progresss. Need install ort_nightly package before onnxruntime 1.3.0 is ready. The performance number of ort_nightly does not reflect the final result for onnxruntime 1.3.0. **",
"_____no_output_____"
],
[
"## Prerequisites ##\n\nIf you have Jupyter Notebook, you may directly run this notebook. We will use pip to install or upgrade [PyTorch](https://pytorch.org/), [OnnxRuntime](https://microsoft.github.io/onnxruntime/) and other required packages.\n\nOtherwise, you can setup a new environment. First, we install [AnaConda](https://www.anaconda.com/distribution/). Then open an AnaConda prompt window and run the following commands:\n\n```console\nconda create -n cpu_env python=3.6\nconda activate cpu_env\n\nconda install pytorch torchvision cpuonly -c pytorch\npip install onnxruntime\npip install transformers==2.5.1\npip install onnx psutil pytz pandas py-cpuinfo py3nvml netron\n\nconda install jupyter\njupyter notebook\n```\nThe last command will launch Jupyter Notebook and we can open this notebook in browser to continue.",
"_____no_output_____"
]
],
[
[
"# Enable pass state in input.\nenable_past_input = False",
"_____no_output_____"
],
[
"import os\n\ncache_dir = \"./gpt2\"\nif not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\noutput_dir = './gpt2_onnx'\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)",
"_____no_output_____"
]
],
[
[
"## Benchmark ##\n\nYou will need git clone the onnxruntime repository like\n```console\ngit clone https://github.com/microsoft/onnxruntime.git\n```\nThen update the bert_tools_dir according to the path in your machine.",
"_____no_output_____"
]
],
[
[
"# Assume you have git clone the repository of onnxruntime from github.\nbert_tools_dir = r'D:\\Git\\onnxruntime\\onnxruntime\\python\\tools\\bert'\nbenchmark_script = os.path.join(bert_tools_dir, 'benchmark_gpt2.py')\n\nif enable_past_input:\n %run $benchmark_script --model_type gpt2 --cache_dir $cache_dir --output_dir $output_dir --enable_optimization --enable_past_input\nelse:\n %run $benchmark_script --model_type gpt2 --cache_dir $cache_dir --output_dir $output_dir --enable_optimization",
"_____no_output_____"
]
],
[
[
"If you only need the benchmark results. You can skip the remaining parts.\n\nIn the following, we will introduce the benchmark script.\n\n### Load pretrained model",
"_____no_output_____"
]
],
[
[
"from transformers import GPT2Model, GPT2Tokenizer\nmodel_class, tokenizer_class, model_name_or_path = (GPT2Model, GPT2Tokenizer, 'gpt2')\ntokenizer = tokenizer_class.from_pretrained(model_name_or_path, cache_dir=cache_dir)\nmodel = model_class.from_pretrained(model_name_or_path, cache_dir=cache_dir)\nmodel.eval().cpu()",
"_____no_output_____"
],
[
"import numpy\nimport time\n\ndef pytorch_inference(model, input_ids, past=None, total_runs = 100):\n latency = []\n with torch.no_grad():\n for _ in range(total_runs):\n start = time.time()\n outputs = model(input_ids=input_ids, past=past)\n latency.append(time.time() - start)\n \n if total_runs > 1:\n print(\"PyTorch Inference time = {} ms\".format(format(sum(latency) * 1000 / len(latency), '.2f')))\n \n return outputs\n \ndef onnxruntime_inference(ort_session, input_ids, past=None, total_runs=100): \n # Use contiguous array as input might improve performance.\n # You can check the results from performance test tool to see whether you need it.\n ort_inputs = {\n 'input_ids': numpy.ascontiguousarray(input_ids.cpu().numpy())\n }\n \n if past is not None:\n for i, past_i in enumerate(past):\n ort_inputs[f'past_{i}'] = numpy.ascontiguousarray(past[i].cpu().numpy())\n \n latency = []\n for _ in range(total_runs):\n start = time.time()\n ort_outputs = ort_session.run(None, ort_inputs)\n latency.append(time.time() - start)\n \n if total_runs > 1:\n print(\"OnnxRuntime Inference time = {} ms\".format(format(sum(latency) * 1000 / len(latency), '.2f')))\n \n return ort_outputs\n\ndef inference(model, ort_session, input_ids, past=None, total_runs=100, verify_outputs=True):\n outputs = pytorch_inference(model, input_ids, past, total_runs)\n ort_outputs = onnxruntime_inference(ort_session, input_ids, past, total_runs)\n if verify_outputs:\n print('PyTorch and OnnxRuntime output 0 (last_state) are close:'.format(0), numpy.allclose(ort_outputs[0], outputs[0].cpu(), rtol=1e-05, atol=1e-04))\n\n if enable_past_input:\n for layer in range(model.config.n_layer):\n print('PyTorch and OnnxRuntime layer {} state (present_{}) are close:'.format(layer, layer), numpy.allclose(ort_outputs[1 + layer], outputs[1][layer].cpu(), rtol=1e-05, atol=1e-04)) ",
"_____no_output_____"
],
[
"import torch\nimport os\n\ninputs = tokenizer.encode_plus(\"Here is an example input for GPT2 model\", add_special_tokens=True, return_tensors='pt')\ninput_ids = inputs['input_ids']\n\n# run without past so that we can know the shape of past from output.\noutputs = model(input_ids=input_ids, past=None)",
"_____no_output_____"
],
[
"num_layer = model.config.n_layer \npresent_names = [f'present_{i}' for i in range(num_layer)]\noutput_names = [\"last_state\"] + present_names\n\ninput_names = ['input_ids']\ndynamic_axes= {'input_ids': {0: 'batch_size', 1: 'seq_len'},\n #'token_type_ids' : {0: 'batch_size', 1: 'seq_len'},\n #'attention_mask' : {0: 'batch_size', 1: 'seq_len'},\n 'last_state' : {0: 'batch_size', 1: 'seq_len'}\n }\nfor name in present_names:\n dynamic_axes[name] = {1: 'batch_size', 3: 'seq_len'}\n \nif enable_past_input:\n past_names = [f'past_{i}' for i in range(num_layer)]\n input_names = ['input_ids'] + past_names #+ ['token_type_ids', 'attention_mask']\n dummy_past = [torch.zeros(list(outputs[1][0].shape)) for _ in range(num_layer)]\n for name in past_names:\n dynamic_axes[name] = {1: 'batch_size', 3: 'seq_len'}\n export_inputs = (inputs['input_ids'], tuple(dummy_past)) #, inputs['token_type_ids'], inputs['attention_mask'])\nelse:\n export_inputs = (inputs['input_ids'])\n\nexport_model_path = os.path.join(output_dir, 'gpt2_past{}.onnx'.format(int(enable_past_input)))\n\ntorch.onnx.export(model,\n args=export_inputs,\n f=export_model_path,\n input_names=input_names,\n output_names=output_names,\n dynamic_axes=dynamic_axes,\n opset_version=11,\n do_constant_folding = True,\n verbose=False)",
"_____no_output_____"
],
[
"def remove_past_outputs(export_model_path, output_model_path):\n from onnx import ModelProto\n from OnnxModel import OnnxModel\n\n model = ModelProto()\n with open(export_model_path, \"rb\") as f:\n model.ParseFromString(f.read())\n bert_model = OnnxModel(model)\n\n # remove past state outputs and only keep the first output.\n keep_output_names = [bert_model.model.graph.output[0].name]\n logger.info(f\"Prune graph to keep the first output and drop past state outputs:{keep_output_names}\")\n bert_model.prune_graph(keep_output_names)\n\n bert_model.save_model_to_file(output_model_path)\n \nif enable_past_input:\n onnx_model_path = export_model_path\nelse:\n onnx_model_path = os.path.join(output_dir, 'gpt2_past{}_out1.onnx'.format(int(enable_past_input)))\n remove_past_outputs(export_model_path, onnx_model_path)",
"_____no_output_____"
]
],
[
[
"## Inference with ONNX Runtime\n\n### OpenMP Environment Variable\n\nOpenMP environment variables are very important for CPU inference of GPT2 model. It has large performance impact on GPT2 model so you might need set it carefully according to benchmark script.\n\nSetting environment variables shall be done before importing onnxruntime. Otherwise, they might not take effect.",
"_____no_output_____"
]
],
[
[
"import psutil\n\n# You may change the settings in this cell according to Performance Test Tool result.\nuse_openmp = True\n\n# ATTENTION: these environment variables must be set before importing onnxruntime.\nif use_openmp:\n os.environ[\"OMP_NUM_THREADS\"] = str(psutil.cpu_count(logical=True))\nelse:\n os.environ[\"OMP_NUM_THREADS\"] = '1'\n\nos.environ[\"OMP_WAIT_POLICY\"] = 'ACTIVE'",
"_____no_output_____"
],
[
"import onnxruntime\nimport numpy\n\n# Print warning if user uses onnxruntime-gpu instead of onnxruntime package.\nif 'CUDAExecutionProvider' in onnxruntime.get_available_providers():\n print(\"warning: onnxruntime-gpu is not built with OpenMP. You might try onnxruntime package to test CPU inference.\")\n\nsess_options = onnxruntime.SessionOptions()\n\n# Optional: store the optimized graph and view it using Netron to verify that model is fully optimized.\n# Note that this will increase session creation time, so it is for debugging only.\n#sess_options.optimized_model_filepath = os.path.join(output_dir, \"optimized_model_cpu.onnx\")\n \nif use_openmp:\n sess_options.intra_op_num_threads=1\nelse:\n sess_options.intra_op_num_threads=psutil.cpu_count(logical=True)\n\n# Specify providers when you use onnxruntime-gpu for CPU inference.\nsession = onnxruntime.InferenceSession(onnx_model_path, sess_options, providers=['CPUExecutionProvider'])\n\n# Compare PyTorch and OnnxRuntime inference performance and results\n%time inference(model, session, input_ids, past=dummy_past if enable_past_input else None)",
"_____no_output_____"
],
[
"import gc\ndel session\ngc.collect()",
"_____no_output_____"
],
[
"optimized_model = os.path.join(output_dir, 'gpt2_past{}_optimized.onnx'.format(int(enable_past_input)))",
"_____no_output_____"
],
[
"bert_opt_script = os.path.join(bert_tools_dir, 'bert_model_optimization.py')",
"_____no_output_____"
],
[
"# Local directory corresponding to https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/bert/\n%run $bert_opt_script --model_type gpt2 --input $onnx_model_path --output $optimized_model --opt_level 0",
"_____no_output_____"
],
[
"session = onnxruntime.InferenceSession(optimized_model, sess_options, providers=['CPUExecutionProvider'])\n\n%time inference(model, session, input_ids, past=dummy_past if enable_past_input else None, verify_outputs=False)",
"_____no_output_____"
]
],
[
[
"## Additional Info\n\nNote that running Jupyter Notebook has slight impact on performance result since Jupyter Notebook is using system resources like CPU and memory etc. It is recommended to close Jupyter Notebook and other applications, then run the benchmark script in a console to get more accurate performance numbers.\n\n[OnnxRuntime C API](https://github.com/microsoft/onnxruntime/blob/master/docs/C_API.md) could get slightly better performance than python API. If you use C API in inference, you can use OnnxRuntime_Perf_Test.exe built from source to measure performance instead.\n\nHere is the machine configuration that generated the above results. The machine has GPU but not used in CPU inference.\nYou might get slower or faster result based on your hardware.",
"_____no_output_____"
]
],
[
[
"machine_info_script = os.path.join(bert_tools_dir, 'MachineInfo.py')\n%run $machine_info_script --silent",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e245334e51fb570e2e5890c82e6007e039f6e1 | 243,553 | ipynb | Jupyter Notebook | .ipynb_checkpoints/GMDataReductionNotebook-checkpoint.ipynb | garethcmurphy/datareduction | 53a57bd32cb7edc8618ed0ebfb774b5c72d76d9a | [
"MIT"
] | null | null | null | .ipynb_checkpoints/GMDataReductionNotebook-checkpoint.ipynb | garethcmurphy/datareduction | 53a57bd32cb7edc8618ed0ebfb774b5c72d76d9a | [
"MIT"
] | null | null | null | .ipynb_checkpoints/GMDataReductionNotebook-checkpoint.ipynb | garethcmurphy/datareduction | 53a57bd32cb7edc8618ed0ebfb774b5c72d76d9a | [
"MIT"
] | null | null | null | 778.124601 | 225,796 | 0.943515 | [
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"cd py4ast/core\nls",
"_____no_output_____"
],
[
"cd py4ast\n",
"/Users/gmurphy/datareduction/py4ast\n"
],
[
"cd core",
"/Users/gmurphy/datareduction/py4ast/core\n"
],
[
"from astropy.io import fits\nhdus = fits.open('3c120_stis.fits.gz')\nhdus",
"_____no_output_____"
],
[
"hdus?",
"_____no_output_____"
],
[
"primary = hdus[0].data # Primary (NULL) header data unit\nimg = hdus[1].data # Intensity data\nerr = hdus[2].data # Error per pixel\ndq = hdus[3].data # Data quality per pixel\n",
"_____no_output_____"
],
[
"plt.imshow(img)",
"_____no_output_____"
],
[
"plt.clf()\nplt.imshow(img, origin = 'lower')",
"_____no_output_____"
],
[
"plt.imshow(img, origin = 'lower')",
"_____no_output_____"
],
[
"%matplotlib inline \n",
"_____no_output_____"
],
[
"plt.imshow(img, origin = 'lower')",
"_____no_output_____"
],
[
"plt.clf()\nplt.imshow(img, origin = 'lower', vmin = -10, vmax = 65)\nplt.colorbar()",
"_____no_output_____"
],
[
"img?",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7e25856b1f52facfd9e41426b1a033a9cc4c7d3 | 202,255 | ipynb | Jupyter Notebook | Data cleaning/Tuberculosis.ipynb | olgarozhdestvina/Data-Science-and-Machine-Learning | 3d5b6ed5d20056458af540091aa5ac58cb5b2e44 | [
"MIT"
] | null | null | null | Data cleaning/Tuberculosis.ipynb | olgarozhdestvina/Data-Science-and-Machine-Learning | 3d5b6ed5d20056458af540091aa5ac58cb5b2e44 | [
"MIT"
] | null | null | null | Data cleaning/Tuberculosis.ipynb | olgarozhdestvina/Data-Science-and-Machine-Learning | 3d5b6ed5d20056458af540091aa5ac58cb5b2e44 | [
"MIT"
] | null | null | null | 183.368087 | 134,132 | 0.868686 | [
[
[
"### Tuberculosis WHO",
"_____no_output_____"
],
[
"#### General setup.\n___\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"plt.style.use('ggplot')\nplt.rcParams['figure.figsize'] = [12, 8]",
"_____no_output_____"
]
],
[
[
"<br>\n\n#### Load the data set.\n___\n",
"_____no_output_____"
]
],
[
[
"# Load the data set\ntb = pd.read_csv('../Data/tuberculosis.csv')\ntb.head()",
"_____no_output_____"
],
[
"tb.tail()",
"_____no_output_____"
]
],
[
[
"There are several issues with the data set: \n* Missing values\n* Confusing names (for example, m04 means male 0-4 years old)",
"_____no_output_____"
]
],
[
[
"tb.columns",
"_____no_output_____"
],
[
"# Plot the columns from m04 to fu for the last row in the data set.\nplt.plot(tb.loc[5768, 'm04':'fu'])\nplt.show()",
"_____no_output_____"
],
[
"# And now the same as above for all rows\nfor _, row in tb.iterrows():\n plt.plot(row['m04':'fu'], color='C0', alpha=0.1)",
"_____no_output_____"
]
],
[
[
"<br>\n\n#### Data cleaning.\n___\n",
"_____no_output_____"
]
],
[
[
"# Melt columns from m04 to fu into a sex and cases columns\ntb_melt = tb.melt(tb.columns[:2], tb.columns[2:], 'sex_age', 'cases')\ntb_melt",
"_____no_output_____"
],
[
"# Created a new column 'age' from 'sex'\ntb_melt['age'] = tb_melt.sex_age.apply(lambda x: x[1:])\ntb_melt['age']",
"_____no_output_____"
],
[
"def age_format(x):\n \"\"\" Reformatting age column \"\"\"\n if len(x) == 1:\n return ''\n elif len(x) in [2,3]:\n if x == '65':\n return '65+'\n return f'{x[0]}-{x[1:]}' \n return f'{x[:2]}-{x[2:]}'",
"_____no_output_____"
],
[
"# Apply the function to \ntb_melt['age'] = tb_melt.age.apply(lambda x: age_format(x))",
"_____no_output_____"
],
[
"# Remove age from 'sex' column\ntb_melt['sex'] = tb_melt.sex_age.apply(lambda x: x[0])",
"_____no_output_____"
],
[
"# Drop all empty values\ntb_melt.dropna(inplace=True)\ntb_melt.head()",
"_____no_output_____"
],
[
"# Reset the index and drop the column index\nfinal = tb_melt.sort_values(['country', 'year', 'age', 'sex', 'cases']).reset_index()\nfinal.drop(['index', 'sex_age'], axis=1, inplace=True)\nfinal.head()",
"_____no_output_____"
],
[
"# Rearrange the column order\nfinal = final[['country', 'year', 'age', 'sex', 'cases']]\nfinal.head()",
"_____no_output_____"
],
[
"# Output to csv file\nfinal.to_csv('data/final_tb.csv', index=False)",
"_____no_output_____"
]
],
[
[
"<br>\n\n___\n\n#### End.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7e25c2c0096057964d8ee5074a496fc2fb3c3d0 | 50,122 | ipynb | Jupyter Notebook | course_2/course_material/Part_7_Deep_Learning/S43_L300/Minimal_example_All_Exercises.ipynb | Alexander-Meldrum/learning-data-science | a87cf6be80c67a8d1b57a96c042bdf423ba0a142 | [
"MIT"
] | null | null | null | course_2/course_material/Part_7_Deep_Learning/S43_L300/Minimal_example_All_Exercises.ipynb | Alexander-Meldrum/learning-data-science | a87cf6be80c67a8d1b57a96c042bdf423ba0a142 | [
"MIT"
] | null | null | null | course_2/course_material/Part_7_Deep_Learning/S43_L300/Minimal_example_All_Exercises.ipynb | Alexander-Meldrum/learning-data-science | a87cf6be80c67a8d1b57a96c042bdf423ba0a142 | [
"MIT"
] | null | null | null | 100.646586 | 33,384 | 0.842604 | [
[
[
"# Simple Linear Regression. Minimal example",
"_____no_output_____"
],
[
"# Using the same code as before, please solve the following exercises\n 1. Change the number of observations to 100,000 and see what happens.\n 2. Change the number of observations to 1,000,000 and see what happens.\n 3. Play around with the learning rate. Values like 0.0001, 0.001, 0.1, 1 are all interesting to observe. \n 4. Change the loss function. L2-norm loss (without dividing by 2) is a good way to start. \n 5. Тry with the L1-norm loss, given by the sum of the ABSOLUTE value of yj - tj. The L1-norm loss is given by:\n## $$ \\Sigma_i = |y_i-t_i| $$\n 6. Create a function f(x,z) = 13*xs + 7*zs - 12. Does the algorithm work in the same way?\n \n \nUseful tip: When you change something, don't forget to RERUN all cells. This can be done easily by clicking:\nKernel -> Restart & Run All\nIf you don't do that, your algorithm will keep the OLD values of all parameters.\n\nYou can either use this file for all the exercises, or check the solutions of EACH ONE of them in the separate files we have provided. All other files are solutions of each problem. If you feel confident enough, you can simply change values in this file. Please note that it will be nice, if you return the file to starting position after you have solved a problem, so you can use the lecture as a basis for comparison.",
"_____no_output_____"
],
[
"### Import the relevant libraries",
"_____no_output_____"
]
],
[
[
"# We must always import the relevant libraries for our problem at hand. NumPy is a must for this example.\nimport numpy as np\n\n# matplotlib and mpl_toolkits are not necessary. We employ them for the sole purpose of visualizing the results. \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D",
"_____no_output_____"
]
],
[
[
"### Generate random input data to train on",
"_____no_output_____"
]
],
[
[
"# First, we should declare a variable containing the size of the training set we want to generate.\nobservations = 1000\n\n# We will work with two variables as inputs. You can think about them as x1 and x2 in our previous examples.\n# We have picked x and z, since it is easier to differentiate them.\n# We generate them randomly, drawing from an uniform distribution. There are 3 arguments of this method (low, high, size).\n# The size of xs and zs is observations by 1. In this case: 1000 x 1.\nxs = np.random.uniform(low=-10, high=10, size=(observations,1))\nzs = np.random.uniform(-10, 10, (observations,1))\n\n# Combine the two dimensions of the input into one input matrix. \n# This is the X matrix from the linear model y = x*w + b.\n# column_stack is a Numpy method, which combines two vectors into a matrix. Alternatives are stack, dstack, hstack, etc.\ninputs = np.column_stack((xs,zs))\n\n# Check if the dimensions of the inputs are the same as the ones we defined in the linear model lectures. \n# They should be n x k, where n is the number of observations, and k is the number of variables, so 1000 x 2.\nprint (inputs.shape)",
"(1000, 2)\n"
]
],
[
[
"### Generate the targets we will aim at",
"_____no_output_____"
]
],
[
[
"# We want to \"make up\" a function, use the ML methodology, and see if the algorithm has learned it.\n# We add a small random noise to the function i.e. f(x,z) = 2x - 3z + 5 + <small noise>\nnoise = np.random.uniform(-1, 1, (observations,1))\n\n# Produce the targets according to the f(x,z) = 2x - 3z + 5 + noise definition.\n# In this way, we are basically saying: the weights should be 2 and -3, while the bias is 5.\ntargets = 2*xs - 3*zs + 5 + noise\n\n# Check the shape of the targets just in case. It should be n x m, where m is the number of output variables, so 1000 x 1.\nprint (targets.shape)",
"(1000, 1)\n"
]
],
[
[
"### Plot the training data\nThe point is to see that there is a strong trend that our model should learn to reproduce.",
"_____no_output_____"
],
[
"### Initialize variables",
"_____no_output_____"
]
],
[
[
"# We will initialize the weights and biases randomly in some small initial range.\n# init_range is the variable that will measure that.\n# You can play around with the initial range, but we don't really encourage you to do so.\n# High initial ranges may prevent the machine learning algorithm from learning.\ninit_range = 0.1\n\n# Weights are of size k x m, where k is the number of input variables and m is the number of output variables\n# In our case, the weights matrix is 2x1 since there are 2 inputs (x and z) and one output (y)\nweights = np.random.uniform(low=-init_range, high=init_range, size=(2, 1))\n\n# Biases are of size 1 since there is only 1 output. The bias is a scalar.\nbiases = np.random.uniform(low=-init_range, high=init_range, size=1)\n\n#Print the weights to get a sense of how they were initialized.\nprint (weights)\nprint (biases)",
"[[0.02158668]\n [0.04520037]]\n[0.07680059]\n"
]
],
[
[
"### Set a learning rate",
"_____no_output_____"
]
],
[
[
"# Set some small learning rate (denoted eta in the lecture). \n# 0.02 is going to work quite well for our example. Once again, you can play around with it.\n# It is HIGHLY recommended that you play around with it.\nlearning_rate = 0.02",
"_____no_output_____"
]
],
[
[
"### Train the model",
"_____no_output_____"
]
],
[
[
"# We iterate over our training dataset 100 times. That works well with a learning rate of 0.02.\n# The proper number of iterations is something we will talk about later on, but generally\n# a lower learning rate would need more iterations, while a higher learning rate would need less iterations\n# keep in mind that a high learning rate may cause the loss to diverge to infinity, instead of converge to 0.\nfor i in range (100):\n \n # This is the linear model: y = xw + b equation\n outputs = np.dot(inputs,weights) + biases\n # The deltas are the differences between the outputs and the targets\n # Note that deltas here is a vector 1000 x 1\n deltas = outputs - targets\n \n # We are considering the L2-norm loss, but divided by 2, so it is consistent with the lectures.\n # Moreover, we further divide it by the number of observations.\n # This is simple rescaling by a constant. We explained that this doesn't change the optimization logic,\n # as any function holding the basic property of being lower for better results, and higher for worse results\n # can be a loss function.\n loss = np.sum(deltas ** 2) / 2 / observations\n \n # We print the loss function value at each step so we can observe whether it is decreasing as desired.\n print (loss)\n \n # Another small trick is to scale the deltas the same way as the loss function\n # In this way our learning rate is independent of the number of samples (observations).\n # Again, this doesn't change anything in principle, it simply makes it easier to pick a single learning rate\n # that can remain the same if we change the number of training samples (observations).\n # You can try solving the problem without rescaling to see how that works for you.\n deltas_scaled = deltas / observations\n \n # Finally, we must apply the gradient descent update rules from the relevant lecture.\n # The weights are 2x1, learning rate is 1x1 (scalar), inputs are 1000x2, and deltas_scaled are 1000x1\n # We must transpose the inputs so that we get an allowed operation.\n weights = weights - learning_rate * np.dot(inputs.T,deltas_scaled)\n biases = biases - learning_rate * np.sum(deltas_scaled)\n \n # The weights are updated in a linear algebraic way (a matrix minus another matrix)\n # The biases, however, are just a single number here, so we must transform the deltas into a scalar.\n # The two lines are both consistent with the gradient descent methodology. ",
"237249.78007243446\n5739587.921816474\n1992902565.643724\n719554963540.3586\n259830306790502.12\n9.382439477181739e+16\n3.387987017559843e+19\n1.2233978230391737e+22\n4.417674051463696e+24\n1.5952165074557885e+27\n5.76030661387594e+29\n2.0800394260452784e+32\n7.510996035316175e+34\n2.7122111598526613e+37\n9.793760163154793e+39\n3.536514396563739e+42\n1.2770308715701512e+45\n4.611342310744724e+47\n1.6651498707090051e+50\n6.01283510326613e+52\n2.1712271438771583e+55\n7.840273730021682e+57\n2.8311129185637116e+60\n1.0223112908630776e+63\n3.691553129418609e+65\n1.333015161733698e+68\n4.813500873795554e+70\n1.7381490719052497e+73\n6.276434294656793e+75\n2.2664124781865262e+78\n8.183986767219839e+80\n2.955227261174523e+83\n1.067128822858039e+86\n3.853388670087579e+88\n1.3914537705945411e+91\n5.024521950592025e+93\n1.8143485155956026e+96\n6.551589521180396e+98\n2.3657706821531062e+101\n8.54276798392397e+103\n3.084782704329477e+106\n1.1139111293713626e+109\n4.022318986669404e+111\n1.452454294055924e+114\n5.2447940685786336e+116\n1.89388849854841e+119\n6.83880739269138e+121\n2.4694846919539974e+124\n8.917277960353747e+126\n3.2200177827096285e+129\n1.162744344974406e+132\n4.198655110010857e+134\n1.516129044962873e+137\n5.474722788016975e+139\n1.9769154680607224e+142\n7.138616728525449e+144\n2.5777454635818573e+147\n9.30820623618128e+149\n3.361181488217665e+152\n1.2137183803280195e+155\n4.382721706370037e+157\n1.582595259890179e+160\n5.714731448694445e+162\n2.063582291593845e+165\n7.451569531150132e+167\n2.69075232442893e+170\n9.716272580096515e+172\n3.508533728416333e+175\n1.266927087724081e+178\n4.574857675184804e+180\n1.6519753149958247e+183\n5.965261949368312e+185\n2.154048538229604e+188\n7.778242002499892e+190\n2.8087133402842874e+193\n1.0142228315029865e+196\n3.6623458050646816e+198\n1.322468433884333e+201\n4.775416773047163e+203\n1.724396951337346e+206\n6.226775561380224e+208\n2.24848077246553e+211\n8.119235604866657e+213\n2.9318456984201305e+216\n1.0586857701471766e+219\n3.822900913632967e+221\n1.3804446803345712e+224\n4.984768265032192e+226\n1.7999935100658833e+229\n6.499753777938913e+231\n2.3470528608897527e+234\n8.475178168299134e+236\n3.0603761074722623e+239\n1.1050979381436075e+242\n3.9904946647160495e+244\n1.4409625716863748e+247\n5.203297604580957e+249\n1.878904177931045e+252\n6.784699200635173e+254\n2.4499462923004373e+257\n"
]
],
[
[
"### Print weights and biases and see if we have worked correctly.",
"_____no_output_____"
]
],
[
[
"# We print the weights and the biases, so we can see if they have converged to what we wanted.\n# When declared the targets, following the f(x,z), we knew the weights should be 2 and -3, while the bias: 5.\nprint (weights, biases)\n\n# Note that they may be convergING. So more iterations are needed.",
"[[-1.53536772e+125 -1.53536772e+125 -1.53536772e+125 ... -1.53536772e+125\n -1.53536772e+125 -1.53536772e+125]\n [-1.52680119e+124 -1.52680119e+124 -1.52680119e+124 ... -1.52680119e+124\n -1.52680119e+124 -1.52680119e+124]] [-4.2058021e+128]\n"
]
],
[
[
"### Plot last outputs vs targets\nSince they are the last ones at the end of the training, they represent the final model accuracy. <br/>\nThe closer this plot is to a 45 degree line, the closer target and output values are.",
"_____no_output_____"
]
],
[
[
"# We print the outputs and the targets in order to see if they have a linear relationship.\n# Again, that's not needed. Moreover, in later lectures, that would not even be possible.\nplt.plot(outputs,targets)\nplt.xlabel('outputs')\nplt.ylabel('targets')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e2633ef8c825bfd64b892e17c026c8631d127f | 277,356 | ipynb | Jupyter Notebook | codes/clase_14/interp_error_lineal.ipynb | mlares/computacion2020 | 185bfded8ef1670e80b1c2cdc1fceb365d962b0e | [
"MIT"
] | null | null | null | codes/clase_14/interp_error_lineal.ipynb | mlares/computacion2020 | 185bfded8ef1670e80b1c2cdc1fceb365d962b0e | [
"MIT"
] | null | null | null | codes/clase_14/interp_error_lineal.ipynb | mlares/computacion2020 | 185bfded8ef1670e80b1c2cdc1fceb365d962b0e | [
"MIT"
] | null | null | null | 909.363934 | 229,696 | 0.954978 | [
[
[
"import numpy as np\nfrom matplotlib import pyplot as plt\nimport interp_tools as it\nfrom scipy.interpolate import interp1d\n\nfrom numpy.polynomial import polynomial as P\nfrom functools import partial\nimport math",
"_____no_output_____"
]
],
[
[
"Sea $f(x)=e^x$ \n ",
"_____no_output_____"
]
],
[
[
"def f(x):\n z = np.cos(x) + np.sin(3*x) + np.cos(np.sqrt(x)) + np.cos(18*x)\n return z",
"_____no_output_____"
],
[
"f = lambda x: np.exp(x)",
"_____no_output_____"
]
],
[
[
"y una partición regular en el intervalo $[0, 1]$ donde se construye el polinomio interpolante de orden $n$, $P_n(x)$. ",
"_____no_output_____"
],
[
"#### Interpolación de Newton con N puntos:",
"_____no_output_____"
]
],
[
[
"N = 30",
"_____no_output_____"
],
[
"xd = np.linspace(2, 10, N)",
"_____no_output_____"
],
[
"yd = f(xd)",
"_____no_output_____"
],
[
"xi = np.linspace(min(xd), max(xd), 200)\nym = f(xi)",
"_____no_output_____"
]
],
[
[
"_______",
"_____no_output_____"
]
],
[
[
"yl = it.interp_newton(xi, xd, yd)",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12, 6))\nax = fig.add_subplot()\nax.plot(xi, yl, linewidth=1.4, linestyle='-', color='orchid', \n label='lagrange')\nax.plot(xd, yd, marker='o', linestyle='None', color='navy', markersize=5)\nax.grid()\nax.legend()",
"_____no_output_____"
],
[
"fig = plt.figure(figsize=(12, 6))\nax = fig.add_subplot()\n\nax.plot(xi, yl-ym, linewidth=3, color='indigo')\n\nax.plot(xd, [0]*len(xd), marker='o', linestyle='None', color='navy',\n markersize=8, mfc='white', mec='indigo', mew=2)\nax.set_ylabel('ERROR')\nax.set_xlabel('x')\nax.grid()",
"_____no_output_____"
]
],
[
[
"Veamos juntos los errores para diferentes N",
"_____no_output_____"
]
],
[
[
"fig, axs = plt.subplots(5, 4, figsize=[15, 18])\n\nfor N, ax in zip(range(6, 66, 3), axs.flat):\n xd = np.linspace(2, 10, N)\n yd = f(xd)\n \n xi = np.linspace(min(xd), max(xd), 200)\n ym = f(xi)\n \n ylgg = it.interp_lagrange(xi, xd, yd)\n mx = max(ylgg-ym)\n \n ylin = np.interp(xi, xd, yd)\n \n spline = interp1d(xd, yd, kind='cubic')\n ysp3 = spline(xi)\n \n #ax.plot(xi, ylgg-ym, linewidth=2, color='cornflowerblue', label='lagrange')\n ax.plot(xi, ylin-ym, linewidth=2, color='peru', label='lineal')\n ax.plot(xi, ysp3-ym, linewidth=2, color='mediumaquamarine', linestyle=':', label='cubic spline')\n ax.set_title(f'N={N}; max={mx:5.1e}')\n ax.legend()\n ax.axhline(0, linestyle='--', color='k')",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e266b38823dff75b28310a444dad38437fa65e | 3,524 | ipynb | Jupyter Notebook | exercices/part4.ipynb | AbdelwahabHassan/python-bootcamp | 193fbeeb6649f1bb9895425886940d952c9e0c5e | [
"MIT"
] | null | null | null | exercices/part4.ipynb | AbdelwahabHassan/python-bootcamp | 193fbeeb6649f1bb9895425886940d952c9e0c5e | [
"MIT"
] | null | null | null | exercices/part4.ipynb | AbdelwahabHassan/python-bootcamp | 193fbeeb6649f1bb9895425886940d952c9e0c5e | [
"MIT"
] | null | null | null | 20.488372 | 140 | 0.487798 | [
[
[
"### Exercice 1:",
"_____no_output_____"
],
[
"Write a Python class named square constructed by a length and two methods which will compute the area and the perimeter of the square.",
"_____no_output_____"
]
],
[
[
"class square():\n #define your methods\n def __init__(self,longueur):\n self.longueur = longueur\n def aire_carree(self):\n return self.longueur**2\n def perimetre(self):\n return self.longueur*4\n\nsquare1 = square(5)\nprint('Aire est : \\n',square1.aire_carree())\nprint('Perimetre est :\\n',square1.perimetre())",
"Aire est : \n 25\nPerimetre est :\n 20\n"
]
],
[
[
"### Exercice 2:",
"_____no_output_____"
],
[
"Write a python class rectangle that inherits from the square class.",
"_____no_output_____"
]
],
[
[
"class rectangle(square):\n def __init__(self,longueur,largeur):\n self.largeur = largeur\n super().__init__(longueur)\n\nsquare = rectangle(5,2)\nprint('Aire_R est : \\n',square.aire_carree())\nprint('Perimetre_R est :\\n',square.perimetre())",
"Aire_R est : \n 25\nPerimetre_R est :\n 20\n"
]
],
[
[
"### Exercice 3:",
"_____no_output_____"
]
],
[
[
"class SampleClass:\n\n def __init__(self, a):\n ## private varibale in Python\n self.a = a\n@SampleClass\ndef work(x):\n x = SampleClass(3)\n return x\nprint('p1 --->',work.a)\nwork.a = 23\nprint('p2--->',work.a)\n",
"p1 ---> <function work at 0x7f93280d45e0>\np2---> 23\n"
]
],
[
[
"Use python decorators to make the above code works",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7e2a2176abd612d2e7b334cf5ebbccf9f6f0431 | 9,443 | ipynb | Jupyter Notebook | examples/Notebooks/flopy3_multi-component_SSM.ipynb | aleaf/flopy | a5777a4d4a745e473110a167c69603ac4ad3106c | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | examples/Notebooks/flopy3_multi-component_SSM.ipynb | aleaf/flopy | a5777a4d4a745e473110a167c69603ac4ad3106c | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | examples/Notebooks/flopy3_multi-component_SSM.ipynb | aleaf/flopy | a5777a4d4a745e473110a167c69603ac4ad3106c | [
"CC0-1.0",
"BSD-3-Clause"
] | null | null | null | 26.675141 | 359 | 0.50969 | [
[
[
"# FloPy\n\n## Using FloPy to simplify the use of the MT3DMS ```SSM``` package\n\nA multi-component transport demonstration",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\nimport numpy as np\n\n# run installed version of flopy or add local path\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join('..', '..'))\n sys.path.append(fpth)\n import flopy\n\nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\nprint('flopy version: {}'.format(flopy.__version__))",
"flopy is installed in /Users/jdhughes/Documents/Development/flopy_git/flopy_fork/flopy\n3.7.3 | packaged by conda-forge | (default, Jul 1 2019, 14:38:56) \n[Clang 4.0.1 (tags/RELEASE_401/final)]\nnumpy version: 1.17.3\nflopy version: 3.3.1\n"
]
],
[
[
"First, we will create a simple model structure",
"_____no_output_____"
]
],
[
[
"nlay, nrow, ncol = 10, 10, 10\nperlen = np.zeros((10), dtype=np.float) + 10\nnper = len(perlen)\n\nibound = np.ones((nlay,nrow,ncol), dtype=np.int)\n\nbotm = np.arange(-1,-11,-1)\ntop = 0.",
"_____no_output_____"
]
],
[
[
"## Create the ```MODFLOW``` packages",
"_____no_output_____"
]
],
[
[
"model_ws = 'data'\nmodelname = 'ssmex'\nmf = flopy.modflow.Modflow(modelname, model_ws=model_ws)\ndis = flopy.modflow.ModflowDis(mf, nlay=nlay, nrow=nrow, ncol=ncol, \n perlen=perlen, nper=nper, botm=botm, top=top, \n steady=False)\nbas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=top)\nlpf = flopy.modflow.ModflowLpf(mf, hk=100, vka=100, ss=0.00001, sy=0.1)\noc = flopy.modflow.ModflowOc(mf)\npcg = flopy.modflow.ModflowPcg(mf)\nrch = flopy.modflow.ModflowRch(mf)",
"_____no_output_____"
]
],
[
[
"We'll track the cell locations for the ```SSM``` data using the ```MODFLOW``` boundary conditions.\n\n\nGet a dictionary (```dict```) that has the ```SSM``` ```itype``` for each of the boundary types.",
"_____no_output_____"
]
],
[
[
"itype = flopy.mt3d.Mt3dSsm.itype_dict()\nprint(itype)\nprint(flopy.mt3d.Mt3dSsm.get_default_dtype())\nssm_data = {}",
"{'CHD': 1, 'BAS6': 1, 'PBC': 1, 'WEL': 2, 'DRN': 3, 'RIV': 4, 'GHB': 5, 'MAS': 15, 'CC': -1}\n[('k', '<i8'), ('i', '<i8'), ('j', '<i8'), ('css', '<f4'), ('itype', '<i8')]\n"
]
],
[
[
"Add a general head boundary (```ghb```). The general head boundary head (```bhead```) is 0.1 for the first 5 stress periods with a component 1 (comp_1) concentration of 1.0 and a component 2 (comp_2) concentration of 100.0. Then ```bhead``` is increased to 0.25 and comp_1 concentration is reduced to 0.5 and comp_2 concentration is increased to 200.0",
"_____no_output_____"
]
],
[
[
"ghb_data = {}\nprint(flopy.modflow.ModflowGhb.get_default_dtype())\nghb_data[0] = [(4, 4, 4, 0.1, 1.5)]\nssm_data[0] = [(4, 4, 4, 1.0, itype['GHB'], 1.0, 100.0)]\nghb_data[5] = [(4, 4, 4, 0.25, 1.5)]\nssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)]\n\nfor k in range(nlay):\n for i in range(nrow):\n ghb_data[0].append((k, i, 0, 0.0, 100.0))\n ssm_data[0].append((k, i, 0, 0.0, itype['GHB'], 0.0, 0.0))\n \nghb_data[5] = [(4, 4, 4, 0.25, 1.5)]\nssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)]\nfor k in range(nlay):\n for i in range(nrow):\n ghb_data[5].append((k, i, 0, -0.5, 100.0))\n ssm_data[5].append((k, i, 0, 0.0, itype['GHB'], 0.0, 0.0))",
"[('k', '<i8'), ('i', '<i8'), ('j', '<i8'), ('bhead', '<f4'), ('cond', '<f4')]\n"
]
],
[
[
"Add an injection ```well```. The injection rate (```flux```) is 10.0 with a comp_1 concentration of 10.0 and a comp_2 concentration of 0.0 for all stress periods. WARNING: since we changed the ```SSM``` data in stress period 6, we need to add the well to the ssm_data for stress period 6.",
"_____no_output_____"
]
],
[
[
"wel_data = {}\nprint(flopy.modflow.ModflowWel.get_default_dtype())\nwel_data[0] = [(0, 4, 8, 10.0)]\nssm_data[0].append((0, 4, 8, 10.0, itype['WEL'], 10.0, 0.0))\nssm_data[5].append((0, 4, 8, 10.0, itype['WEL'], 10.0, 0.0))",
"[('k', '<i8'), ('i', '<i8'), ('j', '<i8'), ('flux', '<f4')]\n"
]
],
[
[
"Add the ```GHB``` and ```WEL``` packages to the ```mf``` ```MODFLOW``` object instance.",
"_____no_output_____"
]
],
[
[
"ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=ghb_data)\nwel = flopy.modflow.ModflowWel(mf, stress_period_data=wel_data)",
"_____no_output_____"
]
],
[
[
"## Create the ```MT3DMS``` packages",
"_____no_output_____"
]
],
[
[
"mt = flopy.mt3d.Mt3dms(modflowmodel=mf, modelname=modelname, model_ws=model_ws)\nbtn = flopy.mt3d.Mt3dBtn(mt, sconc=0, ncomp=2, sconc2=50.0)\nadv = flopy.mt3d.Mt3dAdv(mt)\nssm = flopy.mt3d.Mt3dSsm(mt, stress_period_data=ssm_data)\ngcg = flopy.mt3d.Mt3dGcg(mt)",
"found 'rch' in modflow model, resetting crch to 0.0\nSSM: setting crch for component 2 to zero. kwarg name crch2\n"
]
],
[
[
"Let's verify that ```stress_period_data``` has the right ```dtype```",
"_____no_output_____"
]
],
[
[
"print(ssm.stress_period_data.dtype)",
"[('k', '<i8'), ('i', '<i8'), ('j', '<i8'), ('css', '<f4'), ('itype', '<i8'), ('cssm(01)', '<f4'), ('cssm(02)', '<f4')]\n"
]
],
[
[
"## Create the ```SEAWAT``` packages",
"_____no_output_____"
]
],
[
[
"swt = flopy.seawat.Seawat(modflowmodel=mf, mt3dmodel=mt, \n modelname=modelname, namefile_ext='nam_swt', model_ws=model_ws)\nvdf = flopy.seawat.SeawatVdf(swt, mtdnconc=0, iwtable=0, indense=-1)",
"_____no_output_____"
],
[
"mf.write_input()\nmt.write_input()\nswt.write_input()",
"_____no_output_____"
]
],
[
[
"And finally, modify the ```vdf``` package to fix ```indense```.",
"_____no_output_____"
]
],
[
[
"fname = modelname + '.vdf'\nf = open(os.path.join(model_ws, fname),'r')\nlines = f.readlines()\nf.close()\nf = open(os.path.join(model_ws, fname),'w')\nfor line in lines:\n f.write(line)\nfor kper in range(nper):\n f.write(\"-1\\n\")\nf.close() \n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7e2a491f712eea148429f593869b1f202098043 | 42,761 | ipynb | Jupyter Notebook | examples/reference/widgets/Tabulator.ipynb | datalayer-contrib/holoviz-panel | c97b57e8eaff4b5f542add41f496395da2483b23 | [
"BSD-3-Clause"
] | null | null | null | examples/reference/widgets/Tabulator.ipynb | datalayer-contrib/holoviz-panel | c97b57e8eaff4b5f542add41f496395da2483b23 | [
"BSD-3-Clause"
] | null | null | null | examples/reference/widgets/Tabulator.ipynb | datalayer-contrib/holoviz-panel | c97b57e8eaff4b5f542add41f496395da2483b23 | [
"BSD-3-Clause"
] | null | null | null | 37.118924 | 639 | 0.616496 | [
[
[
"import datetime as dt\nimport numpy as np\nimport pandas as pd\nimport panel as pn\n\npn.extension('tabulator')",
"_____no_output_____"
]
],
[
[
"The ``Tabulator`` widget allows displaying and editing a pandas DataFrame. The `Tabulator` is a largely backward compatible replacement for the [`DataFrame`](./DataFrame.ipynb) widget and will eventually replace it. It is built on the [Tabulator](http://tabulator.info/) library, which provides for a wide range of features.\n\nFor more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb).\n\n#### Parameters:\n\nFor layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).\n\n##### Core\n\n* **``aggregators``** (``dict``): A dictionary mapping from index name to an aggregator to be used for `hierarchical` multi-indexes (valid aggregators include 'min', 'max', 'mean' and 'sum'). If separate aggregators for different columns are required the dictionary may be nested as `{index_name: {column_name: aggregator}}`\n* **``configuration``** (``dict``): A dictionary mapping used to specify tabulator options not explicitly exposed by panel.\n* **``editors``** (``dict``): A dictionary mapping from column name to a bokeh `CellEditor` instance or tabulator editor specification.\n* **``embed_content``** (``boolean``): Whether to embed the `row_content` or to dynamically fetch it when a row is expanded.\n* **``expanded``** (``list``): The currently expanded rows as a list of integer indexes.\n* **``filters``** (``list``): A list of client-side filter definitions that are applied to the table.\n* **``formatters``** (``dict``): A dictionary mapping from column name to a bokeh `CellFormatter` instance or tabulator formatter specification.\n* **``groupby``** (`list`): Groups rows in the table by one or more columns.\n* **``header_filters``** (``boolean``/``dict``): A boolean enabling filters in the column headers or a dictionary providing filter definitions for specific columns.\n* **``hierarchical``** (boolean, default=False): Whether to render multi-indexes as hierarchical index (note hierarchical must be enabled during instantiation and cannot be modified later)\n* **``hidden_columns``** (`list`): List of columns to hide.\n* **``layout``** (``str``, `default='fit_data_table'`): Describes the column layout mode with one of the following options `'fit_columns'`, `'fit_data'`, `'fit_data_stretch'`, `'fit_data_fill'`, `'fit_data_table'`. \n* **``frozen_columns``** (`list`): List of columns to freeze, preventing them from scrolling out of frame. Column can be specified by name or index.\n* **``frozen_rows``**: (`list`): List of rows to freeze, preventing them from scrolling out of frame. 
Rows can be specified by positive or negative index.\n* **``page``** (``int``, `default=1`): Current page, if pagination is enabled.\n* **``page_size``** (``int``, `default=20`): Number of rows on each page, if pagination is enabled.\n* **``pagination``** (`str`, `default=None`): Set to `'local` or `'remote'` to enable pagination; by default pagination is disabled with the value set to `None`.\n* **``row_content``** (``callable``): A function that receives the expanded row as input and should return a Panel object to render into the expanded region below the row.\n* **``row_height``** (``int``, `default=30`): The height of each table row.\n* **``selection``** (``list``): The currently selected rows as a list of integer indexes.\n* **``selectable``** (`boolean` or `str` or `int`, `default=True`): Defines the selection mode:\n * `True`\n Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select\n * `False`\n Disables selection\n * `'checkbox'`\n Adds a column of checkboxes to toggle selections\n * `'checkbox-single'`\n Same as 'checkbox' but header does not alllow select/deselect all\n * `'toggle'`\n Selection toggles when clicked\n * `int`\n The maximum number of selectable rows.\n* **``selectable_rows``** (`callable`): A function that should return a list of integer indexes given a DataFrame indicating which rows may be selected.\n* **``show_index``** (``boolean``, `default=True`): Whether to show the index column.\n* **``text_align``** (``dict`` or ``str``): A mapping from column name to alignment or a fixed column alignment, which should be one of `'left'`, `'center'`, `'right'`.\n* **`theme`** (``str``, `default='simple'`): The CSS theme to apply (note that changing the theme will restyle all tables on the page), which should be one of `'default'`, `'site'`, `'simple'`, `'midnight'`, `'modern'`, `'bootstrap'`, `'bootstrap4'`, `'materialize'`, `'bulma'`, `'semantic-ui'`, or `'fast'`.\n* **``titles``** (``dict``): A mapping from column name to a title to override the name with.\n* **``value``** (``pd.DataFrame``): The pandas DataFrame to display and edit\n* **``widths``** (``dict``): A dictionary mapping from column name to column width in the rendered table.\n\n##### Display\n\n* **``disabled``** (``boolean``): Whether the widget is editable\n* **``name``** (``str``): The title of the widget\n\n##### Properties\n\n* **``current_view``** (``DataFrame``): The current view of the table that is displayed, i.e. after sorting and filtering are applied\n* **``selected_dataframe``** (``DataFrame``): A DataFrame reflecting the currently selected rows.\n\n___",
"_____no_output_____"
],
[
"The ``Tabulator`` widget renders a DataFrame using an interactive grid, which allows directly editing the contents of the dataframe in place, with any changes being synced with Python. The `Tabulator` will usually determine the appropriate formatter appropriately based on the type of the data:",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({\n 'int': [1, 2, 3],\n 'float': [3.14, 6.28, 9.42],\n 'str': ['A', 'B', 'C'],\n 'bool': [True, False, True],\n 'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)]\n}, index=[1, 2, 3])\n\ndf_widget = pn.widgets.Tabulator(df)\ndf_widget",
"_____no_output_____"
]
],
[
[
"## Formatters\n\nBy default the widget will pick bokeh ``CellFormatter`` and ``CellEditor`` types appropriate to the dtype of the column. These may be overriden by explicit dictionaries mapping from the column name to the editor or formatter instance. For example below we create a ``SelectEditor`` instance to pick from four options in the ``str`` column and a ``NumberFormatter`` to customize the formatting of the float values:",
"_____no_output_____"
]
],
[
[
"from bokeh.models.widgets.tables import NumberFormatter, BooleanFormatter\n\nbokeh_formatters = {\n 'float': NumberFormatter(format='0.00000'),\n 'bool': BooleanFormatter(),\n}\n\npn.widgets.Tabulator(df, formatters=bokeh_formatters)",
"_____no_output_____"
]
],
[
[
"The list of valid Bokeh formatters includes:\n \n* [BooleanFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.BooleanFormatter)\n* [DateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.DateFormatter)\n* [NumberFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.NumberFormatter)\n* [HTMLTemplateFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.HTMLTemplateFormatter)\n* [StringFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.StringFormatter)\n* [ScientificFormatter](https://docs.bokeh.org/en/latest/docs/reference/models/widgets.tables.html#bokeh.models.widgets.tables.ScientificFormatter)\n\nHowever in addition to the formatters exposed by Bokeh it is also possible to provide valid formatters built into the Tabulator library. These may be defined either as a string or as a dictionary declaring the 'type' and other arguments, which are passed to Tabulator as the `formatterParams`:",
"_____no_output_____"
]
],
[
[
"tabulator_formatters = {\n 'float': {'type': 'progress', 'max': 10},\n 'bool': {'type': 'tickCross'}\n}\n\npn.widgets.Tabulator(df, formatters=tabulator_formatters)",
"_____no_output_____"
]
],
[
[
"The list of valid Tabulator formatters can be found in the [Tabulator documentation](http://tabulator.info/docs/4.9/format#format-builtin).\n\n## Editors\n\nJust like the formatters, the `Tabulator` will natively understand the Bokeh `Editor` types. However, in the background it will replace most of them with equivalent editors natively supported by the tabulator library:",
"_____no_output_____"
]
],
[
[
"from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor, DateEditor, TimeEditor\n\nbokeh_editors = {\n 'float': NumberEditor(),\n 'bool': CheckboxEditor(),\n 'str': SelectEditor(options=['A', 'B', 'C', 'D']),\n}\n\npn.widgets.Tabulator(df[['float', 'bool', 'str']], editors=bokeh_editors)",
"_____no_output_____"
]
],
[
[
"Therefore it is often preferable to use one of the [Tabulator editors](http://tabulator.info/docs/4.9/edit#edit) directly:",
"_____no_output_____"
]
],
[
[
"from bokeh.models.widgets.tables import CheckboxEditor, NumberEditor, SelectEditor\n\nbokeh_editors = {\n 'float': {'type': 'number', 'max': 10, 'step': 0.1},\n 'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},\n 'str': {'type': 'autocomplete', 'values': True}\n}\n\npn.widgets.Tabulator(df[['float', 'bool', 'str']], editors=bokeh_editors)",
"_____no_output_____"
]
],
[
[
"### Column layouts\n\nBy default the DataFrame widget will adjust the sizes of both the columns and the table based on the contents, reflecting the default value of the parameter: `layout=\"fit_data_table\"`. Alternative modes allow manually specifying the widths of the columns, giving each column equal widths, or adjusting just the size of the columns.\n\n#### Manual column widths\n\nTo manually adjust column widths provide explicit `widths` for each of the columns:",
"_____no_output_____"
]
],
[
[
"custom_df = pd._testing.makeMixedDataFrame()\n\npn.widgets.Tabulator(custom_df, widths={'index': 70, 'A': 50, 'B': 50, 'C': 70, 'D': 130})",
"_____no_output_____"
]
],
[
[
"You can also declare a single width for all columns this way:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, widths=130)",
"_____no_output_____"
]
],
[
[
"#### Autosize columns\n\nTo automatically adjust the columns dependending on their content set `layout='fit_data'`:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, layout='fit_data', width=400)",
"_____no_output_____"
]
],
[
[
"To ensure that the table fits all the data but also stretches to fill all the available space, set `layout='fit_data_stretch'`:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, layout='fit_data_stretch', width=400)",
"_____no_output_____"
]
],
[
[
"The `'fit_data_fill'` option on the other hand won't stretch the last column but still fill the space:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, layout='fit_data_fill', width=400)",
"_____no_output_____"
]
],
[
[
"Perhaps the most useful of these options is `layout='fit_data_table'` (and therefore the default) since this will automatically size both the columns and the table:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, layout='fit_data_table')",
"_____no_output_____"
]
],
[
[
"#### Equal size\n\nThe simplest option is simply to allocate each column equal amount of size:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(custom_df, layout='fit_columns', width=650)",
"_____no_output_____"
]
],
[
[
"## Styling\n\nThe ability to style the contents of a table based on its content and other considerations is very important. Thankfully pandas provides a powerful [styling API](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html), which can be used in conjunction with the `Tabulator` widget. Specifically the `Tabulator` widget exposes a `.style` attribute just like a `pandas.DataFrame` which lets the user apply custom styling using methods like `.apply` and `.applymap`. For a detailed guide to styling see the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html).\n\nHere we will demonstrate with a simple example, starting with a basic table:",
"_____no_output_____"
]
],
[
[
"style_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))\nstyled = pn.widgets.Tabulator(style_df)",
"_____no_output_____"
]
],
[
[
"Next we define two functions which apply styling cell-wise (`color_negative_red`) and column-wise (`highlight_max`), which we then apply to the `Tabulator` using the `.style` API and then display the `styled` table:",
"_____no_output_____"
]
],
[
[
"def color_negative_red(val):\n \"\"\"\n Takes a scalar and returns a string with\n the css property `'color: red'` for negative\n strings, black otherwise.\n \"\"\"\n color = 'red' if val < 0 else 'black'\n return 'color: %s' % color\n\ndef highlight_max(s):\n '''\n highlight the maximum in a Series yellow.\n '''\n is_max = s == s.max()\n return ['background-color: yellow' if v else '' for v in is_max]\n\nstyled.style.applymap(color_negative_red).apply(highlight_max)\n\nstyled",
"_____no_output_____"
]
],
[
[
"## Theming\n\nThe Tabulator library ships with a number of themes, which are defined as CSS stylesheets. For that reason changing the theme on one table will affect all Tables on the page and it will usually be preferable to see the theme once at the class level like this:\n\n```python\npn.widgets.Tabulator.theme = 'default'\n```\n\nFor a full list of themes see the [Tabulator documentation](http://tabulator.info/docs/4.9/theme), however the default themes include:\n\n- `'simple'`\n- `'default'`\n- `'midnight'`\n- `'site'`\n- `'modern'`\n- `'bootstrap'`\n- `'bootstrap4'`\n- `'materialize'`\n- `'semantic-ui'`\n- `'bulma'`",
"_____no_output_____"
],
[
"## Selection\n\nThe `selection` parameter controls which rows in the table are selected and can be set from Python and updated by selecting rows on the frontend:",
"_____no_output_____"
]
],
[
[
"sel_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))\n\nselect_table = pn.widgets.Tabulator(sel_df, selection=[0, 3, 7])\nselect_table",
"_____no_output_____"
]
],
[
[
"Once initialized, the ``selection`` parameter will return the integer indexes of the selected rows, while the ``selected_dataframe`` property will return a new DataFrame containing just the selected rows:",
"_____no_output_____"
]
],
[
[
"select_table.selection = [1, 4, 9]\n\nselect_table.selected_dataframe",
"_____no_output_____"
]
],
[
[
"The `selectable` parameter declares how the selections work. \n\n- `True`: Selects rows on click. To select multiple use Ctrl-select, to select a range use Shift-select\n- `False`: Disables selection\n- `'checkbox'`: Adds a column of checkboxes to toggle selections\n- `'checkbox-single'`: Same as `'checkbox'` but disables (de)select-all in the header\n- `'toggle'`: Selection toggles when clicked\n- Any positive `int`: A number that sets the maximum number of selectable rows",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(sel_df, selection=[0, 3, 7], selectable='checkbox')",
"_____no_output_____"
]
],
[
[
"Additionally we can also disable selection for specific rows by providing a `selectable_rows` function. The function must accept a DataFrame and return a list of integer indexes indicating which rows are selectable, e.g. here we disable selection for every second row:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(sel_df, selectable_rows=lambda df: list(range(0, len(df), 2)))",
"_____no_output_____"
]
],
[
[
"### Freezing rows and columns\n\nSometimes your table will be larger than can be displayed in a single viewport, in which case scroll bars will be enabled. In such cases, you might want to make sure that certain information is always visible. This is where the `frozen_columns` and `frozen_rows` options come in.\n\n#### Frozen columns\n\nWhen you have a large number of columns and can't fit them all on the screen you might still want to make sure that certain columns do not scroll out of view. The `frozen_columns` option makes this possible by specifying a list of columns that should be frozen, e.g. `frozen_columns=['index']` will freeze the index column:",
"_____no_output_____"
]
],
[
[
"wide_df = pd._testing.makeCustomDataframe(10, 10, r_idx_names=['index'])\n\npn.widgets.Tabulator(wide_df, frozen_columns=['index'], width=400)",
"_____no_output_____"
]
],
[
[
"#### Frozen rows\n\nAnother common scenario is when you have certain rows with special meaning, e.g. aggregates that summarize the information in the rest of the table. In this case you may want to freeze those rows so they do not scroll out of view. You can achieve this by setting a list of `frozen_rows` by integer index (which can be positive or negative, where negative values are relative to the end of the table):",
"_____no_output_____"
]
],
[
[
"date_df = pd._testing.makeTimeDataFrame().iloc[:10]\nagg_df = pd.concat([date_df, date_df.median().to_frame('Median').T, date_df.mean().to_frame('Mean').T])\nagg_df.index= agg_df.index.map(str)\n\npn.widgets.Tabulator(agg_df, frozen_rows=[-2, -1], width=400)",
"_____no_output_____"
]
],
[
[
"## Row contents\n\nA table can only display so much information without becoming difficult to scan. We may want to render additional information to a table row to provide additional context. To make this possible you can provide a `row_content` function which is given the table row as an argument and should return a panel object that will be rendered into an expanding region below the row. By default the contents are fetched dynamically whenever a row is expanded, however using the `embed_content` parameter we can embed all the content.\n\nBelow we create a periodic table of elements where the Wikipedia page for each element will be rendered into the expanded region:",
"_____no_output_____"
]
],
[
[
"from bokeh.sampledata.periodic_table import elements\n\nperiodic_df = elements[['atomic number', 'name', 'atomic mass', 'metal', 'year discovered']].set_index('atomic number')\n\ncontent_fn = lambda row: pn.pane.HTML(\n f'<iframe src=\"http://en.wikipedia.org/wiki/{row[\"name\"]}?printable=yes\" width=\"100%\" height=\"300px\"></iframe>',\n sizing_mode='stretch_width'\n)\n\nperiodic_table = pn.widgets.Tabulator(\n periodic_df, height=500, layout='fit_columns', sizing_mode='stretch_width',\n row_content=content_fn, embed_content=True\n)\n\nperiodic_table",
"_____no_output_____"
]
],
[
[
"The currently expanded rows can be accessed (and set) on the `expanded` parameter:",
"_____no_output_____"
]
],
[
[
"periodic_table.expanded",
"_____no_output_____"
]
],
[
[
"## Grouping\n\nAnother useful option is the ability to group specific rows together, which can be achieved using `groups` parameter. The `groups` parameter should be composed of a dictionary mapping from the group titles to the column names:",
"_____no_output_____"
]
],
[
[
"pn.widgets.Tabulator(date_df, groups={'Group 1': ['A', 'B'], 'Group 2': ['C', 'D']})",
"_____no_output_____"
]
],
[
[
"## Groupby\n\nIn addition to grouping columns we can also group rows by the values along one or more columns:",
"_____no_output_____"
]
],
[
[
"from bokeh.sampledata.autompg import autompg\n\npn.widgets.Tabulator(autompg, groupby=['yr', 'origin'], height=240)",
"_____no_output_____"
]
],
[
[
"### Hierarchical Multi-index\n\nThe `Tabulator` widget can also render a hierarchical multi-index and aggregate over specific categories. If a DataFrame with a hierarchical multi-index is supplied and the `hierarchical` is enabled the widget will group data by the categories in the order they are defined in. Additionally for each group in the multi-index an aggregator may be provided which will aggregate over the values in that category.\n\nFor example we may load population data for locations around the world broken down by sex and age-group. If we specify aggregators over the 'AgeGrp' and 'Sex' indexes we can see the aggregated values for each of those groups (note that we do not have to specify an aggregator for the outer index since we specify the aggregators over the subgroups in this case the 'Sex'):",
"_____no_output_____"
]
],
[
[
"from bokeh.sampledata.population import data as population_data \n\npop_df = population_data[population_data.Year == 2020].set_index(['Location', 'AgeGrp', 'Sex'])[['Value']]\n\npn.widgets.Tabulator(value=pop_df, hierarchical=True, aggregators={'Sex': 'sum', 'AgeGrp': 'sum'}, height=400)",
"_____no_output_____"
]
],
[
[
"## Pagination\n\nWhen working with large tables we sometimes can't send all the data to the browser at once. In these scenarios we can enable pagination, which will fetch only the currently viewed data from the server backend. This may be enabled by setting `pagination='remote'` and the size of each page can be set using the `page_size` option:",
"_____no_output_____"
]
],
[
[
"large_df = pd._testing.makeCustomDataframe(100000, 5) ",
"_____no_output_____"
],
[
"%%time\npaginated_table = pn.widgets.Tabulator(large_df, pagination='remote', page_size=10)\npaginated_table",
"_____no_output_____"
]
],
[
[
"Contrary to the `'remote'` option, `'local'` pagination entirely loads the data but still allows to display it on multiple pages.",
"_____no_output_____"
]
],
[
[
"%%time\npaginated_table = pn.widgets.Tabulator(large_df, pagination='local', page_size=10)\npaginated_table",
"_____no_output_____"
]
],
[
[
"## Filtering\n\nA very common scenario is that you want to attach a number of filters to a table in order to view just a subset of the data. You can achieve this through callbacks or other reactive approaches but the `.add_filter` method makes it much easier.\n\n#### Constant and Widget filters\n\nThe simplest approach to filtering is to select along a column with a constant or dynamic value. The `.add_filter` method allows passing in constant values, widgets and parameters. If a widget or parameter is provided the table will watch the object for changes in the value and update the data in response. The filtering will depend on the type of the constant or dynamic value:\n\n- scalar: Filters by checking for equality\n- `tuple`: A tuple will be interpreted as range.\n- `list`/`set`: A list or set will be interpreted as a set of discrete scalars and the filter will check if the values in the column match any of the items in the list.\n\nAs an example we will create a DataFrame with some data of mixed types:",
"_____no_output_____"
]
],
[
[
"mixed_df = pd._testing.makeMixedDataFrame()\nfilter_table = pn.widgets.Tabulator(mixed_df)\nfilter_table",
"_____no_output_____"
]
],
[
[
"Now we will start adding filters one-by-one, e.g. to start with we add a filter for the `'A'` column, selecting a range from 0 to 3:",
"_____no_output_____"
]
],
[
[
"filter_table.add_filter((0, 3), 'A')",
"_____no_output_____"
]
],
[
[
"Next we add dynamic widget based filter, a `RangeSlider` which allows us to further narrow down the data along the `'A'` column:",
"_____no_output_____"
]
],
[
[
"slider = pn.widgets.RangeSlider(start=0, end=3, name='A Filter')\nfilter_table.add_filter(slider, 'A')",
"_____no_output_____"
]
],
[
[
"Lastly we will add a `MultiSelect` filter along the `'C'` column:",
"_____no_output_____"
]
],
[
[
"select = pn.widgets.MultiSelect(options=['foo1', 'foo2', 'foo3', 'foo4', 'foo5'], name='C Filter')\nfilter_table.add_filter(select, 'C')",
"_____no_output_____"
]
],
[
[
"Now let's display the table alongside the widget based filters:",
"_____no_output_____"
]
],
[
[
"pn.Row(\n pn.Column(slider, select),\n filter_table\n)",
"_____no_output_____"
]
],
[
[
"After filtering you can inspect the current view with the `current_view` property:",
"_____no_output_____"
]
],
[
[
"filter_table.current_view",
"_____no_output_____"
]
],
[
[
"#### Function based filtering",
"_____no_output_____"
],
[
"For more complex filtering tasks you can supply a function that should accept the DataFrame to be filtered as the first argument and must return a filtered copy of the data. Let's start by loading some data.",
"_____no_output_____"
]
],
[
[
"import sqlite3\n\nfrom bokeh.sampledata.movies_data import movie_path\n\ncon = sqlite3.Connection(movie_path)\n\nmovies_df = pd.read_sql('SELECT Title, Year, Genre, Director, Writer, imdbRating from omdb', con)\nmovies_df = movies_df[~movies_df.Director.isna()]\n\nmovies_table = pn.widgets.Tabulator(movies_df, pagination='remote', layout='fit_columns', width=800)",
"_____no_output_____"
]
],
[
[
"By using the `pn.bind` function, which binds widget and parameter values to a function, complex filtering can be achieved. E.g. here we will add a filter function that uses tests whether the string or regex is contained in the 'Director' column of a listing of thousands of movies:",
"_____no_output_____"
]
],
[
[
"director_filter = pn.widgets.TextInput(name='Director filter', value='Chaplin')\n\ndef contains_filter(df, pattern, column):\n if not pattern:\n return df\n return df[df[column].str.contains(pattern)]\n \nmovies_table.add_filter(pn.bind(contains_filter, pattern=director_filter, column='Director')) \n\npn.Row(director_filter, movies_table)",
"_____no_output_____"
]
],
[
[
"### Client-side filtering\n\nIn addition to the Python API the Tabulator widget also offers a client-side filtering API, which can be exposed through `header_filters` or by manually adding filters to the rendered Bokeh model. The API for declaring header filters is almost identical to the API for defining [Editors](#Editors). The `header_filters` can either be enabled by setting it to `True` or by manually supplying filter types for each column. The types of filters supports all the same options as the editors, in fact if you do not declare explicit `header_filters` the tabulator will simply use the defined `editors` to determine the correct filter type:",
"_____no_output_____"
]
],
[
[
"bokeh_editors = {\n 'float': {'type': 'number', 'max': 10, 'step': 0.1},\n 'bool': {'type': 'tickCross', 'tristate': True, 'indeterminateValue': None},\n 'str': {'type': 'autocomplete', 'values': True}\n}\n\nheader_filter_table = pn.widgets.Tabulator(\n df[['float', 'bool', 'str']], height=140, width=400, layout='fit_columns',\n editors=bokeh_editors, header_filters=True\n)\n\nheader_filter_table",
"_____no_output_____"
]
],
[
[
"When a filter is applied client-side the `filters` parameter is synced with Python. The definition of `filters` looks something like this:\n\n```\n[{'field': 'Director', 'type': '=', 'value': 'Steven Spielberg'}]\n```\n\nTry applying a filter and then inspect the `filters` parameter:",
"_____no_output_____"
]
],
[
[
"header_filter_table.filters",
"_____no_output_____"
]
],
[
[
"For all supported filtering types see the [Tabulator Filtering documentation](http://tabulator.info/docs/4.9/filter).\n\nIf we want to change the filter type for the `header_filters` we can do so in the definition by supplying a dictionary indexed by the column names and then either providing a dictionary which may define the `'type'`, a comparison `'func'`, a `'placeholder'` and any additional keywords supported by the particular filter type.",
"_____no_output_____"
]
],
[
[
"movie_filters = {\n 'Title': {'type': 'input', 'func': 'like', 'placeholder': 'Enter title'},\n 'Year': {'placeholder': 'Enter year'},\n 'Genre': {'type': 'input', 'func': 'like', 'placeholder': 'Enter genre'},\n 'Director': {'type': 'input', 'func': 'like', 'placeholder': 'Enter director'},\n 'Writer': {'type': 'input', 'func': 'like', 'placeholder': 'Enter writer'},\n 'imdbRating': {'type': 'number', 'func': '>=', 'placeholder': 'Enter minimum rating'}\n}\n\nfilter_table = pn.widgets.Tabulator(\n movies_df, pagination='remote', layout='fit_columns', page_size=10, sizing_mode='stretch_width',\n header_filters=movie_filters\n)\nfilter_table",
"_____no_output_____"
]
],
[
[
"## Downloading\n\nThe `Tabulator` also supports triggering a download of the data as a CSV or JSON file dependending on the filename. The download can be triggered with the `.download()` method, which optionally accepts the filename as the first argument.\n\nTo trigger the download client-side (i.e. without involving the server) you can use the `.download_menu` method which creates a `TextInput` and `Button` widget, which allow setting the filename and triggering the download respectively:",
"_____no_output_____"
]
],
[
[
"download_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))\n\ndownload_table = pn.widgets.Tabulator(download_df)\n\nfilename, button = download_table.download_menu(\n text_kwargs={'name': 'Enter filename', 'value': 'default.csv'},\n button_kwargs={'name': 'Download table'}\n)\n\npn.Row(\n pn.Column(filename, button),\n download_table\n)",
"_____no_output_____"
]
],
[
[
"## Streaming\n\nWhen we are monitoring some source of data that updates over time, we may want to update the table with the newly arriving data. However, we do not want to transmit the entire dataset each time. To handle efficient transfer of just the latest data, we can use the `.stream` method on the `Tabulator` object:",
"_____no_output_____"
]
],
[
[
"stream_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))\n\nstream_table = pn.widgets.Tabulator(stream_df, layout='fit_columns', width=450)\nstream_table",
"_____no_output_____"
]
],
[
[
"As example, we will schedule a periodic callback that streams new data every 1000ms (i.e. 1s) five times in a row:",
"_____no_output_____"
]
],
[
[
"def stream_data(follow=True):\n stream_df = pd.DataFrame(np.random.randn(10, 5), columns=list('ABCDE'))\n stream_table.stream(stream_df, follow=follow)\n\npn.state.add_periodic_callback(stream_data, period=1000, count=5)",
"_____no_output_____"
]
],
[
[
"If you are viewing this example with a live Python kernel you will be able to watch the table update and scroll along. If we want to disable the scrolling behavior, we can set `follow=False`:",
"_____no_output_____"
]
],
[
[
"stream_data(follow=False)",
"_____no_output_____"
]
],
[
[
"## Patching\n\nIn certain cases we don't want to update the table with new data but just patch existing data.",
"_____no_output_____"
]
],
[
[
"patch_table = pn.widgets.Tabulator(df[['int', 'float', 'str', 'bool']])\npatch_table",
"_____no_output_____"
]
],
[
[
"The easiest way to patch the data is by supplying a dictionary as the patch value. The dictionary should have the following structure:\n\n```python\n{\n column: [\n (index: int or slice, value),\n ...\n ],\n ...\n}\n```\n \nAs an example, below we will patch the 'bool' and 'int' columns. On the `'bool'` column we will replace the 0th and 2nd row and on the `'int'` column we replace the first two rows:",
"_____no_output_____"
]
],
[
[
"patch_table.patch({\n 'bool': [\n (0, False),\n (2, False)\n ],\n 'int': [\n (slice(0, 2), [3, 2])\n ]\n})",
"_____no_output_____"
]
],
[
[
"## Static Configuration\n\nPanel does not expose all options available from Tabulator, if a desired option is not natively supported, it can be set via the `configuration` argument. \nThis dictionary can be seen as a base dictionary which the tabulator object fills and passes to the Tabulator javascript-library.\n\nAs an example, we can turn off sorting and resizing of columns by disabling the `headerSort` and `resizableColumn` options.",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({\n 'int': [1, 2, 3],\n 'float': [3.14, 6.28, 9.42],\n 'str': ['A', 'B', 'C'],\n 'bool': [True, False, True],\n 'date': [dt.date(2019, 1, 1), dt.date(2020, 1, 1), dt.date(2020, 1, 10)]\n}, index=[1, 2, 3])\n\ndf_widget = pn.widgets.Tabulator(df, configuration={\"headerSort\": False, \"resizableColumns\": False})\ndf_widget.servable()",
"_____no_output_____"
]
],
[
[
"These and other available tabulator options are listed at http://tabulator.info/docs/4.9/options. \nObviously not all options will work though, especially any settable callbacks and options which are set by the internal panel tabulator module (for example the `columns` option).\nAdditionally it should be noted that the configuration parameter is not responsive so it can only be set at instantiation time.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |