| Column | Dtype | Min | Max |
|---|---|---|---|
| hexsha | stringlengths | 40 | 40 |
| size | int64 | 6 | 14.9M |
| ext | stringclasses | 1 value | |
| lang | stringclasses | 1 value | |
| max_stars_repo_path | stringlengths | 6 | 260 |
| max_stars_repo_name | stringlengths | 6 | 119 |
| max_stars_repo_head_hexsha | stringlengths | 40 | 41 |
| max_stars_repo_licenses | sequence | | |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 |
| max_issues_repo_path | stringlengths | 6 | 260 |
| max_issues_repo_name | stringlengths | 6 | 119 |
| max_issues_repo_head_hexsha | stringlengths | 40 | 41 |
| max_issues_repo_licenses | sequence | | |
| max_issues_count | int64 | 1 | 67k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 |
| max_forks_repo_path | stringlengths | 6 | 260 |
| max_forks_repo_name | stringlengths | 6 | 119 |
| max_forks_repo_head_hexsha | stringlengths | 40 | 41 |
| max_forks_repo_licenses | sequence | | |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 |
| avg_line_length | float64 | 2 | 1.04M |
| max_line_length | int64 | 2 | 11.2M |
| alphanum_fraction | float64 | 0 | 1 |
| cells | sequence | | |
| cell_types | sequence | | |
| cell_type_groups | sequence | | |
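The rows below follow this schema, one value per field. As a minimal sketch of how such a dump might be loaded and checked against the statistics above (the file name `notebooks.parquet` and the Parquet format are assumptions; adjust them to wherever this table is actually stored), one could use pandas:

```python
import pandas as pd

# Assumed file name and format; the real dump may be JSONL or CSV under another path.
df = pd.read_parquet("notebooks.parquet")

# Sanity-check a few of the column statistics listed above.
assert df["hexsha"].str.len().eq(40).all()          # hexsha: stringlengths 40-40
assert df["ext"].nunique() == 1                     # ext: stringclasses, 1 value
assert df["alphanum_fraction"].between(0, 1).all()  # float64 in [0, 1]

# Peek at the first record's notebook structure.
row = df.iloc[0]
print(row["hexsha"], row["max_stars_repo_name"])
print("cell types:", list(row["cell_types"])[:5], "... n_cells:", len(row["cells"]))
```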
d0dc0ee55a3f1a5b511d675f35f747759b9a32bf
8,780
ipynb
Jupyter Notebook
iam_recommender_basics.ipynb
misabhishek/gcp-iam-recommender
7afa435884c2afbeee7afb4789a3649afbdb79f7
[ "Apache-2.0" ]
null
null
null
iam_recommender_basics.ipynb
misabhishek/gcp-iam-recommender
7afa435884c2afbeee7afb4789a3649afbdb79f7
[ "Apache-2.0" ]
null
null
null
iam_recommender_basics.ipynb
misabhishek/gcp-iam-recommender
7afa435884c2afbeee7afb4789a3649afbdb79f7
[ "Apache-2.0" ]
null
null
null
28.322581
250
0.476879
[ [ [ "<a href=\"https://colab.research.google.com/github/misabhishek/gcp-iam-recommender/blob/main/iam_recommender_basics.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Before you begin\n\n\n1. Have a GCP projrect ready. \n3. [Enable Iam Recommender](https://console.cloud.google.com/flows/enableapi?apiid=recommender.googleapis.com) APIs for the project.", "_____no_output_____" ], [ "### Provide your credentials to the runtime", "_____no_output_____" ] ], [ [ "from google.colab import auth\nauth.authenticate_user()\nprint('Authenticated')", "_____no_output_____" ] ], [ [ "## Understand GCP IAM Recommender", "_____no_output_____" ], [ "**Declare the Cloud project ID which will be used throughout this notebook**", "_____no_output_____" ] ], [ [ "project_id = \"Enter-your-project\"", "_____no_output_____" ] ], [ [ "**A helper function to execute `gcloud` commands**", "_____no_output_____" ] ], [ [ "import json\nimport subprocess\ndef execute_command(command):\n return json.loads(subprocess.check_output(filter(lambda x: x, command.split(\" \"))).decode(\"utf-8\"))", "_____no_output_____" ], [ "recommender_command = f\"\"\"gcloud recommender recommendations list \\\n --location=global \\\n --recommender=google.iam.policy.Recommender \\\n --project={project_id} \\\n --format=json\n \"\"\"", "_____no_output_____" ], [ "recommendations = execute_command(recommender_command)", "_____no_output_____" ], [ "recommendations[7]", "_____no_output_____" ] ], [ [ "### Getting insight for the recommendations", "_____no_output_____" ] ], [ [ "insight_command = f\"\"\"gcloud recommender insights list \\\n --project={project_id} \\\n --location=global \\\n --insight-type=google.iam.policy.Insight \\\n --format=json\n \"\"\"", "_____no_output_____" ], [ "insights = execute_command(insight_command)", "_____no_output_____" ], [ "insights[0]", "_____no_output_____" ] ], [ [ "# Generate diff view", "_____no_output_____" ] ], [ [ "recommendation_name = \"Enter-the-recommendation-name\"", "_____no_output_____" ], [ "#@title A helper to generate diff view. 
It uses IAM roles api also.\nimport pandas as pd\ndef generate_diff_view(recommendation_name):\n role_to_permission_command = \"gcloud iam roles describe {} --format=json\"\n\n recommendation = [r for r in recommendations if r[\"name\"] == recommendation_name][0]\n insight_name = recommendation[\"associatedInsights\"][0][\"insight\"]\n\n added_roles = []\n removed_role = []\n for op in recommendation[\"content\"][\"operationGroups\"][0][\"operations\"]:\n if op[\"action\"] == \"add\":\n added_roles.append(op[\"pathFilters\"][\"/iamPolicy/bindings/*/role\"])\n if op[\"action\"] == \"remove\":\n removed_role.append(op[\"pathFilters\"][\"/iamPolicy/bindings/*/role\"])\n\n cur_permissions = set(execute_command(\n role_to_permission_command.format(removed_role[0]))[\"includedPermissions\"])\n\n recommended_permisisons = set() \n for r in added_roles:\n recommended_permisisons.update(execute_command(\n role_to_permission_command.format(r))[\"includedPermissions\"])\n \n removed_permisisons = cur_permissions - recommended_permisisons\n \n insight = [insight for insight in insights \n if insight[\"name\"] == insight_name][0]\n used_permissions = set(k[\"permission\"] for k in \n insight[\"content\"][\"exercisedPermissions\"])\n inferred_permissions = set(k[\"permission\"] for k in \n insight[\"content\"][\"inferredPermissions\"])\n \n unused_but_still_common_permissions = (recommended_permisisons - used_permissions \n - inferred_permissions)\n \n types = ([\"used\"] * len(used_permissions) \n + [\"ml-inferred\"] * len(inferred_permissions)\n + [\"common\"] * len(unused_but_still_common_permissions)\n + [\"removed\"] * len(removed_permisisons))\n \n permissions = [*used_permissions, *inferred_permissions, \n *unused_but_still_common_permissions, *removed_permisisons]\n\n return pd.DataFrame({\"type\": types, \"permission\": permissions})", "_____no_output_____" ], [ "diff_view = generate_diff_view(recommendation_name)", "_____no_output_____" ], [ "diff_view", "_____no_output_____" ], [ "diff_view[\"type\"].value_counts()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0dc189ffa2e7a4331cd2a546a475cdb48e2bc7a
56,145
ipynb
Jupyter Notebook
notebooks/NumQM_Basic_05.ipynb
raghurama123/NumQM
59286ff3c7339f3c8055ff60200e04f021b187aa
[ "MIT" ]
38
2021-12-29T14:45:12.000Z
2022-01-11T08:56:09.000Z
notebooks/NumQM_Basic_05.ipynb
raghurama123/NumQM
59286ff3c7339f3c8055ff60200e04f021b187aa
[ "MIT" ]
null
null
null
notebooks/NumQM_Basic_05.ipynb
raghurama123/NumQM
59286ff3c7339f3c8055ff60200e04f021b187aa
[ "MIT" ]
15
2021-12-29T19:21:41.000Z
2022-01-23T03:18:00.000Z
324.537572
51,780
0.92685
[ [ [ "import numpy as np\nfrom numpy import linalg as npla\nimport matplotlib.pyplot as plt\nfrom scipy import integrate", "_____no_output_____" ], [ "def eigen(A):\n eigenValues, eigenVectors = npla.eig(A)\n idx = np.argsort(eigenValues)\n eigenValues = eigenValues[idx]\n eigenVectors = eigenVectors[:,idx]\n return (eigenValues, eigenVectors)", "_____no_output_____" ], [ "L=15.0 # box length is 2L; [-L,L]\nm=1 # particle mass \nhbar=1 # 1 in atomic units", "_____no_output_____" ], [ "def potential(x): # 1-D molecule\n potential = -6/np.sqrt((x+4)**2+1)-4/np.sqrt((x-4)**2+1)\n return potential\n \ndef fn_V(x):\n psi_i=np.sqrt(1/L)*np.sin((i+1)*(x-L)*np.pi/(2*L))\n psi_j=np.sqrt(1/L)*np.sin((j+1)*(x-L)*np.pi/(2*L))\n Nx=x.size\n pot=np.zeros(Nx)\n for ix in range(Nx):\n pot[ix]=potential(x[ix])\n \n fn_V=psi_i * pot * psi_j\n return fn_V\n\nfor iN in range(0,6):\n \n N=2**iN # No. of basis functions\n\n V=np.zeros([N,N])\n T=np.zeros([N,N])\n H=np.zeros([N,N])\n \n for i in range(N):\n for j in range(N):\n Int_V=integrate.quadrature(fn_V, -L, L,maxiter=1000)\n V[i][j]=Int_V[0] \n T[i][i]=(i+1)**2 * hbar**2 * np.pi**2 / (8 * m * L**2)\n\n H=T+V\n\n E,V=eigen(H)\n\n print(\"Number of basis: \", N, \", ground state energy is:\", E[0])", "Number of basis: 1 , ground state energy is: -3.0563258068171444\nNumber of basis: 2 , ground state energy is: -3.156640541974056\nNumber of basis: 4 , ground state energy is: -3.924186324234983\nNumber of basis: 8 , ground state energy is: -4.547315446207627\nNumber of basis: 16 , ground state energy is: -5.2692879329887035\nNumber of basis: 32 , ground state energy is: -5.486268540734763\n" ], [ "x=np.linspace(-L, L, 101)\nVharm=np.zeros(101)\nfor ix in range(101):\n Vharm[ix]=potential(x[ix])\n\nfor k in range(16): \n psi0=np.zeros(101)\n for i in range(N):\n psi0=psi0+V[i][k]*np.sqrt(1/L)*np.sin((i+1)*(x-L)*np.pi/(2*L))\n \n plt.plot(x,np.abs(psi0)**2+E[k])\n\n\nplt.plot(x,Vharm)\nplt.xlabel(\"$x$\", fontsize=14)\nplt.ylabel(\"$|\\psi(x)|^2$\", fontsize=14)\nplt.savefig('psi_1Dmol.png') \n#plt.legend(['n = 0','n = 1','n = 2','n = 3','n = 4'])\nplt.xlim(-L,L)\n#plt.ylim(-10,10)\nplt.show()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0dc1b6d657530cf660347d0087211a2ba807070
5,511
ipynb
Jupyter Notebook
docs/samples/python/presidio_notebook.ipynb
hkarakose/presidio
00431870d0cd3e3a507a86ffb2cdbabf9b18d829
[ "MIT" ]
null
null
null
docs/samples/python/presidio_notebook.ipynb
hkarakose/presidio
00431870d0cd3e3a507a86ffb2cdbabf9b18d829
[ "MIT" ]
null
null
null
docs/samples/python/presidio_notebook.ipynb
hkarakose/presidio
00431870d0cd3e3a507a86ffb2cdbabf9b18d829
[ "MIT" ]
null
null
null
32.803571
171
0.591363
[ [ [ "from presidio_analyzer import AnalyzerEngine, PatternRecognizer\nfrom presidio_anonymizer import AnonymizerEngine\nfrom presidio_anonymizer.entities import AnonymizerConfig", "_____no_output_____" ] ], [ [ "# Analyze Text for PII Entities\n\n<br>Using Presidio Analyzer, analyze a text to identify PII entities. \n<br>The Presidio analyzer is using pre-defined entity recognizers, and offers the option to create custom recognizers.\n\n<br>The following code sample will:\n<ol>\n<li>Set up the Analyzer engine - load the NLP module (spaCy model by default) and other PII recognizers</li>\n<li> Call analyzer to get analyzed results for \"PHONE_NUMBER\" entity type</li>\n</ol>", "_____no_output_____" ] ], [ [ "text_to_anonymize = \"His name is Mr. Jones and his phone number is 212-555-5555\"", "_____no_output_____" ], [ "analyzer = AnalyzerEngine()\nanalyzer_results = analyzer.analyze(text=text_to_anonymize, entities=[\"PHONE_NUMBER\"], language='en')\n\nprint(analyzer_results)", "_____no_output_____" ] ], [ [ "# Create Custom PII Entity Recognizers\n\n<br>Presidio Analyzer comes with a pre-defined set of entity recognizers. It also allows adding new recognizers without changing the analyzer base code,\n<b>by creating custom recognizers. \n<br>In the following example, we will create two new recognizers of type `PatternRecognizer` to identify titles and pronouns in the analyzed text.\n<br>A `PatternRecognizer` is a PII entity recognizer which uses regular expressions or deny-lists.\n\n<br>The following code sample will:\n<ol>\n<li>Create custom recognizers</li>\n<li>Add the new custom recognizers to the analyzer</li>\n<li>Call analyzer to get results from the new recognizers</li>\n</ol>\n", "_____no_output_____" ] ], [ [ "titles_recognizer = PatternRecognizer(supported_entity=\"TITLE\",\n deny_list=[\"Mr.\",\"Mrs.\",\"Miss\"])\n\npronoun_recognizer = PatternRecognizer(supported_entity=\"PRONOUN\",\n deny_list=[\"he\", \"He\", \"his\", \"His\", \"she\", \"She\", \"hers\" \"Hers\"])\n\nanalyzer.registry.add_recognizer(titles_recognizer)\nanalyzer.registry.add_recognizer(pronoun_recognizer)\n\nanalyzer_results = analyzer.analyze(text=text_to_anonymize,\n entities=[\"TITLE\", \"PRONOUN\"],\n language=\"en\")\nprint(analyzer_results)\n", "_____no_output_____" ] ], [ [ "Call Presidio Analyzer and get analyzed results with all the configured recognizers - default and new custom recognizers", "_____no_output_____" ] ], [ [ "analyzer_results = analyzer.analyze(text=text_to_anonymize, language='en')\n\nanalyzer_results", "_____no_output_____" ] ], [ [ "# Anonymize Text with Identified PII Entities\n\n<br>Presidio Anonymizer iterates over the Presidio Analyzer result, and provides anonymization capabilities for the identified text.\n<br>The anonymizer provides 5 types of anonymizers - replace, redact, mask, hash and encrypt. 
The default is **replace**\n\n<br>The following code sample will:\n<ol>\n<li>Setup the anonymizer engine </li>\n<li>Create an anonymizer request - text to anonymize, list of anonymizers to apply and the results from the analyzer request</li>\n<li>Anonymize the text</li>\n</ol>", "_____no_output_____" ] ], [ [ "anonymizer = AnonymizerEngine()\n\nanonymized_results = anonymizer.anonymize(\n text=text_to_anonymize,\n analyzer_results=analyzer_results, \n anonymizers_config={\"DEFAULT\": AnonymizerConfig(\"replace\", {\"new_value\": \"<ANONYMIZED>\"}), \n \"PHONE_NUMBER\": AnonymizerConfig(\"mask\", {\"type\": \"mask\", \"masking_char\" : \"*\", \"chars_to_mask\" : 12, \"from_end\" : True}),\n \"TITLE\": AnonymizerConfig(\"redact\", {})}\n)\n\nprint(anonymized_results)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0dc2dd98258efe41e86fbf21dca308ac956c188
10,051
ipynb
Jupyter Notebook
Ch04a_OLS_Diagnostics.ipynb
PaulSoderlind/FinancialEconometrics
8fe42c13179954ade0bc926936b0e36f265e0d22
[ "MIT" ]
25
2019-11-11T18:07:45.000Z
2022-03-13T08:55:49.000Z
Ch04a_OLS_Diagnostics.ipynb
PaulSoderlind/FinancialEconometrics
8fe42c13179954ade0bc926936b0e36f265e0d22
[ "MIT" ]
null
null
null
Ch04a_OLS_Diagnostics.ipynb
PaulSoderlind/FinancialEconometrics
8fe42c13179954ade0bc926936b0e36f265e0d22
[ "MIT" ]
26
2019-01-12T17:52:06.000Z
2022-03-25T19:53:40.000Z
24.756158
137
0.487514
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0dc349217c064bcc581bc58ad0313d710a4301a
173,946
ipynb
Jupyter Notebook
lqcontrol/lqcontrol_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
9
2015-01-04T12:16:32.000Z
2019-10-13T12:25:11.000Z
lqcontrol/lqcontrol_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
null
null
null
lqcontrol/lqcontrol_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
4
2018-02-07T22:55:43.000Z
2019-12-01T15:16:04.000Z
400.797235
57,003
0.913088
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d0dc40b3f85d53508696676fae1a4237712bfe58
457,309
ipynb
Jupyter Notebook
_notebooks/2020-11-10-DLforCNN.ipynb
nitishsaDire/dLBlog
c4c5a23adb1bc8593e3a19e49f49664bd36536de
[ "Apache-2.0" ]
null
null
null
_notebooks/2020-11-10-DLforCNN.ipynb
nitishsaDire/dLBlog
c4c5a23adb1bc8593e3a19e49f49664bd36536de
[ "Apache-2.0" ]
2
2021-05-20T22:04:44.000Z
2021-09-28T05:34:14.000Z
_notebooks/2020-11-10-DLforCNN.ipynb
nitishsaDire/dLBlog
c4c5a23adb1bc8593e3a19e49f49664bd36536de
[ "Apache-2.0" ]
null
null
null
150.33169
51,264
0.72805
[ [ [ "# ML/DL techniques for Tabular Modeling PART I\n> In this part, I have explained Decision Trees.\n- toc: true \n- badges: true\n- comments: true", "_____no_output_____" ] ], [ [ "#hide\n# !pip install -Uqq fastbook\n\nimport fastbook\nfastbook.setup_book()", "/home/nitish/miniconda3/lib/python3.7/site-packages/torch/cuda/__init__.py:52: UserWarning: CUDA initialization: Found no NVIDIA driver on your system. Please check that you have an NVIDIA GPU and installed a driver from http://www.nvidia.com/Download/index.aspx (Triggered internally at /pytorch/c10/cuda/CUDAFunctions.cpp:100.)\n return torch._C._cuda_getDeviceCount() > 0\n" ], [ "#hide\nfrom fastbook import *\nfrom kaggle import api\nfrom pandas.api.types import is_string_dtype, is_numeric_dtype, is_categorical_dtype\nfrom fastai.tabular.all import *\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom dtreeviz.trees import *\nfrom IPython.display import Image, display_svg, SVG\nimport numpy as np\nimport matplotlib.pyplot as plt\n\npd.options.display.max_rows = 20\npd.options.display.max_columns = 8", "_____no_output_____" ], [ "#hide\n\n# api.competition_download_cli('bluebook-for-bulldozers')\n# file_extract('bluebook-for-bulldozers.zip')\ndf = pd.read_csv('/home/nitish/Downloads/bluebook-bulldozers/TrainAndValid.csv', low_memory=False)", "_____no_output_____" ] ], [ [ "## Introduction\nTabular Modelling takes data in the form of a table, where generally we want to learn about a column's value from all the other columns' values. The column we want to learn is known as a dependent variable and others are known as independent variables. The learning could be both like a classification problem or regression problem. We will look into various machine learning models such as decision trees, random forests, etc, also we'll look for what deep learning has to offer in tabular modeling.", "_____no_output_____" ], [ "## Dataset\nI will be using [Kaggle competition](https://www.kaggle.com/c/bluebook-for-bulldozers) dataset on all the models so that it will be easier to understand and compare different models. I have loaded it into a dataframe df.", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ] ], [ [ "The key fields are in train.csv are:\n\n- SalesID: the unique identifier of the sale\n- MachineID: the unique identifier of a machine. A machine can be sold multiple times\n- saleprice: what the machine sold for at auction (only provided in train.csv)\n- saledate: the date of the sale\n\nFor this competition, we need to predict the log of the sale price of bulldozers sold at auctions. We will try to build different ML and DL models which will be predicting $log$(sale price).", "_____no_output_____" ], [ "## Decision Trees\nA decision tree makes a split in data based on the values of a column. For example, suppose we have data for different persons for their age, whether they eat healthy, whether they exercise, etc, and want to predict whether they are fit or unfit based on the data then we can use the following decision tree.", "_____no_output_____" ], [ "![](images/blog5_1.png \"Credit:fast.ai\")", "_____no_output_____" ], [ "At each level, the data is divided into 2 groups for the next level, e.g. at the first level, whether age<30 or not divides the whole dataset into 2 smaller datasets, and similary the data is split again until we reach leaf node of 2 classes: FIT or UNFIT. \n\nIn the real world, data is way more complex containing a lot of columns. 
E.g., in our dataframe df, there are 53 columns. So the question arises which column to chose for each split and what should be the value at which it is split. The answer is to try for every column and each value present in a column for the split. So if there are n columns and each column have x different values then we need to try n\\*x splits and chose the best one on some criteria. When trying a split, then whole data will be divided into 2 groups for that level, so we can take the average of the sale price of a group as the predicted sale price for all the rows in that group, and can calculate rmse distance between predictions and actual sale price. This will give us a number, which is our loss value, if bigger tells our predictions are far from the actual sale price and vice-versa. So the algorithm for building a decision tree could be written as:\n1. Loop through all the columns in the training dataset.\n1. Loop through all the possible values for a column. If the column contains categorical data then chose the condition as \"equal to\" a category and \"not equal to\" a category. If the column contains continuous data then for all the distinct values split on \"less than equal to\" and \"greater than\" the value.\n1. Find the average sale price for each of the groups, this is our prediction. Calculate rmse from the actual values of the saleprice.\n1. The rmse of a split could be set as the sum of rmse for all groups after the split.\n1. After looping through all the columns and all possible splits for each column chose the split with the least rmse.\n1. Continue the same process recursively on the child groups until some stopping criteria are reached like maximum number of the leaf nodes, minimum number of data items per group, etc.", "_____no_output_____" ], [ "Below is given an example of a decision tree. In the root node, the value is simply the average of all the training dataset which would be the most simple prediction we could calculate for a new datapoint is to simply give a prediction of 10.1 every time. Mean Square Error (mse) is 0.48, and there is a total of 404710 samples, which is actually the total number of samples in the training dataset.\n\nNow for the split, it has tried all the columns and all the possible values for a column, and it came with $Coupler\\_System \\leq 0.5$ split. This would split the whole dataset into two smaller datasets. When the condition is True it resulted in 360847 samples, with mse of 0.42 and an average value of sale price as 10.21. When the condition is False it resulted in 43863 samples, with mse of 0.12 and an average sale price value of 9.21. It could be seen that this split has improved our prediction, and our model has learnt some pattern because now the weighted average mse is (360847 * 0.42 + 43863 * 0.12)/(404710) = 0.38 < 0.48.\n\nSimilarly, splitting the \"True condition child\" on $YearMade \\leq 0.42$ further decreases the mse, which means our predictions are further closer to the actual values\n", "_____no_output_____" ], [ "![](images/blog5_2.png \"Credit:fast.ai\")", "_____no_output_____" ], [ "## Overfitting and Underfitting in decision trees\nUnderfitting in the decision trees will be when we make very few splits, or no splits at all, e.g., in the root node the average value is 10.1, and if we use this value as prediction, then it's clearly a naive solution to a complex problem, which is therefore an underfitting. 
This is the case of high bias and low variance.\n\nOverfitting will be when there are way too many splits such that in extreme case there is one training sample per leaf node, which is actually the model has memorized the training dataset. It is overfitting because although the mse will be 0 for the training dataset, it will be very high for the validation dataset, as the model will fail to generalize on unseen datapoints. This is the case of low bias and high variance.", "_____no_output_____" ], [ "Data Generation step:", "_____no_output_____" ] ], [ [ "x = np.linspace(0, 10, 110)\ny = x + np.random.randn(110)\nmy_list = [0]*30 + [1]*80\nrandom.shuffle(my_list)\nmy_list = [True if i==1 else False for i in my_list]\ntr_x, tr_y = x[np.where(my_list)[0]],y[np.where(my_list)[0]]\nmy_list = [not elem for elem in my_list]\nval_x, val_y = x[np.where(my_list)[0]],y[np.where(my_list)[0]] \ntr_x = tr_x.reshape(tr_x.shape[0],1)\nval_x = val_x.reshape(val_x.shape[0],1)", "_____no_output_____" ] ], [ [ "### Underfitting Case\nIn the underfitting case, I have set max_leaf_nodes=2, so that bias will be high.", "_____no_output_____" ] ], [ [ "m = DecisionTreeRegressor(max_leaf_nodes=2)\n\nm.fit(tr_x, tr_y);\nfig, ax = plt.subplots(figsize=(16,8))\nax.scatter(x,y, marker='+', label='actual data')\nax.scatter(tr_x, m.predict(tr_x), label='predicted data on training dataset')\nax.scatter(val_x, m.predict(val_x), label='predicted data on validation dataset')\n\nax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\nax.grid(which='major', axis='both', linestyle=':', linewidth = 1, color='b')\n\n\nax.set_xlabel(\"x\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.set_ylabel(\"y\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.legend(prop={\"size\":15})", "_____no_output_____" ] ], [ [ "In the above example, I have generated a dataset with $x = y + \\epsilon$, where $\\epsilon \\in N(0,1)$ is the random noise. I have generated this data because it's 2-d data, much simpler, and easy to visualize than the complex Kaggle dataset.\n\nA decision tree is implemented which tries to learn the relationship between x and y and predicts y from x. Training data is randomly chosen 80 samples from 110 samples, and the remaining 30 are in validation data. Stopping criteria is set as the max number of leaf nodes = 10. In the above figure, the orange ones are training samples and the green ones are validation samples.", "_____no_output_____" ] ], [ [ "print(f'Training rmse is {m_rmse(m, tr_x, tr_y)}, and validation rmse is {m_rmse(m, val_x, val_y)}')\ndraw_tree(m, pd.DataFrame([tr_x,tr_y], columns=['tr_y']))", "Training rmse is 1.6552681120862984, and validation rmse is 1.787770216476971\n" ] ], [ [ "### Overfitting Case\nIn the overfitting case, I have set max_leaf_nodes=100. This leads to a huge decision tree with each leaf node containing one training example only. 
Therefore, bias will be zero, the variance will be high and there will be overfitting.", "_____no_output_____" ] ], [ [ "m = DecisionTreeRegressor(max_leaf_nodes=100)\n\nm.fit(tr_x, tr_y);\nfig, ax = plt.subplots(figsize=(16,8))\nax.scatter(x,y, marker='+', label='actual data')\nax.scatter(tr_x, m.predict(tr_x), label='predicted data on training dataset')\nax.scatter(val_x, m.predict(val_x), label='predicted data on validation dataset')\n\nax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\nax.grid(which='major', axis='both', linestyle=':', linewidth = 1, color='b')\n\n\nax.set_xlabel(\"x\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.set_ylabel(\"y\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.legend(prop={\"size\":15})", "_____no_output_____" ], [ "print(f'Training rmse is {m_rmse(m, tr_x, tr_y)}, and validation rmse is {m_rmse(m, val_x, val_y)}')\ndraw_tree(m, pd.DataFrame([tr_x,tr_y], columns=['tr_y']))", "Training rmse is 0.0, and validation rmse is 1.2925307400522905\n" ] ], [ [ "### Balanced Case\nIn the balanced case, I have set max_leaf_nodes=10. This leads to a nice decision tree implementation with better generalization power than both above cases, which could be confirmed by seeing training and validation losses.", "_____no_output_____" ] ], [ [ "m = DecisionTreeRegressor(max_leaf_nodes=10)\n\nm.fit(tr_x, tr_y);\nfig, ax = plt.subplots(figsize=(16,8))\nax.scatter(x,y, marker='+', label='actual data')\nax.scatter(tr_x, m.predict(tr_x), label='predicted data on training dataset')\nax.scatter(val_x, m.predict(val_x), label='predicted data on validation dataset')\n\nax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\nax.grid(which='major', axis='both', linestyle=':', linewidth = 1, color='b')\n\n\nax.set_xlabel(\"x\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.set_ylabel(\"y\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.legend(prop={\"size\":15})", "_____no_output_____" ], [ "#hide\ndef rmse(preds, target):\n return np.sqrt(np.mean((preds-target)**2))\n\ndef m_rmse(m, x, y):\n return rmse(m.predict(x), y)", "_____no_output_____" ] ], [ [ "It seems like a good fit because it's neither overfitting nor underfitting.", "_____no_output_____" ], [ "Below is the training and validation losses and complete decision tree as generated by the algorithm.", "_____no_output_____" ] ], [ [ "print(f'Training rmse is {m_rmse(m, tr_x, tr_y)}, and validation rmse is {m_rmse(m, val_x, val_y)}')\ndraw_tree(m, pd.DataFrame([tr_x,tr_y], columns=['tr_y']))", "Training rmse is 0.7629283571148474, and validation rmse is 1.0146270166850742\n" ] ], [ [ "## Extrapolation problem\nThe decision tree suffers from a serious drawback when trying to predict them on data outside the domain of the current dataset. 
Suppose we have split the dataset into training and validation such as included the first 80 datapoints in the training and the remaining 30 datapoints in the validation dataset, like:\n```python\ntr_x, tr_y = x[:80], y[:80]\nval_x, val_y = x[80:], y[80:]\n```", "_____no_output_____" ] ], [ [ "m = DecisionTreeRegressor(max_leaf_nodes=10)\ntr_x, tr_y = x[:80], y[:80]\nval_x, val_y = x[80:], y[80:]\ntr_x = tr_x.reshape(80,1)\nval_x = val_x.reshape(30,1)\nm.fit(tr_x, tr_y);\nfig, ax = plt.subplots(figsize=(16,8))\nax.scatter(x,y, marker='+', label='actual data')\nax.scatter(tr_x, m.predict(tr_x), label='predicted data on training dataset')\nax.scatter(val_x, m.predict(val_x), label='predicted data on validation dataset')\n\nax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(1))\nax.grid(which='major', axis='both', linestyle=':', linewidth = 1, color='b')\n\n\nax.set_xlabel(\"x\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.set_ylabel(\"y\", labelpad=5, fontsize=26, fontname='serif', color=\"blue\")\nax.legend(prop={\"size\":15})", "_____no_output_____" ] ], [ [ "In the above figure, because the validation data is in the range $x>7.2$ something, and training data has only seen datapoints which are in the range $0\\leq x\\leq7.2$, therefore validation data is out of the domain, and hence a poor extrapolation is done by decision trees. Because of this problem, and also high variance in predictions, a single decision tree is rarely used in practice. High Variance means high variance in the predictions, and it is because a little up and down in the training data could have changed the decision tree completely, and so the predictions will vary. A linear regression model has much less variance than a decision tree but bias is also higher than a decision tree.", "_____no_output_____" ], [ "## Conclusion\nWe have covered the most basic ML method for tabular data modeling. In the next parts, I will cover Random Forests and some DL methods. Also, there is no point in training a decision tree model on the Kaggle dataset because it will give poor results as the data is complex, and it needs some more sophisticated algorithms.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0dc5a305776a50556d6a7a907390e74ac48706e
338,547
ipynb
Jupyter Notebook
vizualations.ipynb
NirNagary/DataS
2def82d681653b04507520e022d22822c2b10861
[ "MIT" ]
null
null
null
vizualations.ipynb
NirNagary/DataS
2def82d681653b04507520e022d22822c2b10861
[ "MIT" ]
null
null
null
vizualations.ipynb
NirNagary/DataS
2def82d681653b04507520e022d22822c2b10861
[ "MIT" ]
null
null
null
329.646543
56,688
0.925815
[ [ [ "# Visualization principles\n\n1. Log scale\n2. Jitter\n3. Set the scale\n4. Text on plot\n", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport matplotlib as mpl", "_____no_output_____" ] ], [ [ "## Plotting binary variables\n\nNot directly connected to today's lesson. But many of you asked.\nLet look at a case were we have 2 binary variables: 'sex' and 'survived'", "_____no_output_____" ] ], [ [ "titanic = sns.load_dataset(\"titanic\")\ntitanic.head()\ntitanic.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 891 entries, 0 to 890\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 survived 891 non-null int64 \n 1 pclass 891 non-null int64 \n 2 sex 891 non-null object \n 3 age 714 non-null float64 \n 4 sibsp 891 non-null int64 \n 5 parch 891 non-null int64 \n 6 fare 891 non-null float64 \n 7 embarked 889 non-null object \n 8 class 891 non-null category\n 9 who 891 non-null object \n 10 adult_male 891 non-null bool \n 11 deck 203 non-null category\n 12 embark_town 889 non-null object \n 13 alive 891 non-null object \n 14 alone 891 non-null bool \ndtypes: bool(2), category(2), float64(2), int64(4), object(5)\nmemory usage: 80.6+ KB\n" ] ], [ [ "Use a barplot (two variables) or a countplot (one variable)", "_____no_output_____" ] ], [ [ "sns.barplot(x=\"sex\", y=\"survived\", hue=\"class\", data=titanic)\nplt.show()", "_____no_output_____" ], [ "sns.countplot(x=\"sex\", hue=\"class\", data=titanic)\nplt.show()", "_____no_output_____" ] ], [ [ "Or use a catplot for categorical data:", "_____no_output_____" ] ], [ [ "sns.catplot(x=\"sex\", y=\"survived\", hue=\"class\", kind=\"bar\", data=titanic)\nplt.show()", "_____no_output_____" ], [ "sns.catplot(x=\"sex\", hue=\"class\", kind=\"count\", data=titanic)\nplt.show()", "_____no_output_____" ] ], [ [ "## Log scale", "_____no_output_____" ] ], [ [ "diamonds = sns.load_dataset(\"diamonds\")\ndiamonds.head()", "_____no_output_____" ], [ "sns.histplot(diamonds.price[diamonds.cut == 'Ideal'])", "_____no_output_____" ] ], [ [ "##### One option:", "_____no_output_____" ] ], [ [ "sns.histplot(diamonds.price[diamonds.cut == 'Ideal'], log_scale = True)", "_____no_output_____" ] ], [ [ "##### Another option:", "_____no_output_____" ] ], [ [ "sns.histplot(np.log2(diamonds.price[diamonds.cut == 'Ideal']))", "_____no_output_____" ] ], [ [ "### Stack histogram:", "_____no_output_____" ] ], [ [ "sns.histplot(\n diamonds,\n x=\"price\", hue=\"cut\",\n multiple=\"stack\", \n)", "_____no_output_____" ], [ "sns.set_theme(style=\"ticks\")\n\nf, ax = plt.subplots(figsize=(7, 5))\nsns.despine(f)\n\nsns.histplot(\n diamonds,\n x=\"price\", hue=\"cut\",\n multiple=\"stack\",\n palette=\"colorblind\",\n edgecolor=\".3\",\n linewidth=.5, \n log_scale = True\n)\nax.xaxis.set_major_formatter(mpl.ticker.ScalarFormatter())\nax.set_xticks([500, 1000, 2000, 5000, 10000])\nplt.show()", "_____no_output_____" ] ], [ [ "## Jitter in python\n\nGoogle it: [Jitter in python](https://www.google.com/search?q=jitter+in+python&sxsrf=ALeKk01NFy18kBeX8CmyToZAT-l4YIlJeQ%3A1621252840686&ei=6FqiYPSmKYzdkwXckaGgCw&oq=jitter&gs_lcp=Cgdnd3Mtd2l6EAMYADIECCMQJzIFCAAQkQIyBQgAEMsBMgUIABDLATICCAAyAggAMgUIABDLATICCAAyAggAMgIIADoECAAQQzoFCAAQsQM6CAgAELEDEJECOggILhCxAxCDAToFCC4QsQM6BwgAEIcCEBQ6AgguUJ8gWIcuYJg1aAFwAngAgAGdAYgB1giSAQMwLjiYAQCgAQGqAQdnd3Mtd2l6wAEB&sclient=gws-wiz)\n\nDocumentation contains such a good example we'll just [follow 
it](https://seaborn.pydata.org/generated/seaborn.stripplot.html)", "_____no_output_____" ] ], [ [ "tips = sns.load_dataset(\"tips\")", "_____no_output_____" ], [ "tips.head()", "_____no_output_____" ], [ "sns.stripplot(x=\"day\", y=\"total_bill\", data=tips)", "_____no_output_____" ] ], [ [ "Use a smaller amount of jitter:", "_____no_output_____" ] ], [ [ "sns.stripplot(x=\"day\", y=\"total_bill\", data=tips, jitter=0.05)", "_____no_output_____" ] ], [ [ "Jitter plus a boxplot:", "_____no_output_____" ] ], [ [ "ax = sns.boxplot(x=\"tip\", y=\"day\", data=tips)\nax = sns.stripplot(x=\"tip\", y=\"day\", data=tips, color=\".3\")\nax = sns.boxplot(x=\"tip\", y=\"day\", data=tips)", "_____no_output_____" ] ], [ [ "## Set the scale\n\nGoogle it: [set scale seaborn](https://www.google.com/search?q=set+scale+seaborn&sxsrf=ALeKk02NiH79RWrRRXIqusuG-vHfuyIm2A%3A1621254123926&ei=61-iYOiGOMyxkwWAiZjICQ&oq=set+scale+sea&gs_lcp=Cgdnd3Mtd2l6EAMYADIECCMQJzIGCAAQFhAeMgYIABAWEB4yBggAEBYQHjoHCCMQsAMQJzoHCAAQRxCwAzoCCAA6BQghEKABOggIABAIEA0QHlC8EVjyJmCULGgEcAJ4AIABogGIAbgHkgEDMC43mAEAoAEBqgEHZ3dzLXdpesgBCcABAQ&sclient=gws-wiz)", "_____no_output_____" ], [ "##### One option:", "_____no_output_____" ] ], [ [ "ax = sns.stripplot(x=\"day\", y=\"total_bill\", data=tips, jitter=0.05)\nax.set(ylim=(0, 100))", "_____no_output_____" ] ], [ [ "##### Another option:", "_____no_output_____" ] ], [ [ "plt.ylim(0, 400)\nax = sns.stripplot(x=\"day\", y=\"total_bill\", data=tips, jitter=0.05)", "_____no_output_____" ] ], [ [ "## Add labels onto the plot\n\nGoogle it: [add text to plot seaborn](https://www.google.com/search?q=add+text+to+plot+seaborn&sxsrf=ALeKk01vym2w-SfYoAOBXBgUbDCr0I04Uw%3A1621255993821&ei=OWeiYObWMdCTkwXRoIngCw&oq=add+text+to+plot+seaborn&gs_lcp=Cgdnd3Mtd2l6EAMyAggAMgYIABAWEB4yBggAEBYQHjoHCCMQsAMQJzoHCAAQRxCwAzoECAAQQzoGCAAQBxAeUJAcWKgzYJs1aAFwAngAgAGeAYgBmgqSAQQwLjEwmAEAoAEBqgEHZ3dzLXdpesgBCcABAQ&sclient=gws-wiz&ved=0ahUKEwim1-ec4dDwAhXQyaQKHVFQArwQ4dUDCA4&uact=5_)", "_____no_output_____" ] ], [ [ "penguins = sns.load_dataset(\"penguins\")\npenguins.head()", "_____no_output_____" ] ], [ [ "With a legend:", "_____no_output_____" ] ], [ [ "ax = sns.scatterplot(data=penguins, x=\"bill_length_mm\", y=\"bill_depth_mm\", hue = 'species', palette = 'colorblind')", "_____no_output_____" ] ], [ [ "Without a legend but with text:", "_____no_output_____" ] ], [ [ "ax = sns.scatterplot(data=penguins, x=\"bill_length_mm\", y=\"bill_depth_mm\", hue = 'species', palette = 'colorblind', legend=False)\n\nstyle = dict(size=12, color='black')\nax.text(35, 15, \"Adelie\", **style)\nax.text(55, 20, \"Chinstrap\", **style)\nax.text(52, 14, \"Gentoo\", **style)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0dc5dbfd68b0e8eeccbd34ca836750e777e94b9
7,888
ipynb
Jupyter Notebook
Programming/sycl/syclBlur.ipynb
rkurniawati/modules
98bfebe058a54cc89ea49166523bbea07fda2d3b
[ "CC-BY-4.0" ]
6
2021-06-04T12:22:24.000Z
2021-12-10T16:10:23.000Z
Programming/sycl/syclBlur.ipynb
rkurniawati/modules
98bfebe058a54cc89ea49166523bbea07fda2d3b
[ "CC-BY-4.0" ]
4
2021-08-17T15:15:20.000Z
2021-08-22T21:50:28.000Z
Programming/sycl/syclBlur.ipynb
rkurniawati/modules
98bfebe058a54cc89ea49166523bbea07fda2d3b
[ "CC-BY-4.0" ]
12
2021-04-11T21:14:38.000Z
2022-02-17T17:20:41.000Z
23.759036
323
0.481237
[ [ [ "This notebook will set up colab so that you can run the SYCL blur lab for the module \"Introduction to SYCYL programming\" created by the TOUCH project. (https://github.com/TeachingUndergradsCHC/modules/tree/master/Programming/sycl). The initial setup instructions are created following slides by Aksel Alpay\nhttps://www.iwocl.org/wp-content/uploads/iwocl-syclcon-2020-alpay-32-slides.pdf\nand the hipSCYL documentation https://github.com/illuhad/hipSYCL/blob/develop/doc/installing.md .\n\nBegin by setting your runtime to use a CPU (Select \"Change runtime type\" in the Runtime menu and choose \"CPU\".) Then run the first couple of instructions below. Run them one at a time, waiting for each to finish before beginning the next. This will take several minutes.", "_____no_output_____" ], [ "Update repositories and then get and build llvm so can build hipSYCL.", "_____no_output_____" ] ], [ [ "!apt update -qq;\n!apt-get update -qq;\n!add-apt-repository -y ppa:ubuntu-toolchain-r/test\n!apt update\n!apt install gcc-11 g++-11\n!bash -c \"$(wget -O - https://apt.llvm.org/llvm.sh)\"\n!apt-get install libboost-all-dev libclang-13-dev cmake python -qq;\n!git clone --recurse-submodules https://github.com/illuhad/hipSYCL", "_____no_output_____" ], [ "!apt-get upgrade", "_____no_output_____" ] ], [ [ "Now build hipSYCL", "_____no_output_____" ] ], [ [ "\n!mkdir hipSYCL_build\n%cd hipSYCL_build\n!export CC=/usr/bin/gcc-11\n!export CXX=/usr/bin/g++-11\n!cmake -DCMAKE_INSTALL_PREFIX=/content/hipSYCL_install -DCMAKE_C_COMPILER=/usr/bin/gcc-11 -DCMAKE_CXX_COMPILER=/usr/bin/g++-11 /content/hipSYCL\n!make install\n%cd ..", "_____no_output_____" ] ], [ [ "Get the examples", "_____no_output_____" ] ], [ [ "!git clone https://github.com/TeachingUndergradsCHC/modules\n%cd modules/Programming/sycl", "_____no_output_____" ] ], [ [ "Examine hello.cpp", "_____no_output_____" ] ], [ [ "!cat hello.cpp", "_____no_output_____" ] ], [ [ "Now compile hello.cpp", "_____no_output_____" ] ], [ [ "!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o hello hello.cpp", "_____no_output_____" ] ], [ [ "Then run it", "_____no_output_____" ] ], [ [ "!./hello", "_____no_output_____" ] ], [ [ "Now try the addVector program, first view it", "_____no_output_____" ] ], [ [ "\n!cat addVectors.cpp", "_____no_output_____" ] ], [ [ "Then compile it", "_____no_output_____" ] ], [ [ "!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o addVectors addVectors.cpp", "_____no_output_____" ] ], [ [ "Finally run it", "_____no_output_____" ] ], [ [ "!./addVectors", "_____no_output_____" ] ], [ [ "Next, examine the files that you'll need for the blur project. These are the library code for managing bmp files (stb_image.h and stb_image_write.h), the image that you'll be using (I provide 640x426.bmp, but you could use another file instead) and the program itself noRed.cpp. 
Then compile it", "_____no_output_____" ] ], [ [ "!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o noRed noRed.cpp\n\n", "_____no_output_____" ] ], [ [ "Now run the code", "_____no_output_____" ] ], [ [ "!./noRed", "_____no_output_____" ] ], [ [ "Original Image", "_____no_output_____" ] ], [ [ "from IPython.display import display\nfrom PIL import Image\n\n\npath=\"/content/modules/Programming/sycl/640x426.bmp\"\ndisplay(Image.open(path))", "_____no_output_____" ] ], [ [ "Final Image", "_____no_output_____" ] ], [ [ "from IPython.display import display\nfrom PIL import Image\n\n\npath=\"/content/modules/Programming/sycl/out.bmp\"\ndisplay(Image.open(path))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0dc73ce56af30fe9ed51874fe4ebe203c20da00
8,318
ipynb
Jupyter Notebook
examples/gmm/hist.ipynb
infergo-ml/infergo
9d52453cfea1943692287a2096147cfa2fe24ff1
[ "MIT" ]
7
2019-05-22T07:19:37.000Z
2021-12-20T14:15:11.000Z
examples/gmm/hist.ipynb
infergo-ml/infergo
9d52453cfea1943692287a2096147cfa2fe24ff1
[ "MIT" ]
null
null
null
examples/gmm/hist.ipynb
infergo-ml/infergo
9d52453cfea1943692287a2096147cfa2fe24ff1
[ "MIT" ]
1
2020-11-21T08:52:07.000Z
2020-11-21T08:52:07.000Z
96.72093
6,676
0.865232
[ [ [ "import matplotlib.pyplot as plt\nimport numpy\n%matplotlib inline", "_____no_output_____" ], [ "x = numpy.genfromtxt(\"data.csv\")", "_____no_output_____" ], [ "plt.hist(x)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code" ] ]
d0dc92d438ad1402b663768d0fc7784e24031713
10,594
ipynb
Jupyter Notebook
Classification_of_Chest_and_Abdominal_X_rays.ipynb
sabiipoks/AI-in-healthcare
86aa092eff97cbd0144fb92adb747282be4949a3
[ "MIT" ]
2
2020-05-09T22:11:46.000Z
2020-08-10T14:35:13.000Z
Classification_of_Chest_and_Abdominal_X_rays.ipynb
sabiipoks/AI-in-healthcare
86aa092eff97cbd0144fb92adb747282be4949a3
[ "MIT" ]
null
null
null
Classification_of_Chest_and_Abdominal_X_rays.ipynb
sabiipoks/AI-in-healthcare
86aa092eff97cbd0144fb92adb747282be4949a3
[ "MIT" ]
4
2019-10-07T15:59:54.000Z
2021-09-22T19:12:33.000Z
32.29878
142
0.521144
[ [ [ "# Classification of Chest and Abdominal X-rays", "_____no_output_____" ], [ "Code Source: Lakhani, P., Gray, D.L., Pett, C.R. et al. J Digit Imaging (2018) 31: 283. https://doi.org/10.1007/s10278-018-0079-6\n\nThe code to download and prepare dataset had been modified form the original source code.", "_____no_output_____" ] ], [ [ "# load requirements for the Keras library\nfrom keras import applications\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D\nfrom keras.models import Model\nfrom keras.optimizers import Adam", "_____no_output_____" ], [ "!rm -rf /content/*", "_____no_output_____" ], [ "# Download dataset\n!wget https://github.com/paras42/Hello_World_Deep_Learning/raw/9921a12c905c00a88898121d5dc538e3b524e520/Open_I_abd_vs_CXRs.zip", "_____no_output_____" ], [ "!ls /content", "_____no_output_____" ], [ "# unzip\n!unzip /content/Open_I_abd_vs_CXRs.zip", "_____no_output_____" ], [ "# dimensions of our images\nimg_width, img_height = 299, 299\n\n# directory and image information\ntrain_data_dir = 'Open_I_abd_vs_CXRs/TRAIN/'\nvalidation_data_dir = 'Open_I_abd_vs_CXRs/VAL/'\n\n# epochs = number of passes of through training data\n# batch_size = number of images processes at the same time\ntrain_samples = 65\nvalidation_samples = 10\nepochs = 20\nbatch_size = 5", "_____no_output_____" ], [ "# build the Inception V3 network, use pretrained weights from ImgaeNet\n# remove top funnly connected layers by imclude_top=False\n\nbase_model = applications.InceptionV3(weights='imagenet', include_top=False,\n input_shape=(img_width, img_height,3))", "_____no_output_____" ], [ "# build a classifier model to put on top of the convolutional model\n# This consists of a global average pooling layer and a fully connected layer with 256 nodes\n# Then apply dropout and signoid activation\n\nmodel_top = Sequential()\nmodel_top.add(GlobalAveragePooling2D(input_shape=base_model.output_shape[1:],\n data_format=None)),\nmodel_top.add(Dense(256, activation='relu'))\nmodel_top.add(Dropout(0.5))\nmodel_top.add(Dense(1, activation='sigmoid'))\nmodel = Model(inputs=base_model.input, outputs=model_top(base_model.output))\n\n# Compile model using Adam optimizer with common values and binary cross entropy loss\n# USe low learning rate (lr) for transfer learning\nmodel.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),\n loss='binary_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ], [ "# Some on-the-fly augmentation options\ntrain_datagen = ImageDataGenerator(\n rescale = 1./255, # Rescale pixel values to 0-1 to aid CNN processing\n shear_range = 0.2, # 0-1 range for shearing\n zoom_range = 0.2, # 0-1 range for zoom\n rotation_range = 20, # 0.180 range, degrees of rotation\n width_shift_range = 0.2, # 0-1 range horizontal translation\n height_shift_range = 0.2, # 0-1 range vertical translation\n horizontal_flip = True # set True or false\n)\n\nval_datagen = ImageDataGenerator(\n rescale=1./255 # Rescale pixel values to 0-1 to aid CNN processing\n)", "_____no_output_____" ], [ "# Directory, image size, batch size already specied above\n# Class mode is set to 'binary' for a 2-class problem\n# Generator randomly shuffles and presents images in batches to the network\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n 
class_mode='binary'\n)\n\nvalidation_generator = val_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode='binary'\n)", "_____no_output_____" ], [ "# Fine-tune the pretrained Inception V3 model using the data generator\n# Specify steps per epoch (number of samples/batch_size)\n\nhistory = model.fit_generator(\n train_generator,\n steps_per_epoch=train_samples//batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=validation_samples//batch_size\n)", "_____no_output_____" ], [ "# import matplotlib library, and plot training curve\nimport matplotlib.pyplot as plt\nprint(history.history.keys())\n\nplt.figure()\nplt.plot(history.history['acc'],'orange', label='Training accuracy')\nplt.plot(history.history['val_acc'],'blue', label='Validation accuracy')\nplt.plot(history.history['loss'],'red', label='Training loss')\nplt.plot(history.history['val_loss'],'green', label='validation loss')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "# import numpy and keras preprocessing libraries\nimport numpy as np\nfrom keras.preprocessing import image\n\n# load, resize, and display test images\nimg_path = 'Open_I_abd_vs_CXRs/TEST/abd2.png'\nimg_path2 = 'Open_I_abd_vs_CXRs/TEST/chest2.png'\nimg = image.load_img(img_path, target_size=(img_width, img_height))\nimg2 = image.load_img(img_path2, target_size=(img_width, img_height))\nplt.imshow(img)\nplt.show()\n\n# convert image to numpy array, so Keras can render a prediction\nimg = image.img_to_array(img)\n\n# expand array from 3 dimensions (height, width, channels) to 4 dimensions (batch size, height, width, channels)\n# rescale pixel values to 0-1\nx = np.expand_dims(img, axis=0) * 1./255\n\n# get prediction on test image\nscore = model.predict(x)\nprint('Predicted:', score, 'Chest X-ray' if score < 0.5 else 'Abd X-ray')\n\n# display and render a prediction for the 2nd image\nplt.imshow(img2)\nplt.show()\nimg2 = image.img_to_array(img2)\nx = np.expand_dims(img2, axis=0) * 1./255\nscore = model.predict(x)\nprint('Predicted:', score, 'Chest X-ray' if score < 0.5 else 'Abd X-ray')\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dc9fcb33fbf4ddaa85183512d27b31f91f8614
550,985
ipynb
Jupyter Notebook
Zesty_AI_Data_Scientist_Assignment_|_Hadi_Sotudeh.ipynb
hadisotudeh/zestyAI_challenge
20f9bd2b2b158422d2640e1793d496a95aee1e14
[ "MIT" ]
1
2021-05-28T13:26:02.000Z
2021-05-28T13:26:02.000Z
Zesty_AI_Data_Scientist_Assignment_|_Hadi_Sotudeh.ipynb
hadisotudeh/zestyAI_challenge
20f9bd2b2b158422d2640e1793d496a95aee1e14
[ "MIT" ]
null
null
null
Zesty_AI_Data_Scientist_Assignment_|_Hadi_Sotudeh.ipynb
hadisotudeh/zestyAI_challenge
20f9bd2b2b158422d2640e1793d496a95aee1e14
[ "MIT" ]
null
null
null
85.081069
49,498
0.653121
[ [ [ "<a href=\"https://colab.research.google.com/github/hadisotudeh/zestyAI_challenge/blob/main/Zesty_AI_Data_Scientist_Assignment_%7C_Hadi_Sotudeh.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "<center> <h1><b>Zesty AI Data Science Interview Task - Hadi Sotudeh</b></h1> </center>", "_____no_output_____" ], [ "To perform this task, I had access to the [`2009 RESIDENTIAL ENERGY CONSUMPTION SURVEY`](https://www.eia.gov/consumption/residential/data/2009/index.php?view=microdata) to predict `electricity consumption`.\n</br>\n</br>\nLibraries available in Python such as `scikit-learn` and `fastai` were employed to perform this machine learning regression task.\n</br>\n</br>\nFirst, I need to install the notebook dependencies, import the relevant libraries, download the dataset, and have them available in Google Colab (next cell).", "_____no_output_____" ], [ "## Install Dependencies, Import Libraries, and Download the dataset", "_____no_output_____" ] ], [ [ "%%capture\n\n# install dependencies\n!pip install fastai --upgrade", "_____no_output_____" ], [ "# Import Libraries\n\n# general libraries\nimport warnings\nimport os\nfrom datetime import datetime\nfrom tqdm import tqdm_notebook as tqdm\n\n# machine learning libraries\nimport pandas as pd\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom fastai.tabular.all import *\nfrom sklearn.ensemble import RandomForestRegressor\nfrom pandas_profiling import ProfileReport\nimport joblib\nfrom xgboost import XGBRegressor\nfrom lightgbm import LGBMRegressor\n\n# model interpretation library\nfrom sklearn.inspection import plot_partial_dependence", "_____no_output_____" ], [ "%%capture\n\n#download the dataset\n! wget https://www.eia.gov/consumption/residential/data/2009/csv/recs2009_public.csv", "_____no_output_____" ] ], [ [ "## Set Global parameters", "_____no_output_____" ], [ "The electric consumption is located in the `KWH` field of the dataset.", "_____no_output_____" ] ], [ [ "#show plots inside the jupyter notebook\n%matplotlib inline\n\n# pandas settings to show more columns are rows in the jupyter notebook\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 50000)\n\n# don't show warnings\nwarnings.filterwarnings('ignore')\n\n# dataset file path\ndataset = \"recs2009_public.csv\"\n\n# target variable to predict\ndep_var = \"KWH\"", "_____no_output_____" ] ], [ [ "## Read the dataset from CSV files, Perform Data Cleaning, and Feature Engineering", "_____no_output_____" ], [ "Following a typical machine learning project, I first clean up the dataset to prevent data-leakage related features or non-relevant features.</br></br>It is important to mention that I did not first look at each column to figure out which feature to keep or not. What I did first was to train a model and iteratively look at the feature importances and check their meanings in the dataset documentation to figure out what features to remove to prevent data leakage.</br></br>In addition, a group of features with high correlations were identified and only one of them in each group was kept. 
", "_____no_output_____" ] ], [ [ "# read the train file\ndf = pd.read_csv(dataset)\n\n# remove data-leakage and non-relevant features\nnon_essential_features = [\"KWHSPH\",\"KWHCOL\",\"KWHWTH\",\"KWHRFG\",\"KWHOTH\",\"BTUEL\",\"BTUELSPH\",\"BTUELCOL\",\n \"BTUELWTH\",\"BTUELRFG\",\"BTUELOTH\",\"DOLLAREL\",\"DOLELSPH\",\"DOLELCOL\",\"DOLELWTH\",\n \"DOLELRFG\",\"DOLELOTH\",\"TOTALBTUOTH\",\"TOTALBTURFG\",\"TOTALDOL\",\"ELWATER\",\n \"TOTALBTUWTH\",\"TOTALBTU\",\"ELWARM\",\"TOTALBTUCOL\",\"TOTALDOLCOL\",\n \"REPORTABLE_DOMAIN\",\"TOTALDOLWTH\",\"TOTALBTUSPH\",\"TOTCSQFT\",\"TOTALDOLSPH\",\n \"BTUNG\", \"BTUNGSPH\", \"BTUNGWTH\",\"BTUNGOTH\",\"DOLLARNG\",\"DOLNGSPH\",\"DOLNGWTH\",\"DOLNGOTH\",\n \"DIVISION\"\n ]\ndf.drop(columns = non_essential_features, inplace=True)\n\n# take the log of dependent variable ('price'). More details are in the training step.\ndf[dep_var] = np.log(df[dep_var])", "_____no_output_____" ] ], [ [ "I created train and validation sets with random selection (80% vs.20% rule) from the dataset[link text](https://) file in the next step.", "_____no_output_____" ] ], [ [ "splits = RandomSplitter(valid_pct=0.2)(range_of(df))\n\nprocs = [Categorify, FillMissing]\n\ncont, cat = cont_cat_split(df, 1, dep_var=dep_var)\n\nto = TabularPandas(df, procs, cat, cont, y_names=dep_var, splits = splits)", "_____no_output_____" ] ], [ [ "The following cell shows 5 random instances of the dataset (after cleaning and feature engineering).", "_____no_output_____" ] ], [ [ "to.show(5)", "_____no_output_____" ] ], [ [ "## Train the ML Model", "_____no_output_____" ], [ "Since model interpretation is also important for me, I chose RandomForest for both prediction and interpretation and knowledge discovery.", "_____no_output_____" ] ], [ [ "def rf(xs, y, n_estimators=40, max_features=0.5, min_samples_leaf=5, **kwargs):\n \"randomforst regressor\"\n return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators, max_features=max_features, min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)", "_____no_output_____" ], [ "xs,y = to.train.xs,to.train.y\nvalid_xs,valid_y = to.valid.xs,to.valid.y\n\nm = rf(xs, y)", "_____no_output_____" ] ], [ [ "The predictions are evaluated based on [Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed sales price](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview/evaluation) (Taking logs means that errors in predicting high electricity consumptions and low ones will affect the result equally). 
\n</br>\n</br>", "_____no_output_____" ] ], [ [ "def r_mse(pred,y): \n return round(math.sqrt(((pred-y)**2).mean()), 6)\n\ndef m_rmse(m, xs, y): \n return r_mse(m.predict(xs), y)", "_____no_output_____" ] ], [ [ "Print the Mean Root Squared Error of the logarithmic `KWH` on the train set:", "_____no_output_____" ] ], [ [ "m_rmse(m, xs, y)", "_____no_output_____" ] ], [ [ "Print the Mean Root Squared Error of the logarithmic `KWH` on the validation set:", "_____no_output_____" ] ], [ [ "m_rmse(m, valid_xs, valid_y)", "_____no_output_____" ] ], [ [ "Calculate Feature Importance and remove non-important features and re-train the model.", "_____no_output_____" ] ], [ [ "def rf_feat_importance(m, df):\n return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}).sort_values('imp', ascending=False)\n\n# show the top 10 features\nfi = rf_feat_importance(m, xs)\nfi[:10]", "_____no_output_____" ] ], [ [ "Only keep features with importance of more than 0.005 for re-training.", "_____no_output_____" ] ], [ [ "to_keep = fi[fi.imp>0.005].cols\nprint(f\"features to keep are : {list(to_keep)}\")", "features to keep are : ['TOTALDOLOTH', 'PELHOTWA', 'TOTALDOLRFG', 'ACROOMS', 'FUELHEAT', 'TEMPHOMEAC', 'FUELH2O', 'TYPEHUQ', 'REGIONC', 'WASHLOAD', 'TEMPNITEAC', 'DRYRFUEL', 'PELHEAT', 'CDD30YR', 'USECENAC', 'CUFEETNG', 'TEMPGONEAC', 'CUFEETNGOTH', 'BEDROOMS', 'CUFEETNGWTH']\n" ] ], [ [ "Some of the features to keep for re-training are:\n\n1. `TOTALDOLOTH`: Total cost for appliances, electronics, lighting, and miscellaneous\n\n2. `PELHOTWA`: Who pays for electricity used for water heating\n\n3. `ACROOMS`: Number of rooms cooled\n\n4. `TOTALDOLRFG`: Total cost for refrigerators, in whole dollars\n\n5. `REGIONC`: Census Region\n\n6. `TEMPNITEAC`: Temperature at night (summer)", "_____no_output_____" ] ], [ [ "xs_imp = xs[to_keep]\nvalid_xs_imp = valid_xs[to_keep]\n\nm = rf(xs_imp, y)", "_____no_output_____" ] ], [ [ "Print the loss function of the re-trained model on train and validation sets.", "_____no_output_____" ] ], [ [ "m_rmse(m, xs_imp, y), m_rmse(m, valid_xs_imp, valid_y)", "_____no_output_____" ] ], [ [ "Check the correlation among the final features and adjust the set of features to remove at the beginning of the code.", "_____no_output_____" ] ], [ [ "from scipy.cluster import hierarchy as hc\n\ndef cluster_columns(df, figsize=(10,6), font_size=12):\n corr = np.round(scipy.stats.spearmanr(df).correlation, 4)\n corr_condensed = hc.distance.squareform(1-corr)\n z = hc.linkage(corr_condensed, method='average')\n fig = plt.figure(figsize=figsize)\n hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size)\n plt.show()\n\ncluster_columns(xs_imp)", "_____no_output_____" ] ], [ [ "Store the re-trained model.", "_____no_output_____" ] ], [ [ "joblib.dump(m, 'model.joblib') ", "_____no_output_____" ] ], [ [ "## Interpret the Model and Do Knowledge Discovery", "_____no_output_____" ], [ "When I plot the feature importances of the trained model, I can clearly see that `TOTALDOLOTH` (Total cost for appliances, electronics, lighting, and miscellaneous uses in whole dollars) is the most important factor for the model to make its decisions.", "_____no_output_____" ] ], [ [ "def plot_fi(fi):\n return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False)\n\nplot_fi(rf_feat_importance(m, xs_imp));", "_____no_output_____" ] ], [ [ "In this section, I make use of the [Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) to interpret the learned 
function (ML model) and understand how this function makes decisions and predicts the household electricity usage (`log KWH`).<br/><br/>The 1D-feature plots show how much the predicted dependent variable (`log KWH`) changes on average when the feature shown on the x-axis changes by one unit (an increase or decrease).", "_____no_output_____" ] ], [ [ "explore_cols = ['TOTALDOLOTH','TOTALDOLRFG','ACROOMS','TEMPHOMEAC','TEMPNITEAC','CDD30YR','CUFEETNGOTH','WASHLOAD','CUFEETNG']\n\nexplore_cols_vals = [\"Total cost for appliances, electronics, lighting, and miscellaneous uses, in whole dollars\",\n                     \"Total cost for refrigerators, in whole dollars\",\n                     \"Number of rooms cooled\",\n                     \"Temperature when someone is home during the day (summer)\",\n                     \"Temperature at night (summer)\",\n                     \"Cooling degree days, 30-year average 1981-2010, base 65F\",\n                     \"Natural Gas usage for other purposes (all end-uses except SPH and WTH), in hundred cubic feet\",\n                     \"Frequency clothes washer used\",\n                     \"Total Natural Gas usage, in hundred cubic feet\"]\n\nfor index, col in enumerate(explore_cols):\n    fig,ax = plt.subplots(figsize=(12, 4))\n    plot_partial_dependence(m, valid_xs_imp, [col], grid_resolution=20, ax=ax);\n    x_label = explore_cols_vals[index]\n    plt.xlabel(x_label)", "_____no_output_____" ] ], [ [ "The 2D-feature plots show how much the dependent variable changes when the features shown on the x and y axes change by one unit (an increase or decrease).\n<br/>\n<br/>\nHere, the plot shows how much the model (learned function) changes its `log KWH` prediction on average when the two dimensions on the x and y axes change.", "_____no_output_____" ] ], [ [ "paired_features = [(\"TEMPNITEAC\",\"TEMPHOMEAC\"),(\"CUFEETNG\",\"CUFEETNGOTH\")]\npaired_features_vals = [(\"Temperature at night (summer)\",\"Temperature when someone is home during the day (summer)\"),\n                        (\"Total Natural Gas usage, in hundred cubic feet\",\"Natural Gas usage for other purposes (all end-uses except SPH and WTH), in hundred cubic feet\")]\n\n\nfor index, pair in enumerate(paired_features):\n    fig,ax = plt.subplots(figsize=(8, 8))\n    plot_partial_dependence(m, valid_xs_imp, [pair], grid_resolution=20, ax=ax);\n    \n    x_label = paired_features_vals[index][0]\n    y_label = paired_features_vals[index][1]\n\n    plt.xlabel(x_label)\n    plt.ylabel(y_label)", "_____no_output_____" ] ], [ [ "## THE END!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0dca0207e5467815adb8b37110b30c7d1d3e296
395,841
ipynb
Jupyter Notebook
1_1_Image_Representation/6_5. Accuracy and Misclassification.ipynb
Abdulrahman-Adel/CVND-Exercises
ec8618e1651b5302c37788b2383620d143fdd8e3
[ "MIT" ]
null
null
null
1_1_Image_Representation/6_5. Accuracy and Misclassification.ipynb
Abdulrahman-Adel/CVND-Exercises
ec8618e1651b5302c37788b2383620d143fdd8e3
[ "MIT" ]
null
null
null
1_1_Image_Representation/6_5. Accuracy and Misclassification.ipynb
Abdulrahman-Adel/CVND-Exercises
ec8618e1651b5302c37788b2383620d143fdd8e3
[ "MIT" ]
null
null
null
782.294466
159,484
0.951991
[ [ [ "# Day and Night Image Classifier\n---\n\nThe day/night image dataset consists of 200 RGB color images in two categories: day and night. There are equal numbers of each example: 100 day images and 100 night images.\n\nWe'd like to build a classifier that can accurately label these images as day or night, and that relies on finding distinguishing features between the two types of images!\n\n*Note: All images come from the [AMOS dataset](http://cs.uky.edu/~jacobs/datasets/amos/) (Archive of Many Outdoor Scenes).*\n", "_____no_output_____" ], [ "### Import resources\n\nBefore you get started on the project code, import the libraries and resources that you'll need.", "_____no_output_____" ] ], [ [ "import cv2 # computer vision library\nimport helpers\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Training and Testing Data\nThe 200 day/night images are separated into training and testing datasets. \n\n* 60% of these images are training images, for you to use as you create a classifier.\n* 40% are test images, which will be used to test the accuracy of your classifier.\n\nFirst, we set some variables to keep track of some where our images are stored:\n\n image_dir_training: the directory where our training image data is stored\n image_dir_test: the directory where our test image data is stored", "_____no_output_____" ] ], [ [ "# Image data directories\nimage_dir_training = \"day_night_images/training/\"\nimage_dir_test = \"day_night_images/test/\"", "_____no_output_____" ] ], [ [ "## Load the datasets\n\nThese first few lines of code will load the training day/night images and store all of them in a variable, `IMAGE_LIST`. This list contains the images and their associated label (\"day\" or \"night\"). \n\nFor example, the first image-label pair in `IMAGE_LIST` can be accessed by index: \n``` IMAGE_LIST[0][:]```.\n", "_____no_output_____" ] ], [ [ "# Using the load_dataset function in helpers.py\n# Load training data\nIMAGE_LIST = helpers.load_dataset(image_dir_training)\n", "_____no_output_____" ] ], [ [ "## Construct a `STANDARDIZED_LIST` of input images and output labels.\n\nThis function takes in a list of image-label pairs and outputs a **standardized** list of resized images and numerical labels.", "_____no_output_____" ] ], [ [ "# Standardize all training images\nSTANDARDIZED_LIST = helpers.standardize(IMAGE_LIST)", "_____no_output_____" ] ], [ [ "## Visualize the standardized data\n\nDisplay a standardized image from STANDARDIZED_LIST.", "_____no_output_____" ] ], [ [ "# Display a standardized image and its label\n\n# Select an image by index\nimage_num = 0\nselected_image = STANDARDIZED_LIST[image_num][0]\nselected_label = STANDARDIZED_LIST[image_num][1]\n\n# Display image and data about it\nplt.imshow(selected_image)\nprint(\"Shape: \"+str(selected_image.shape))\nprint(\"Label [1 = day, 0 = night]: \" + str(selected_label))\n", "Shape: (600, 1100, 3)\nLabel [1 = day, 0 = night]: 1\n" ] ], [ [ "# Feature Extraction\n\nCreate a feature that represents the brightness in an image. We'll be extracting the **average brightness** using HSV colorspace. 
Specifically, we'll use the V channel (a measure of brightness), add up the pixel values in the V channel, then divide that sum by the area of the image to get the average Value of the image.\n", "_____no_output_____" ], [ "---\n### Find the average brightness using the V channel\n\nThis function takes in a **standardized** RGB image and returns a feature (a single value) that represents the average level of brightness in the image. We'll use this value to classify the image as day or night.", "_____no_output_____" ] ], [ [ "# Find the average Value or brightness of an image\ndef avg_brightness(rgb_image):\n    # Convert image to HSV\n    hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)\n\n    # Add up all the pixel values in the V channel\n    sum_brightness = np.sum(hsv[:,:,2])\n    area = 600*1100.0  # pixels\n    \n    # find the avg\n    avg = sum_brightness/area\n    \n    return avg", "_____no_output_____" ], [ "# Testing average brightness levels\n# Look at a number of different day and night images and think about \n# what average brightness value separates the two types of images\n\n# As an example, a \"night\" image is loaded in and its avg brightness is displayed\nimage_num = 190\ntest_im = STANDARDIZED_LIST[image_num][0]\n\navg = avg_brightness(test_im)\nprint('Avg brightness: ' + str(avg))\nplt.imshow(test_im)", "Avg brightness: 35.217\n" ] ], [ [ "# Classification and Visualizing Error\n\nIn this section, we'll turn our average brightness feature into a classifier that takes in a standardized image and returns a `predicted_label` for that image. This `estimate_label` function should return a value: 0 or 1 (night or day, respectively).", "_____no_output_____" ], [ "---\n### TODO: Build a complete classifier \n\nComplete this code so that it returns an estimated class label given an input RGB image.", "_____no_output_____" ] ], [ [ "# This function should take in RGB image input\ndef estimate_label(rgb_image):\n    \n    # Extract average brightness feature from an RGB image \n    avg = avg_brightness(rgb_image)\n        \n    # Use the avg brightness feature to predict a label (0, 1)\n    predicted_label = 0\n    threshold = 100\n    if(avg > threshold):\n        # if the average brightness is above the threshold value, we classify it as \"day\"\n        predicted_label = 1\n    # else, the predicted_label can stay 0 (it is predicted to be \"night\")\n    \n    return predicted_label    \n    ", "_____no_output_____" ] ], [ [ "## Testing the classifier\n\nHere is where we test your classification algorithm using our test set of data that we set aside at the beginning of the notebook!\n\nSince we are using a pretty simple brightness feature, we may not expect this classifier to be 100% accurate. We'll aim for around 75-85% accuracy using this one feature.\n\n\n### Test dataset\n\nBelow, we load in the test dataset, standardize it using the `standardize` function you defined above, and then **shuffle** it; this ensures that order will not play a role in testing accuracy.\n", "_____no_output_____" ] ], [ [ "import random\n\n# Using the load_dataset function in helpers.py\n# Load test data\nTEST_IMAGE_LIST = helpers.load_dataset(image_dir_test)\n\n# Standardize the test data\nSTANDARDIZED_TEST_LIST = helpers.standardize(TEST_IMAGE_LIST)\n\n# Shuffle the standardized test data\nrandom.shuffle(STANDARDIZED_TEST_LIST)", "_____no_output_____" ] ], [ [ "## Determine the Accuracy\n\nCompare the output of your classification algorithm (a.k.a. 
your \"model\") with the true labels and determine the accuracy.\n\nThis code stores all the misclassified images, their predicted labels, and their true labels, in a list called `misclassified`.", "_____no_output_____" ] ], [ [ "# Constructs a list of misclassified images given a list of test images and their labels\ndef get_misclassified_images(test_images):\n # Track misclassified images by placing them into a list\n misclassified_images_labels = []\n\n # Iterate through all the test images\n # Classify each image and compare to the true label\n for image in test_images:\n\n # Get true data\n im = image[0]\n true_label = image[1]\n\n # Get predicted label from your classifier\n predicted_label = estimate_label(im)\n\n # Compare true and predicted labels \n if(predicted_label != true_label):\n # If these labels are not equal, the image has been misclassified\n misclassified_images_labels.append((im, predicted_label, true_label))\n \n # Return the list of misclassified [image, predicted_label, true_label] values\n return misclassified_images_labels\n", "_____no_output_____" ], [ "# Find all misclassified images in a given test set\nMISCLASSIFIED = get_misclassified_images(STANDARDIZED_TEST_LIST)\n\n# Accuracy calculations\ntotal = len(STANDARDIZED_TEST_LIST)\nnum_correct = total - len(MISCLASSIFIED)\naccuracy = num_correct/total\n\nprint('Accuracy: ' + str(accuracy))\nprint(\"Number of misclassified images = \" + str(len(MISCLASSIFIED)) +' out of '+ str(total))", "Accuracy: 0.925\nNumber of misclassified images = 12 out of 160\n" ] ], [ [ "---\n<a id='task9'></a>\n### Visualize the misclassified images\n\nVisualize some of the images you classified wrong (in the `MISCLASSIFIED` list) and note any qualities that make them difficult to classify. This will help you identify any weaknesses in your classification algorithm.", "_____no_output_____" ] ], [ [ "# Visualize misclassified example(s)\nnum = 0\ntest_mis_im = MISCLASSIFIED[num][0]\n\n## TODO: Display an image in the `MISCLASSIFIED` list \nplt.imshow(test_mis_im)\n## TODO: Print out its predicted label - \n## to see what the image *was* incorrectly classified as\nprint(\"Misslabeled as: \",MISCLASSIFIED[num][1])", "Misslabeled as: 0\n" ] ], [ [ "---\n<a id='question2'></a>\n## (Question): After visualizing these misclassifications, what weaknesses do you think your classification algorithm has?", "_____no_output_____" ], [ "**Answer:** Write your answer, here.", "_____no_output_____" ], [ "# 5. Improve your algorithm!\n\n* (Optional) Tweak your threshold so that accuracy is better.\n* (Optional) Add another feature that tackles a weakness you identified!\n---\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d0dcb28a48565386809d33d9ad7afecca9442c89
440,511
ipynb
Jupyter Notebook
model/Real State Price prediction.ipynb
Rishikeshrajrxl/Real_State_Price_Estimator_Model
0a7af9399c7a92644b3bfaf6bf7d0b14028a6eda
[ "MIT" ]
1
2021-01-16T04:10:02.000Z
2021-01-16T04:10:02.000Z
model/Real State Price prediction.ipynb
Rishikeshrajrxl/Real_State_Price_Estimator_Model
0a7af9399c7a92644b3bfaf6bf7d0b14028a6eda
[ "MIT" ]
null
null
null
model/Real State Price prediction.ipynb
Rishikeshrajrxl/Real_State_Price_Estimator_Model
0a7af9399c7a92644b3bfaf6bf7d0b14028a6eda
[ "MIT" ]
null
null
null
45.554395
12,592
0.304644
[ [ [ "## Bengaluru House Price", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\npd.set_option(\"display.max_rows\", None, \"display.max_columns\", None)", "_____no_output_____" ], [ "df1=pd.read_csv(\"Dataset/Bengaluru_House_Data.csv\")\ndf1.head()", "_____no_output_____" ] ], [ [ "### Data Cleaning", "_____no_output_____" ] ], [ [ "df1.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 13320 entries, 0 to 13319\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 area_type 13320 non-null object \n 1 availability 13320 non-null object \n 2 location 13319 non-null object \n 3 size 13304 non-null object \n 4 society 7818 non-null object \n 5 total_sqft 13320 non-null object \n 6 bath 13247 non-null float64\n 7 balcony 12711 non-null float64\n 8 price 13320 non-null float64\ndtypes: float64(3), object(6)\nmemory usage: 936.7+ KB\n" ], [ "df1.isnull().sum()", "_____no_output_____" ], [ "df1.groupby('area_type')['area_type'].agg('count')", "_____no_output_____" ], [ "df2=df1.drop(['area_type','availability','society','balcony'], axis='columns')", "_____no_output_____" ], [ "df2.head()", "_____no_output_____" ], [ "df2.isnull().sum()", "_____no_output_____" ], [ "df2.shape", "_____no_output_____" ], [ "df2['location'].fillna(df2['location'].mode().values[0],inplace=True)", "_____no_output_____" ], [ "df2['size'].fillna(df2['size'].mode().values[0],inplace=True)", "_____no_output_____" ], [ "df2['bath'].fillna(df2['bath'].mode().values[0],inplace=True)", "_____no_output_____" ], [ "df2.isnull().sum()", "_____no_output_____" ], [ "df2['size'].unique()", "_____no_output_____" ], [ "df2['bhk']=df2['size'].apply(lambda x: int(x.split(' ')[0]))", "_____no_output_____" ], [ "df2=df2.drop(['size'],axis='columns')", "_____no_output_____" ], [ "df2.head()", "_____no_output_____" ], [ "df2['bhk'].unique()", "_____no_output_____" ], [ "df2['total_sqft'].unique()", "_____no_output_____" ] ], [ [ "###### Dimension Reduction", "_____no_output_____" ] ], [ [ "def infloat(x):\n try:\n float(x)\n except:\n return False\n return True\n ", "_____no_output_____" ], [ "df2[~df2['total_sqft'].apply(infloat)].head(10)", "_____no_output_____" ], [ "def convert(x):\n token=x.split('-')\n if(len(token)==2):\n return (float(token[0])+float(token[1]))/2\n try:\n return float(x)\n except:\n return 1600\n ", "_____no_output_____" ], [ "df2['total_sqft']=df2['total_sqft'].apply(convert)", "_____no_output_____" ], [ "df2.head()", "_____no_output_____" ], [ "df2.loc[410]", "_____no_output_____" ], [ "df2.isnull().sum()", "_____no_output_____" ], [ "df2['total_sqft'].agg('mean')", "_____no_output_____" ], [ "df2['bath'].unique()", "_____no_output_____" ], [ "df3=df2.copy()", "_____no_output_____" ], [ "df3['price_per_sqft']=(df3['price']*100000/df3['total_sqft']).round(2)\ndf3.head()", "_____no_output_____" ], [ "df3.location.unique()", "_____no_output_____" ], [ "\nstats=df3.groupby('location')['location'].agg('count').sort_values(ascending=False)\nstats\n", "_____no_output_____" ], [ "location_stat_less_than_10=stats[stats<=10]", "_____no_output_____" ], [ "location_stat_less_than_10", "_____no_output_____" ], [ "df3['location']=df3['location'].apply(lambda x:'others' if x in location_stat_less_than_10 else x)", "_____no_output_____" ], [ "len(df3.location.unique())", "_____no_output_____" ], [ " df3.head(10)", "_____no_output_____" ], [ "df3[df3['total_sqft']/df3['bhk']<300].head()", "_____no_output_____" ], 
[ "df3.shape", "_____no_output_____" ], [ "df4=df3[~(df3['total_sqft']/df3['bhk']<300)]", "_____no_output_____" ], [ "df4.shape", "_____no_output_____" ], [ "df4.price_per_sqft.describe()", "_____no_output_____" ], [ "def remove(df):\n df_out = pd.DataFrame()\n for key, subdf in df.groupby('location'):\n m=np.mean(subdf.price_per_sqft)\n st=np.std(subdf.price_per_sqft)\n reduced_df=subdf[(subdf.price_per_sqft >(m-st)) & (subdf.price_per_sqft<=(m+st))]\n df_out = pd.concat([df_out, reduced_df],ignore_index=True)\n return df_out\n \ndf5=remove(df4)\ndf5.shape", "_____no_output_____" ], [ "def draw(df,location):\n bhk2=df[ (df.location==location) & (df.bhk==2)]\n bhk3=df[ (df.location==location) & (df.bhk==3)]\n plt.rcParams['figure.figsize']=(15,10)\n plt.scatter(bhk2.total_sqft,bhk2.price,color='blue')\n plt.scatter(bhk3.total_sqft,bhk3.price,color='green',marker='+')\n \n \n ", "_____no_output_____" ], [ "draw(df5,'Rajaji Nagar')", "_____no_output_____" ], [ "import matplotlib\nmatplotlib.rcParams['figure.figsize']=(15,10)\nplt.hist(df5.price_per_sqft,rwidth=.8)", "_____no_output_____" ], [ "df5.bath.unique()", "_____no_output_____" ], [ "df5[df5.bath>df5.bhk+2]", "_____no_output_____" ], [ "df6=df5[df5.bath<df5.bhk+2]", "_____no_output_____" ], [ "df6.shape", "_____no_output_____" ], [ "df6.head()", "_____no_output_____" ], [ "df6=df6.drop(['price_per_sqft'],axis='columns')", "_____no_output_____" ], [ "df6.head()", "_____no_output_____" ], [ "dummies=pd.get_dummies(df6.location)\ndummies.head(3)", "_____no_output_____" ], [ "dummies.shape", "_____no_output_____" ], [ "df7=pd.concat([df6,dummies.drop('others',axis='columns')],axis='columns')", "_____no_output_____" ], [ "df7.shape", "_____no_output_____" ], [ "df7.head(3)", "_____no_output_____" ], [ "df8=df7.drop('location',axis='columns')", "_____no_output_____" ], [ "df8.head(3)", "_____no_output_____" ], [ "df8.shape", "_____no_output_____" ], [ "x=df8.drop('price',axis='columns')\nx.head(2)", "_____no_output_____" ], [ "y=df8['price']", "_____no_output_____" ], [ "y.head()", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nx_train,x_test,y_train,y_test=train_test_split(x,y, test_size=0.2,random_state=10)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nlr=LinearRegression()\nlr.fit(x_train,y_train)", "_____no_output_____" ], [ "y_pred=lr.predict(x_test)", "_____no_output_____" ], [ "from sklearn.metrics import r2_score\nr2_score(y_pred,y_test)", "_____no_output_____" ], [ "lr.score(x_test,y_test)", "_____no_output_____" ], [ "from sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import cross_val_score\n\ncv=ShuffleSplit(n_splits=5, test_size=.2,random_state=0)\n\ncross_val_score(LinearRegression(),x,y,cv=cv)\n", "_____no_output_____" ], [ "from sklearn.ensemble import RandomForestRegressor\nrfg=RandomForestRegressor(n_estimators=50)\nrfg.fit(x_train,y_train)\nr2_score(y_test,rfg.predict(x_test))", "_____no_output_____" ], [ "rfg.score(x_test,y_test)", "_____no_output_____" ], [ "cross_val_score(RandomForestRegressor(),x,y,cv=cv)", "_____no_output_____" ], [ "x.columns", "_____no_output_____" ], [ "X=x\n", "_____no_output_____" ], [ "def predict_price(location,sqft,bath,bhk):\n loc_index = np.where(X.columns==location)[0][0]\n \n x=np.zeros(len(X.columns))\n x[0]=sqft\n x[1]=bath\n x[2]=bhk\n if loc_index>=0:\n x[loc_index]=1\n \n return lr.predict([x])[0]", "_____no_output_____" ], [ "predict_price('1st Phase JP Nagar',1000,4,5)", 
"_____no_output_____" ], [ "predict_price('Indira Nagar',1000,2,2)", "_____no_output_____" ], [ "import pickle\nwith open('banglore_home_price_model.pickle','wb') as f:\n pickle.dump(lr,f)\n ", "_____no_output_____" ], [ "import json\ncolumns={\n 'data_columns' : [col.lower() for col in X.columns]\n}\nwith open(\"columns.json\",\"w\") as f:\n f.write(json.dumps(columns))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dcdacce1cb51ad298a9ca527257ab797abb0fb
44,386
ipynb
Jupyter Notebook
site/en-snapshot/tutorials/distribute/multi_worker_with_keras.ipynb
minamstar/docs-l10n
3428d442f46e3e0c33f2f3b3df27aa52c65924aa
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/tutorials/distribute/multi_worker_with_keras.ipynb
minamstar/docs-l10n
3428d442f46e3e0c33f2f3b3df27aa52c65924aa
[ "Apache-2.0" ]
null
null
null
site/en-snapshot/tutorials/distribute/multi_worker_with_keras.ipynb
minamstar/docs-l10n
3428d442f46e3e0c33f2f3b3df27aa52c65924aa
[ "Apache-2.0" ]
null
null
null
38.832896
839
0.592326
[ [ [ "##### Copyright 2019 The TensorFlow Authors.\n", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Multi-worker training with Keras\n\n<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/multi_worker_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/distribute/multi_worker_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/distribute/multi_worker_with_keras.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Overview\n\nThis tutorial demonstrates multi-worker distributed training with Keras model using `tf.distribute.Strategy` API, specifically `tf.distribute.MultiWorkerMirroredStrategy`. With the help of this strategy, a Keras model that was designed to run on single-worker can seamlessly work on multiple workers with minimal code change.\n\n[Distributed Training in TensorFlow](../../guide/distributed_training.ipynb) guide is available for an overview of the distribution strategies TensorFlow supports for those interested in a deeper understanding of `tf.distribute.Strategy` APIs.\n", "_____no_output_____" ], [ "## Setup\n\nFirst, some necessary imports.", "_____no_output_____" ] ], [ [ "import json\nimport os\nimport sys", "_____no_output_____" ] ], [ [ "Before importing TensorFlow, make a few changes to the environment.\n\nDisable all GPUs. This prevents errors caused by the workers all trying to use the same GPU. For a real application each worker would be on a different machine.", "_____no_output_____" ] ], [ [ "os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"", "_____no_output_____" ] ], [ [ "Reset the `TF_CONFIG` environment variable, you'll see more about this later.", "_____no_output_____" ] ], [ [ "os.environ.pop('TF_CONFIG', None)", "_____no_output_____" ] ], [ [ "Be sure that the current directory is on python's path. This allows the notebook to import the files written by `%%writefile` later.\n", "_____no_output_____" ] ], [ [ "if '.' 
not in sys.path:\n  sys.path.insert(0, '.')", "_____no_output_____" ] ], [ [ "Now import TensorFlow.", "_____no_output_____" ] ], [ [ "import tensorflow as tf", "_____no_output_____" ] ], [ [ "### Dataset and model definition", "_____no_output_____" ], [ "Next create an `mnist.py` file with a simple model and dataset setup. This Python file will be used by the worker processes in this tutorial:", "_____no_output_____" ] ], [ [ "%%writefile mnist.py\n\nimport os\nimport tensorflow as tf\nimport numpy as np\n\ndef mnist_dataset(batch_size):\n  (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()\n  # The `x` arrays are in uint8 and have values in the range [0, 255].\n  # You need to convert them to float32 with values in the range [0, 1]\n  x_train = x_train / np.float32(255)\n  y_train = y_train.astype(np.int64)\n  train_dataset = tf.data.Dataset.from_tensor_slices(\n      (x_train, y_train)).shuffle(60000).repeat().batch(batch_size)\n  return train_dataset\n\ndef build_and_compile_cnn_model():\n  model = tf.keras.Sequential([\n      tf.keras.Input(shape=(28, 28)),\n      tf.keras.layers.Reshape(target_shape=(28, 28, 1)),\n      tf.keras.layers.Conv2D(32, 3, activation='relu'),\n      tf.keras.layers.Flatten(),\n      tf.keras.layers.Dense(128, activation='relu'),\n      tf.keras.layers.Dense(10)\n  ])\n  model.compile(\n      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n      optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),\n      metrics=['accuracy'])\n  return model", "_____no_output_____" ] ], [ [ "Try training the model for a small number of epochs and observe the results of a single worker to make sure everything works correctly. As training progresses, the loss should drop and the accuracy should increase.", "_____no_output_____" ] ], [ [ "import mnist\n\nbatch_size = 64\nsingle_worker_dataset = mnist.mnist_dataset(batch_size)\nsingle_worker_model = mnist.build_and_compile_cnn_model()\nsingle_worker_model.fit(single_worker_dataset, epochs=3, steps_per_epoch=70)", "_____no_output_____" ] ], [ [ "## Multi-worker Configuration\n\nNow let's enter the world of multi-worker training. In TensorFlow, the `TF_CONFIG` environment variable is required for training on multiple machines, each of which possibly has a different role. `TF_CONFIG` is a JSON string used to specify the cluster configuration on each worker that is part of the cluster.\n\nHere is an example configuration:", "_____no_output_____" ] ], [ [ "tf_config = {\n    'cluster': {\n        'worker': ['localhost:12345', 'localhost:23456']\n    },\n    'task': {'type': 'worker', 'index': 0}\n}", "_____no_output_____" ] ], [ [ "Here is the same `TF_CONFIG` serialized as a JSON string:", "_____no_output_____" ] ], [ [ "json.dumps(tf_config)", "_____no_output_____" ] ], [ [ "There are two components of `TF_CONFIG`: `cluster` and `task`.\n\n* `cluster` is the same for all workers and provides information about the training cluster, which is a dict consisting of different types of jobs such as `worker`. In multi-worker training with `MultiWorkerMirroredStrategy`, there is usually one `worker` that takes on a little more responsibility like saving checkpoints and writing summary files for TensorBoard in addition to what a regular `worker` does. Such a worker is referred to as the `chief` worker, and it is customary that the `worker` with `index` 0 is appointed as the chief `worker` (in fact this is how `tf.distribute.Strategy` is implemented).\n\n* `task` provides information about the current task and is different on each worker. It specifies the `type` and `index` of that worker. 
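\nAs a quick, illustrative sketch (it mirrors what the `main.py` script and the model-saving section later in this tutorial do), both components can be read straight out of this dict:\n\n```python\nnum_workers = len(tf_config['cluster']['worker'])  # size of the training cluster\nis_chief = tf_config['task']['type'] == 'worker' and tf_config['task']['index'] == 0  # worker 0 acts as chief\n```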
", "_____no_output_____" ], [ "In this example, you set the task `type` to `\"worker\"` and the task `index` to `0`. This machine is the first worker and will be appointed as the chief worker and do more work than the others. Note that other machines will need to have the `TF_CONFIG` environment variable set as well, and it should have the same `cluster` dict, but different task `type` or task `index` depending on what the roles of those machines are.\n", "_____no_output_____" ], [ "For illustration purposes, this tutorial shows how one may set a `TF_CONFIG` with 2 workers on `localhost`. In practice, users would create multiple workers on external IP addresses/ports, and set `TF_CONFIG` on each worker appropriately.\n\nIn this example you will use 2 workers, the first worker's `TF_CONFIG` is shown above. For the second worker you would set `tf_config['task']['index']=1`", "_____no_output_____" ], [ "Above, `tf_config` is just a local variable in python. To actually use it to configure training, this dictionary needs to be serialized as JSON, and placed in the `TF_CONFIG` environment variable.", "_____no_output_____" ], [ "### Environment variables and subprocesses in notebooks", "_____no_output_____" ], [ "Subprocesses inherit environment variables from their parent. So if you set an environment variable in this `jupyter notebook` process:", "_____no_output_____" ] ], [ [ "os.environ['GREETINGS'] = 'Hello TensorFlow!'", "_____no_output_____" ] ], [ [ "You can access the environment variable from a subprocesses:", "_____no_output_____" ] ], [ [ "%%bash\necho ${GREETINGS}", "_____no_output_____" ] ], [ [ "In the next section, you'll use this to pass the `TF_CONFIG` to the worker subprocesses. You would never really launch your jobs this way, but it's sufficient for the purposes of this tutorial: To demonstrate a minimal multi-worker example.", "_____no_output_____" ], [ "## Choose the right strategy\n\nIn TensorFlow there are two main forms of distributed training:\n\n* Synchronous training, where the steps of training are synced across the workers and replicas, and\n* Asynchronous training, where the training steps are not strictly synced.\n\n`MultiWorkerMirroredStrategy`, which is the recommended strategy for synchronous multi-worker training, will be demonstrated in this guide.\nTo train the model, use an instance of `tf.distribute.MultiWorkerMirroredStrategy`.\n\n`MultiWorkerMirroredStrategy` creates copies of all variables in the model's layers on each device across all workers. It uses `CollectiveOps`, a TensorFlow op for collective communication, to aggregate gradients and keep the variables in sync. The [`tf.distribute.Strategy` guide](../../guide/distributed_training.ipynb) has more details about this strategy.", "_____no_output_____" ] ], [ [ "strategy = tf.distribute.MultiWorkerMirroredStrategy()", "_____no_output_____" ] ], [ [ "Note: `TF_CONFIG` is parsed and TensorFlow's GRPC servers are started at the time `MultiWorkerMirroredStrategy()` is called, so the `TF_CONFIG` environment variable must be set before a `tf.distribute.Strategy` instance is created. Since `TF_CONFIG` is not set yet the above strategy is effectively single-worker training.", "_____no_output_____" ], [ "`MultiWorkerMirroredStrategy` provides multiple implementations via the [`CommunicationOptions`](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/CommunicationOptions) parameter. `RING` implements ring-based collectives using gRPC as the cross-host communication layer. 
`NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster.", "_____no_output_____" ], [ "## Train the model\n\nWith the integration of `tf.distribute.Strategy` API into `tf.keras`, the only change you will make to distribute the training to multiple workers is enclosing the model building and `model.compile()` call inside `strategy.scope()`. The distribution strategy's scope dictates how and where the variables are created, and in the case of `MultiWorkerMirroredStrategy`, the variables created are `MirroredVariable`s, and they are replicated on each of the workers.\n", "_____no_output_____" ] ], [ [ "with strategy.scope():\n  # Model building/compiling need to be within `strategy.scope()`.\n  multi_worker_model = mnist.build_and_compile_cnn_model()", "_____no_output_____" ] ], [ [ "Note: Currently there is a limitation in `MultiWorkerMirroredStrategy` where TensorFlow ops need to be created after the instance of strategy is created. If you see `RuntimeError: Collective ops must be configured at program startup`, try creating the instance of `MultiWorkerMirroredStrategy` at the beginning of the program and put the code that may create ops after the strategy is instantiated.", "_____no_output_____" ], [ "To actually run with `MultiWorkerMirroredStrategy` you'll need to run worker processes and pass a `TF_CONFIG` to them.\n\nLike the `mnist.py` file written earlier, here is the `main.py` that each of the workers will run:", "_____no_output_____" ] ], [ [ "%%writefile main.py\n\nimport os\nimport json\n\nimport tensorflow as tf\nimport mnist\n\nper_worker_batch_size = 64\ntf_config = json.loads(os.environ['TF_CONFIG'])\nnum_workers = len(tf_config['cluster']['worker'])\n\nstrategy = tf.distribute.MultiWorkerMirroredStrategy()\n\nglobal_batch_size = per_worker_batch_size * num_workers\nmulti_worker_dataset = mnist.mnist_dataset(global_batch_size)\n\nwith strategy.scope():\n  # Model building/compiling need to be within `strategy.scope()`.\n  multi_worker_model = mnist.build_and_compile_cnn_model()\n\n\nmulti_worker_model.fit(multi_worker_dataset, epochs=3, steps_per_epoch=70)", "_____no_output_____" ] ], [ [ "In the code snippet above note that the `global_batch_size`, which gets passed to `Dataset.batch`, is set to `per_worker_batch_size * num_workers`. This ensures that each worker processes batches of `per_worker_batch_size` examples regardless of the number of workers.", "_____no_output_____" ] ], [ [ "The current directory now contains both Python files:", "_____no_output_____" ] ], [ [ "%%bash\nls *.py", "_____no_output_____" ] ], [ [ "So json-serialize the `TF_CONFIG` and add it to the environment variables:", "_____no_output_____" ] ], [ [ "os.environ['TF_CONFIG'] = json.dumps(tf_config)", "_____no_output_____" ] ], [ [ "Now, you can launch a worker process that will run the `main.py` and use the `TF_CONFIG`:", "_____no_output_____" ] ], [ [ "# first kill any previous runs\n%killbgscripts", "_____no_output_____" ], [ "%%bash --bg\npython main.py &> job_0.log", "_____no_output_____" ] ], [ [ "There are a few things to note about the above command:\n\n1. It uses `%%bash`, which is a [notebook \"magic\"](https://ipython.readthedocs.io/en/stable/interactive/magics.html), to run some bash commands.\n2. 
It uses the `--bg` flag to run the `bash` process in the background, because this worker will not terminate. It waits for all the workers before it starts.\n\nThe backgrounded worker process won't print output to this notebook, so the `&>` redirects its output to a file, so you can see what happened.\n\nSo, wait a few seconds for the process to start up:", "_____no_output_____" ] ], [ [ "import time\ntime.sleep(10)", "_____no_output_____" ] ], [ [ "Now look what's been output to the worker's logfile so far:", "_____no_output_____" ] ], [ [ "%%bash\ncat job_0.log", "_____no_output_____" ] ], [ [ "The last line of the log file should say: `Started server with target: grpc://localhost:12345`. The first worker is now ready, and is waiting for all the other worker(s) to be ready to proceed.", "_____no_output_____" ], [ "So update the `tf_config` for the second worker's process to pick up:", "_____no_output_____" ] ], [ [ "tf_config['task']['index'] = 1\nos.environ['TF_CONFIG'] = json.dumps(tf_config)", "_____no_output_____" ] ], [ [ "Now launch the second worker. This will start the training since all the workers are active (so there's no need to background this process):", "_____no_output_____" ] ], [ [ "%%bash\npython main.py", "_____no_output_____" ] ], [ [ "Now if you recheck the logs written by the first worker you'll see that it participated in training that model:", "_____no_output_____" ] ], [ [ "%%bash\ncat job_0.log", "_____no_output_____" ] ], [ [ "Unsurprisingly this ran _slower_ than the test run at the beginning of this tutorial. Running multiple workers on a single machine only adds overhead. The goal here was not to improve the training time, but only to give an example of multi-worker training.", "_____no_output_____" ] ], [ [ "# Delete the `TF_CONFIG`, and kill any background tasks so they don't affect the next section.\nos.environ.pop('TF_CONFIG', None)\n%killbgscripts", "_____no_output_____" ] ], [ [ "## Multi worker training in depth\n\nSo far this tutorial has demonstrated a basic multi-worker setup. The rest of this document looks in detail at other factors which may be useful or important for real use cases.", "_____no_output_____" ], [ "### Dataset sharding\n\nIn multi-worker training, dataset sharding is needed to ensure convergence and performance.\n\nThe example in the previous section relies on the default autosharding provided by the `tf.distribute.Strategy` API. You can control the sharding by setting the `tf.data.experimental.AutoShardPolicy` of the `tf.data.experimental.DistributeOptions`. To learn more about auto-sharding see the [Distributed input guide](https://www.tensorflow.org/tutorials/distribute/input#sharding).\n\nHere is a quick example of how to turn OFF the auto sharding, so each replica processes every example (not recommended):\n", "_____no_output_____" ] ], [ [ "options = tf.data.Options()\noptions.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF\n\nglobal_batch_size = 64\nmulti_worker_dataset = mnist.mnist_dataset(batch_size=64)\ndataset_no_auto_shard = multi_worker_dataset.with_options(options)", "_____no_output_____" ] ], [ [ "### Evaluation\n\nIf you pass `validation_data` into `model.fit`, it will alternate between training and evaluation for each epoch. The evaluation taking `validation_data` is distributed across the same set of workers and the evaluation results are aggregated and available for all workers. Similar to training, the validation dataset is automatically sharded at the file level. 
You need to set a global batch size in the validation dataset and set `validation_steps`. A repeated dataset is also recommended for evaluation.\n\nAlternatively, you can also create another task that periodically reads checkpoints and runs the evaluation. This is what Estimator does. But this is not a recommended way to perform evaluation and thus its details are omitted.", "_____no_output_____" ], [ "### Prediction\nCurrently `model.predict` doesn't work with `MultiWorkerMirroredStrategy`.", "_____no_output_____" ], [ "### Performance\n\nYou now have a Keras model that is all set up to run on multiple workers with `MultiWorkerMirroredStrategy`. You can try the following techniques to tweak performance of multi-worker training with `MultiWorkerMirroredStrategy`.\n\n* `MultiWorkerMirroredStrategy` provides multiple [collective communication implementations](https://www.tensorflow.org/api_docs/python/tf/distribute/experimental/CommunicationImplementation). `RING` implements ring-based collectives using gRPC as the cross-host communication layer. `NCCL` uses [Nvidia's NCCL](https://developer.nvidia.com/nccl) to implement collectives. `AUTO` defers the choice to the runtime. The best choice of collective implementation depends upon the number and kind of GPUs, and the network interconnect in the cluster. To override the automatic choice, specify the `communication_options` parameter of `MultiWorkerMirroredStrategy`'s constructor, e.g. `communication_options=tf.distribute.experimental.CommunicationOptions(implementation=tf.distribute.experimental.CollectiveCommunication.NCCL)`.\n* Cast the variables to `tf.float` if possible. The official ResNet model includes [an example](https://github.com/tensorflow/models/blob/8367cf6dabe11adf7628541706b660821f397dce/official/resnet/resnet_model.py#L466) of how this can be done.\n", "_____no_output_____" ], [ "### Fault tolerance\n\nIn synchronous training, the cluster would fail if one of the workers fails and no failure-recovery mechanism exists. Using Keras with `tf.distribute.Strategy` comes with the advantage of fault tolerance in cases where workers die or are otherwise unstable. You do this by preserving training state in the distributed file system of your choice, such that upon restart of the instance that previously failed or was preempted, the training state is recovered.\n\nWhen a worker becomes unavailable, other workers will fail (possibly after a timeout). In such cases, the unavailable worker needs to be restarted, as well as other workers that have failed.\n\nNote:\nPreviously, the `ModelCheckpoint` callback provided a mechanism to restore training state upon restart from job failure for multi-worker training. The TensorFlow team is introducing a new [`BackupAndRestore`](#scrollTo=kmH8uCUhfn4w) callback, to also add support for single-worker training for a consistent experience, and removed fault tolerance functionality from the existing `ModelCheckpoint` callback. From now on, applications that rely on this behavior should migrate to the new callback.", "_____no_output_____" ], [ "#### ModelCheckpoint callback\n\nThe `ModelCheckpoint` callback no longer provides fault tolerance functionality; please use the [`BackupAndRestore`](#scrollTo=kmH8uCUhfn4w) callback instead.\n\nThe `ModelCheckpoint` callback can still be used to save checkpoints. 
But with this, if training was interrupted or successfully finished, in order to continue training from the checkpoint, the user is responsible for loading the model manually.\n\nOptionally the user can choose to save and restore model/weights outside the `ModelCheckpoint` callback.", "_____no_output_____" ], [ "### Model saving and loading\n\nTo save your model using `model.save` or `tf.saved_model.save`, the destination for saving needs to be different for each worker. On the non-chief workers, you will need to save the model to a temporary directory, and on the chief, you will need to save to the provided model directory. The temporary directories on the worker need to be unique to prevent errors resulting from multiple workers trying to write to the same location. The model saved in all the directories is identical and typically only the model saved by the chief should be referenced for restoring or serving. You should have some cleanup logic that deletes the temporary directories created by the workers once your training has completed.\n\nThe reason you need to save on the chief and workers at the same time is because you might be aggregating variables during checkpointing which requires both the chief and workers to participate in the allreduce communication protocol. On the other hand, letting chief and workers save to the same model directory will result in errors due to contention.\n\nWith `MultiWorkerMirroredStrategy`, the program is run on every worker, and in order to know whether the current worker is chief, it takes advantage of the cluster resolver object that has attributes `task_type` and `task_id`. `task_type` tells you what the current job is (e.g. 'worker'), and `task_id` tells you the identifier of the worker. The worker with id 0 is designated as the chief worker.\n\nIn the code snippet below, `write_filepath` provides the file path to write, which depends on the worker id. In the case of the chief (worker with id 0), it writes to the original file path; for others, it creates a temporary directory (with id in the directory path) to write in:", "_____no_output_____" ] ], [ [ "model_path = '/tmp/keras-model'\n\ndef _is_chief(task_type, task_id):\n  # Note: there are two possible `TF_CONFIG` configurations.\n  #   1) In addition to `worker` tasks, a `chief` task type is used;\n  #      in this case, this function should be modified to \n  #      `return task_type == 'chief'`.\n  #   2) Only `worker` task type is used; in this case, worker 0 is\n  #      regarded as the chief. 
The implementation demonstrated here\n  #      is for this case.\n  # For the purpose of this colab section, we also add `task_type is None` \n  # case because it is effectively run with only a single worker.\n  return (task_type == 'worker' and task_id == 0) or task_type is None\n\ndef _get_temp_dir(dirpath, task_id):\n  base_dirpath = 'workertemp_' + str(task_id)\n  temp_dir = os.path.join(dirpath, base_dirpath)\n  tf.io.gfile.makedirs(temp_dir)\n  return temp_dir\n\ndef write_filepath(filepath, task_type, task_id):\n  dirpath = os.path.dirname(filepath)\n  base = os.path.basename(filepath)\n  if not _is_chief(task_type, task_id):\n    dirpath = _get_temp_dir(dirpath, task_id)\n  return os.path.join(dirpath, base)\n\ntask_type, task_id = (strategy.cluster_resolver.task_type,\n                      strategy.cluster_resolver.task_id)\nwrite_model_path = write_filepath(model_path, task_type, task_id)", "_____no_output_____" ] ], [ [ "With that, you're now ready to save:", "_____no_output_____" ] ], [ [ "multi_worker_model.save(write_model_path)", "_____no_output_____" ] ], [ [ "As described above, later on the model should only be loaded from the path the chief saved to, so let's remove the temporary ones the non-chief workers saved:", "_____no_output_____" ] ], [ [ "if not _is_chief(task_type, task_id):\n  tf.io.gfile.rmtree(os.path.dirname(write_model_path))", "_____no_output_____" ] ], [ [ "Now, when it's time to load, let's use the convenient `tf.keras.models.load_model` API, and continue with further work. Here, assume you are only using a single worker to load and continue training, in which case you do not call `tf.keras.models.load_model` within another `strategy.scope()`.", "_____no_output_____" ] ], [ [ "loaded_model = tf.keras.models.load_model(model_path)\n\n# Now that the model is restored, training can continue.\nloaded_model.fit(single_worker_dataset, epochs=2, steps_per_epoch=20)", "_____no_output_____" ] ], [ [ "### Checkpoint saving and restoring\n\nOn the other hand, checkpointing allows you to save the model's weights and restore them without having to save the whole model. Here, you'll create one `tf.train.Checkpoint` that tracks the model, which is managed by a `tf.train.CheckpointManager` so that only the latest checkpoint is preserved. ", "_____no_output_____" ] ], [ [ "checkpoint_dir = '/tmp/ckpt'\n\ncheckpoint = tf.train.Checkpoint(model=multi_worker_model)\nwrite_checkpoint_dir = write_filepath(checkpoint_dir, task_type, task_id)\ncheckpoint_manager = tf.train.CheckpointManager(\n    checkpoint, directory=write_checkpoint_dir, max_to_keep=1)", "_____no_output_____" ] ], [ [ "Once the `CheckpointManager` is set up, you're now ready to save, and remove the checkpoints non-chief workers saved.", "_____no_output_____" ] ], [ [ "checkpoint_manager.save()\nif not _is_chief(task_type, task_id):\n  tf.io.gfile.rmtree(write_checkpoint_dir)", "_____no_output_____" ] ], [ [ "Now, when you need to restore, you can find the latest checkpoint saved using the convenient `tf.train.latest_checkpoint` function. 
After restoring the checkpoint, you can continue with training.", "_____no_output_____" ] ], [ [ "latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\ncheckpoint.restore(latest_checkpoint)\nmulti_worker_model.fit(multi_worker_dataset, epochs=2, steps_per_epoch=20)", "_____no_output_____" ] ], [ [ "#### BackupAndRestore callback\n\nThe `BackupAndRestore` callback provides fault tolerance functionality by backing up the model and current epoch number in a temporary checkpoint file under the `backup_dir` argument of `BackupAndRestore`. This is done at the end of each epoch.\n\nOnce jobs get interrupted and restart, the callback restores the last checkpoint, and training continues from the beginning of the interrupted epoch. Any partial training already done in the unfinished epoch before interruption will be thrown away, so that it doesn't affect the final model state.\n\nTo use it, provide an instance of `tf.keras.callbacks.experimental.BackupAndRestore` at the `tf.keras.Model.fit()` call.\n\nWith MultiWorkerMirroredStrategy, if a worker gets interrupted, the whole cluster pauses until the interrupted worker is restarted. Other workers will also restart, and the interrupted worker rejoins the cluster. Then, every worker reads the checkpoint file that was previously saved and picks up its former state, thereby allowing the cluster to get back in sync. Then the training continues.\n\n`BackupAndRestore` callback uses `CheckpointManager` to save and restore the training state, which generates a file called checkpoint that tracks existing checkpoints together with the latest one. For this reason, `backup_dir` should not be re-used to store other checkpoints in order to avoid name collision.\n\nCurrently, `BackupAndRestore` callback supports single worker with no strategy, MirroredStrategy, and multi-worker with MultiWorkerMirroredStrategy.\nBelow is an example of multi-worker training.", "_____no_output_____" ] ], [ [ "# Multi-worker training with MultiWorkerMirroredStrategy.\n\ncallbacks = [tf.keras.callbacks.experimental.BackupAndRestore(backup_dir='/tmp/backup')]\nwith strategy.scope():\n  multi_worker_model = mnist.build_and_compile_cnn_model()\nmulti_worker_model.fit(multi_worker_dataset,\n                       epochs=3,\n                       steps_per_epoch=70,\n                       callbacks=callbacks)", "_____no_output_____" ] ], [ [ "If you inspect the directory of `backup_dir` you specified in `BackupAndRestore`, you may notice some temporarily generated checkpoint files. Those files are needed for recovering the previously lost instances, and they will be removed by the library at the end of `tf.keras.Model.fit()` upon successful exiting of your training.\n\nNote: Currently BackupAndRestore only supports eager mode. In graph mode, consider using [Save/Restore Model](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading) mentioned above, and by providing `initial_epoch` in `model.fit()`.", "_____no_output_____" ], [ "## See also\n1. The [Distributed Training in TensorFlow](https://www.tensorflow.org/guide/distributed_training) guide provides an overview of the available distribution strategies.\n2. [Official models](https://github.com/tensorflow/models/tree/master/official), many of which can be configured to run multiple distribution strategies.\n3. 
The [Performance section](../../guide/function.ipynb) in the guide provides information about other strategies and [tools](../../guide/profiler.md) you can use to optimize the performance of your TensorFlow models.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
d0dcddabc130b0096106dffde64791402c5db205
3,921
ipynb
Jupyter Notebook
scripts/Streaming.ipynb
fudan-cyma/KafkaDashboard
e2ec1f594c66c76631d7c7fa0cae24dd9b61618c
[ "Apache-2.0" ]
null
null
null
scripts/Streaming.ipynb
fudan-cyma/KafkaDashboard
e2ec1f594c66c76631d7c7fa0cae24dd9b61618c
[ "Apache-2.0" ]
null
null
null
scripts/Streaming.ipynb
fudan-cyma/KafkaDashboard
e2ec1f594c66c76631d7c7fa0cae24dd9b61618c
[ "Apache-2.0" ]
1
2018-10-11T07:11:57.000Z
2018-10-11T07:11:57.000Z
22.405714
118
0.549095
[ [ [ "import os \nos.environ['PYSPARK_SUBMIT_ARGS'] = '--jars spark-streaming-kafka-0-8-assembly_2.11-2.2.1.jar pyspark-shell' \n", "_____no_output_____" ], [ "from pyspark.streaming.kafka import KafkaUtils\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nimport sys\nimport json", "_____no_output_____" ], [ "sc = SparkContext.getOrCreate()\nsc.stop()\n\n\n", "_____no_output_____" ], [ "sc = SparkContext(appName = \"PythonStreamingReciever\")\nssc = StreamingContext(sc, 5)", "_____no_output_____" ], [ "kafkaStream = KafkaUtils.createStream(ssc, 'localhost:2181', 'spark-streaming', {'province':1})\nlines = kafkaStream.map(lambda x:x[1])\ncounts = lines.flatMap(lambda line:line.split(\" \")).map(lambda word:(word,1)).reduceByKey(lambda a,b:a+b)\ncounts.pprint()", "_____no_output_____" ], [ "from kafka import KafkaProducer\nproducer = KafkaProducer(bootstrap_servers = 'localhost:9092')\n", "_____no_output_____" ], [ "def process(rdd):\n print(rdd)\n message = json.dumps(rdd.map(lambda x:[str(x[0]),str(x[1])]).collect())\n\n producer.send('result', message.encode('utf-8'))\n", "_____no_output_____" ], [ "counts.foreachRDD(process)\nssc.start()", "_____no_output_____" ], [ "ssc.awaitTermination()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dcde50211a027c4bfda87d765710e5f64d729f
2,409
ipynb
Jupyter Notebook
notebooks/Deep Learning with TensorFlow/2.3.1_cross_entropy_cost.ipynb
grantbey/TensorFlow-LiveLessons
a54655829868a50427fb4cdd98dce1b09bb673ee
[ "MIT" ]
null
null
null
notebooks/Deep Learning with TensorFlow/2.3.1_cross_entropy_cost.ipynb
grantbey/TensorFlow-LiveLessons
a54655829868a50427fb4cdd98dce1b09bb673ee
[ "MIT" ]
null
null
null
notebooks/Deep Learning with TensorFlow/2.3.1_cross_entropy_cost.ipynb
grantbey/TensorFlow-LiveLessons
a54655829868a50427fb4cdd98dce1b09bb673ee
[ "MIT" ]
null
null
null
16.613793
47
0.466169
[ [ [ "from numpy import log", "_____no_output_____" ], [ "def cross_entropy(y, a):\n return -1*(y*log(a) + (1-y)*log(1-a))", "_____no_output_____" ], [ "cross_entropy(0, 0.01)", "_____no_output_____" ], [ "cross_entropy(1, 0.99)", "_____no_output_____" ], [ "cross_entropy(0, 0.3)", "_____no_output_____" ], [ "cross_entropy(0, 0.6)", "_____no_output_____" ], [ "cross_entropy(0, 0.9)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0dcef550da3d9ed7727addaf9a914b004b3d0c0
14,542
ipynb
Jupyter Notebook
tutorials/streamlit_notebooks/healthcare/RE_POSOLOGY.ipynb
ewbolme/spark-nlp-workshop
421e9b8b3941c825262e46fdf09243996e757992
[ "Apache-2.0" ]
null
null
null
tutorials/streamlit_notebooks/healthcare/RE_POSOLOGY.ipynb
ewbolme/spark-nlp-workshop
421e9b8b3941c825262e46fdf09243996e757992
[ "Apache-2.0" ]
null
null
null
tutorials/streamlit_notebooks/healthcare/RE_POSOLOGY.ipynb
ewbolme/spark-nlp-workshop
421e9b8b3941c825262e46fdf09243996e757992
[ "Apache-2.0" ]
null
null
null
26.201802
459
0.549443
[ [ [ "\n\n![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/streamlit_notebooks/healthcare/RE_POSOLOGY.ipynb)\n\n\n", "_____no_output_____" ], [ "# **Detect posology relations**", "_____no_output_____" ], [ "To run this yourself, you will need to upload your license keys to the notebook. Otherwise, you can look at the example outputs at the bottom of the notebook. To upload license keys, open the file explorer on the left side of the screen and upload `workshop_license_keys.json` to the folder that opens.", "_____no_output_____" ], [ "## 1. Colab Setup", "_____no_output_____" ], [ "Import license keys", "_____no_output_____" ] ], [ [ "import os\nimport json\n\nwith open('/content/spark_nlp_for_healthcare.json', 'r') as f:\n license_keys = json.load(f)\n\nlicense_keys.keys()\n\nsecret = license_keys['SECRET']\nos.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']\nos.environ['AWS_ACCESS_KEY_ID'] = license_keys['AWS_ACCESS_KEY_ID']\nos.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']\nsparknlp_version = license_keys[\"PUBLIC_VERSION\"]\njsl_version = license_keys[\"JSL_VERSION\"]\n\nprint ('SparkNLP Version:', sparknlp_version)\nprint ('SparkNLP-JSL Version:', jsl_version)", "_____no_output_____" ] ], [ [ "Install dependencies", "_____no_output_____" ] ], [ [ "# Install Java\n! apt-get update -qq\n! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null\n! java -version\n\n# Install pyspark\n! pip install --ignore-installed -q pyspark==2.4.4\n\n# Install Spark NLP\n! pip install --ignore-installed spark-nlp==$sparknlp_version\n! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret", "_____no_output_____" ] ], [ [ "Import dependencies into Python", "_____no_output_____" ] ], [ [ "os.environ['JAVA_HOME'] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ['PATH'] = os.environ['JAVA_HOME'] + \"/bin:\" + os.environ['PATH']\n\nimport pandas as pd\nfrom pyspark.ml import Pipeline\nfrom pyspark.sql import SparkSession\nimport pyspark.sql.functions as F\n\nimport sparknlp\nfrom sparknlp.annotator import *\nfrom sparknlp_jsl.annotator import *\nfrom sparknlp.base import *\nimport sparknlp_jsl\n", "_____no_output_____" ] ], [ [ "Start the Spark session", "_____no_output_____" ] ], [ [ "spark = sparknlp_jsl.start(secret)", "_____no_output_____" ] ], [ [ "## 2. 
Select the Relation Extraction model and construct the pipeline", "_____no_output_____" ], [ "Select the models:\n\n\n* Posology Relation Extraction models: **posology_re**\n\n\n\n\nFor more details: https://github.com/JohnSnowLabs/spark-nlp-models#pretrained-models---spark-nlp-for-healthcare", "_____no_output_____" ] ], [ [ "# Change this to the model you want to use and re-run the cells below.\nRE_MODEL_NAME = \"posology_re\"\nNER_MODEL_NAME = \"ner_posology_large\"", "_____no_output_____" ] ], [ [ "Create the pipeline", "_____no_output_____" ] ], [ [ "document_assembler = DocumentAssembler() \\\n .setInputCol('text')\\\n .setOutputCol('document')\n\nsentence_detector = SentenceDetector() \\\n .setInputCols(['document'])\\\n .setOutputCol('sentences')\n\ntokenizer = Tokenizer()\\\n .setInputCols(['sentences']) \\\n .setOutputCol('tokens')\n\npos_tagger = PerceptronModel()\\\n .pretrained(\"pos_clinical\", \"en\", \"clinical/models\") \\\n .setInputCols([\"sentences\", \"tokens\"])\\\n .setOutputCol(\"pos_tags\")\n\ndependency_parser = DependencyParserModel()\\\n .pretrained(\"dependency_conllu\", \"en\")\\\n .setInputCols([\"sentences\", \"pos_tags\", \"tokens\"])\\\n .setOutputCol(\"dependencies\")\n\nembeddings = WordEmbeddingsModel.pretrained('embeddings_clinical', 'en', 'clinical/models')\\\n .setInputCols([\"sentences\", \"tokens\"])\\\n .setOutputCol(\"embeddings\")\n\nclinical_ner_model = NerDLModel().pretrained(NER_MODEL_NAME, 'en', 'clinical/models').setInputCols(\"sentences\", \"tokens\", \"embeddings\")\\\n .setOutputCol(\"clinical_ner_tags\") \n\nclinical_ner_chunker = NerConverter()\\\n .setInputCols([\"sentences\", \"tokens\", \"clinical_ner_tags\"])\\\n .setOutputCol(\"clinical_ner_chunks\")\n\nclinical_re_Model = RelationExtractionModel()\\\n .pretrained(RE_MODEL_NAME, 'en', 'clinical/models')\\\n .setInputCols([\"embeddings\", \"pos_tags\", \"clinical_ner_chunks\", \"dependencies\"])\\\n .setOutputCol(\"clinical_relations\")\\\n .setMaxSyntacticDistance(4)\n #.setRelationPairs()#[\"problem-test\", \"problem-treatment\"]) # we can set the possible relation pairs (if not set, all the relations will be calculated)\n\npipeline = Pipeline(stages=[\n document_assembler, \n sentence_detector,\n tokenizer,\n pos_tagger,\n dependency_parser,\n embeddings,\n clinical_ner_model,\n clinical_ner_chunker,\n clinical_re_Model])\n\nempty_df = spark.createDataFrame([['']]).toDF(\"text\")\npipeline_model = pipeline.fit(empty_df)\nlight_pipeline = LightPipeline(pipeline_model)", "_____no_output_____" ] ], [ [ "## 3. Create example inputs", "_____no_output_____" ] ], [ [ "# Enter examples as strings in this array\ninput_list = [\n\"\"\"The patient is a 40-year-old white male who presents with a chief complaint of \"chest pain\". The patient is diabetic and has a prior history of coronary artery disease. The patient presents today stating that his chest pain started yesterday evening and has been somewhat intermittent. He has been advised Aspirin 81 milligrams QDay. Humulin N. insulin 50 units in a.m. HCTZ 50 mg QDay. Nitroglycerin 1/150 sublingually PRN chest pain.\"\"\",\n]", "_____no_output_____" ] ], [ [ "# 4. Run the pipeline", "_____no_output_____" ] ], [ [ "df = spark.createDataFrame(pd.DataFrame({\"text\": input_list}))\nresult = pipeline_model.transform(df)\nlight_result = light_pipeline.fullAnnotate(input_list[0])", "_____no_output_____" ] ], [ [ "# 5. 
Visualize", "_____no_output_____" ], [ "helper function for visualization", "_____no_output_____" ] ], [ [ "def get_relations_df (results, rel='clinical_relations'):\n rel_pairs=[]\n for rel in results[rel]:\n rel_pairs.append((\n rel.result, \n rel.metadata['entity1'], \n rel.metadata['entity1_begin'],\n rel.metadata['entity1_end'],\n rel.metadata['chunk1'], \n rel.metadata['entity2'],\n rel.metadata['entity2_begin'],\n rel.metadata['entity2_end'],\n rel.metadata['chunk2'], \n rel.metadata['confidence']\n ))\n\n rel_df = pd.DataFrame(rel_pairs, columns=['relation','entity1','entity1_begin','entity1_end','chunk1','entity2','entity2_begin','entity2_end','chunk2', 'confidence'])\n\n return rel_df[rel_df.relation!='O']", "_____no_output_____" ], [ "get_relations_df(light_result[0])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
d0dd0b8e5144a902f8ebb03326602e7d2467b245
141,023
ipynb
Jupyter Notebook
notebooks/Show First Layer Filters.ipynb
patrickmineault/brain-scorer
5e882bafb323ff58028ade2394d18176e6c02e80
[ "MIT" ]
7
2021-07-22T02:19:14.000Z
2022-02-21T15:07:35.000Z
notebooks/Show First Layer Filters.ipynb
patrickmineault/your-head-is-there-to-move-you-around
5e882bafb323ff58028ade2394d18176e6c02e80
[ "MIT" ]
null
null
null
notebooks/Show First Layer Filters.ipynb
patrickmineault/your-head-is-there-to-move-you-around
5e882bafb323ff58028ade2394d18176e6c02e80
[ "MIT" ]
2
2021-07-22T02:27:17.000Z
2022-03-21T02:08:42.000Z
446.275316
23,924
0.918403
[ [ [ "from python_dict_wrapper import wrap\nimport sys\nsys.path.append('../')", "_____no_output_____" ], [ "import torch", "_____no_output_____" ], [ "sys.path.append(\"../../CPC/dpc\")\nsys.path.append(\"../../CPC/backbone\")", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport numpy as np\nimport scipy\n\ndef find_dominant_orientation(W):\n Wf = abs(np.fft.fft2(W))\n orient_sel = 1 - Wf[0, 0] / Wf.sum()\n Wf[0, 0] = 0\n Wf = np.fft.fftshift(Wf)\n dt = W.shape[0] // 2\n xi, yi = np.meshgrid(np.arange(-dt, dt+1), np.arange(-dt, dt+1))\n \n # Check whether we should split this horizontally or vertically\n if Wf[xi == 0].sum() > Wf[yi == 0].sum():\n # Use a top-down split \n xi_ = xi * (xi >= 0)\n yi_ = yi * (xi >= 0)\n x0 = (xi_ * Wf).sum() / ((xi >= 0) * Wf).sum()\n y0 = (yi_ * Wf).sum() / ((xi >= 0) * Wf).sum()\n else:\n xi_ = xi * (yi >= 0)\n yi_ = yi * (yi >= 0)\n x0 = (xi_ * Wf).sum() / ((yi >= 0) * Wf).sum()\n y0 = (yi_ * Wf).sum() / ((yi >= 0) * Wf).sum()\n return np.arctan2(y0, x0), orient_sel\n\ndef get_spatial_slice(W, theta):\n dx = W.shape[0] // 2\n dt = W.shape[2] // 2\n xi, yi, zi = np.meshgrid(np.arange(W.shape[0]), \n np.arange(W.shape[1]),\n np.arange(W.shape[2]))\n \n xi_, zi_ = np.meshgrid(np.arange(W.shape[0]), \n np.arange(W.shape[2]))\n Ws = []\n for i in range(W.shape[3]):\n interp = scipy.interpolate.LinearNDInterpolator(np.array([xi.ravel(), \n yi.ravel(), \n zi.ravel()]).T, W[:, :, :, i].ravel())\n probe = np.array([dx + (xi_ - dx) * np.cos(theta),\n dx + (xi_ - dx) * np.sin(theta),\n zi_]).T\n Ws.append(interp(probe))\n \n return np.stack(Ws, axis=2)\n\ndef plot_static_shot(W):\n #assert W.shape[0] == 64\n W = W / abs(W).max(axis=4).max(axis=3).max(axis=2).max(axis=1).reshape(-1, 1, 1, 1, 1) / 2 + .5\n t = W.shape[2] // 2\n \n best_thetas = []\n orient_sels = []\n for i in range(W.shape[0]):\n theta, orient_sel = find_dominant_orientation(W[i, :, t, :, :].transpose(1, 2, 0).sum(2))\n best_thetas.append(theta)\n orient_sels.append(orient_sel)\n \n best_thetas = np.array(best_thetas)\n orient_sels = np.array(orient_sels)\n \n sort_idx = np.argsort(orient_sels)[::-1]\n best_thetas = best_thetas[sort_idx]\n orient_sels = orient_sels[sort_idx]\n W = W[sort_idx, :, :, :, :]\n \n plt.figure(figsize=(8, 8))\n for i in range(W.shape[0]):\n plt.subplot(8, 8, i + 1)\n plt.imshow(W[i, :, t, :, :].transpose(1, 2, 0))\n theta = best_thetas[i]\n #plt.plot([3 + 3 * np.sin(theta), 3 - 3 * np.sin(theta)], [3 + 3 * np.cos(theta), 3 - 3 * np.cos(theta)], 'r-')\n plt.axis(False)\n #plt.suptitle(f'xy filters, sliced at t = {t}')\n plt.show()\n \n dt = W.shape[-1] // 2\n xi, yi = np.meshgrid(np.arange(-dt, dt+1), np.arange(-dt, dt+1))\n \n plt.figure(figsize=(8, 8))\n for i in range(W.shape[0]):\n W_ = W[i, :, :, :, :].transpose((3, 2, 1, 0))\n plt.subplot(8, 8, i + 1)\n theta = best_thetas[i]\n W_ = get_spatial_slice(W_, theta)\n plt.imshow(W_)\n plt.axis(False)\n plt.show()\n\nfrom models import get_feature_model\nargs = wrap({'features': 'cpc_02',\n 'ckpt_root': '../pretrained',\n 'slowfast_root': '../../slowfast',\n 'ntau': 1,\n 'subsample_layers': False})\n\nmodel, _, _ = get_feature_model(args)\n \nplot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())\n\nargs = wrap({'features': 'cpc_01',\n 'ckpt_root': '../pretrained',\n 'slowfast_root': '../../slowfast',\n 'ntau': 1,\n 'subsample_layers': False})\n\n\nmodel, _, _ = get_feature_model(args)\n \nplot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())\n\nargs = wrap({'features': 'airsim_04',\n 
'ckpt_root': '../pretrained',\n 'slowfast_root': '../../slowfast',\n 'ntau': 1,\n 'subsample_layers': False})\n\n\nmodel, _, _ = get_feature_model(args)\n \nplot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())", "Using DPC-RNN model\nfinal feature map has size 2x2\n" ], [ "data = model.s1.conv1.weight.detach().cpu().numpy()\nF = data.mean(axis=1).reshape((64, 5, 7*7))\nsepindexes = []\n\nfor i in range(F.shape[0]):\n U, S, V = np.linalg.svd(F[i, :, :])\n sepindex = S[0] ** 2 / (S ** 2).sum() \n sepindexes.append(sepindex)\n\n \nplt.figure(figsize=(2,2))\nplt.hist(sepindexes, np.arange(11)/10)\nplt.xlabel('Separability index')\nplt.ylabel('Count')\n\nplt.plot([.71, .71], [0, 17], 'k--')\nplt.plot([.71], [17], 'kv')\n\nimport seaborn as sns\nsns.despine()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0dd0dde3e56eaff4b5356b0f4c8c0108c8b26ba
267,324
ipynb
Jupyter Notebook
lectures/lecture7/lecture7.ipynb
AlexanderSLevin/nla2020_ozon
647b4bbf57eea0847ce4020932aad9b4e710bad0
[ "MIT" ]
15
2020-09-08T17:29:55.000Z
2021-02-08T02:36:28.000Z
lectures/lecture7/lecture7.ipynb
AlexanderSLevin/nla2020_ozon
647b4bbf57eea0847ce4020932aad9b4e710bad0
[ "MIT" ]
null
null
null
lectures/lecture7/lecture7.ipynb
AlexanderSLevin/nla2020_ozon
647b4bbf57eea0847ce4020932aad9b4e710bad0
[ "MIT" ]
40
2020-09-09T14:10:17.000Z
2021-09-07T12:26:19.000Z
137.159569
50,040
0.875451
[ [ [ "# Лекция 7. Разреженные матрицы и прямые методы для решения больших разреженных систем", "_____no_output_____" ], [ "## План на сегодняшнюю лекцию\n\n- Плотные неструктурированные матрицы и распределённое хранение\n- Разреженные матрицы и форматы их представления\n- Быстрая реализация умножения разреженной матрицы на вектор \n- Метод Гаусса для разреженных матриц: упорядоченность\n- Заполнение и графы: сепараторы\n- Лапласиан графа", "_____no_output_____" ], [ "## Плотные матрицы большой размерности\n\n- Если размер матрицы очень большой, то она не помещается в память\n- Возможные способы работы с такими матрицами\n - Если матрица **структурирована**, например блочно Тёплицева с Тёплицевыми блоками (в следующих лекциях), тогда возможно сжатое хранение\n - Для неструктурированных матриц помогает **распределённая память**\n - MPI для обработки распределённо хранимых матриц", "_____no_output_____" ], [ "### Распределённая память и MPI\n\n- Разбиваем матрицу на блоки и храним их на различных машинах\n- Каждая машина имеет своё собственное адресное пространство и не может повредить данные на других машинах\n- В этом случае машины передают друг другу данные для агрегирования результата вычислений\n- [MPI (Message Passing Interface)](https://en.wikipedia.org/wiki/Message_Passing_Interface) – стандарт в параллельных вычислениях с распределённой памятью", "_____no_output_____" ], [ "### Пример: умножение матрицы на вектор\n\n- Предположим, вы хотите посчитать произведение $Ax$ и матрица $A$ не помещается в памяти\n- В этом случае вы можете разбить матрицу на блоки и поместить их на разные машины\n- Возможные стратегии:\n - Одномерное деление на блоки использует только строки\n - Двумерное деление на блоки использует и строки и столбцы", "_____no_output_____" ], [ "#### Пример одномерного деления на блоки\n<img src=\"./1d_block.jpg\">", "_____no_output_____" ], [ "#### Общее время вычисления произведения матрицы на вектор для одномерного разбиения на блоки\n\n- Каждая машина хранит $n / p $ полных строк и $n / p$ элементов вектора $x$\n- Общее число операций $n^2 / p$\n- Общее время для отправки и записи данных $t_s \\log p + t_w n$, где $t_s$ – единица времени на отправку и $t_w$ – единица времени на запись", "_____no_output_____" ], [ "#### Пример двумерного деления на блоки\n\n<img src=\"./2d_block.png\" width=400>", "_____no_output_____" ], [ "#### Общее время вычисления умножения матрицы на вектор с использованием двумерного разбиения на блоки\n\n- Каждая машина хранит блок размера $n / \\sqrt{p} $ и $n / \\sqrt{p}$ элементов вектора\n- Общее число операций $n^2 / p$\n- Общее время для отправки и записи данных примерно равно $t_s \\log p + t_w (n/\\sqrt{p}) \\log p$, где $t_s$ – единица времени на отправку и $t_w$ – единица времени на запись", "_____no_output_____" ], [ "### Пакеты с поддержкой распределённого хранения данных\n\n- [ScaLAPACK](http://www.netlib.org/scalapack/)\n- [Trilinos](https://trilinos.org/)\n\nВ Python вы можете использовать [mpi4py](https://mpi4py.readthedocs.io/en/stable/) для параллельной реализации ваших алгоритмов.\n\n- PyTorch поддерживает распределённое обучение и хранение данных, см подробности [тут](https://pytorch.org/tutorials/intermediate/dist_tuto.html) ", "_____no_output_____" ], [ "### Резюме про работу с большими плотными неструктурированными матрицами\n\n- Распределённое хранение матриц\n- MPI\n- Пакеты, которые используют блочные вычисления\n- Различные подходы к блочным вычислениям", "_____no_output_____" ], [ "## Разреженные матрицы\n\n- 
", "_____no_output_____" ], [ "### Summary on working with large dense unstructured matrices\n\n- Distributed storage of matrices\n- MPI\n- Packages that use block computations\n- Different approaches to block computations", "_____no_output_____" ], [ "## Sparse matrices\n\n- The limiting factor in solving linear algebra problems with dense matrices is the memory required to store them, $N^2$ elements.\n\n- Sparse matrices, in which most of the elements are zero, at least allow us to store the matrix in memory.\n\n- The main questions: can we solve the following problems for sparse matrices?\n - solving linear systems\n - computing eigenvalues and eigenvectors\n - computing matrix functions", "_____no_output_____" ], [ "## Applications of sparse matrices\n\nSparse matrices arise in the following areas:\n\n- mathematical modelling and the solution of partial differential equations\n- graph processing, e.g. social network analysis\n- recommender systems\n- in general, wherever the relations between objects are \"sparse\".", "_____no_output_____" ], [ "### Sparse matrices help in computational graph theory \n\n- Graphs are represented by their adjacency matrices, which are most often sparse\n- The numerical solution of graph-theoretic problems reduces to operations with these sparse matrices\n - Graph clustering and community detection\n - Ranking\n - Random walks\n - And others....\n- Example: possibly the largest available hyperlink graph contains 3.5 billion web pages and 128 billion hyperlinks, see more details [here](http://webdatacommons.org/hyperlinkgraph/) \n- Various medium-size graphs for testing your algorithms are available in the [Stanford Large Network Dataset Collection](https://snap.stanford.edu/data/)", "_____no_output_____" ], [ "### Florida sparse matrix collection\n\n- A large number of sparse matrices from various applications can be found in the [Florida sparse matrix collection](http://www.cise.ufl.edu/research/sparse/matrices/).", "_____no_output_____" ] ], [ [ "from IPython.display import IFrame\nIFrame('http://yifanhu.net/GALLERY/GRAPHS/search.html', 500, 500)", "_____no_output_____" ] ], [ [ "### Sparse matrices and deep learning\n\n- DNNs have very many parameters\n- Some of them may be redundant\n- How can we reduce the number of parameters without a serious loss of accuracy?\n- The [sparse variational dropout method](https://github.com/ars-ashuha/variational-dropout-sparsifies-dnn) yields substantially sparse filters in DNNs almost without loss of accuracy!", "_____no_output_____" ], [ "## Constructing sparse matrices\n\n- We can generate sparse matrices with the **scipy.sparse** package\n\n- Matrices of very large size can be defined\n\nUseful functions when creating sparse matrices:\n- ```spdiags``` for creating a diagonal matrix with given diagonals\n- ```kron``` for the Kronecker product (definition below) of sparse matrices\n- arithmetic operations are also overloaded for sparse matrices", "_____no_output_____" ], [ "### Kronecker product\n\nFor matrices $A\\in\\mathbb{R}^{n\\times m}$ and $B\\in\\mathbb{R}^{l\\times k}$ the Kronecker product is defined as the block matrix of the form\n\n$$\n A\\otimes B = \\begin{bmatrix}a_{11}B & \\dots & a_{1m}B \\\\ \\vdots & \\ddots & \\vdots \\\\ a_{n1}B & \\dots & a_{nm}B\\end{bmatrix}\\in\\mathbb{R}^{nl\\times mk}.\n$$\n\nBasic properties:\n- bilinearity\n- $(A\\otimes B) (C\\otimes D) = AC \\otimes BD$\n- Let $\\mathrm{vec}(X)$ be the column-wise vectorization operator. Then \n$\\mathrm{vec}(AXB) = (B^T \\otimes A) \\mathrm{vec}(X).$
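\n\nA quick numerical sanity check of the last identity with NumPy (the matrix sizes are an arbitrary choice; ```vec``` is column-wise, hence ```order='F'```):\n\n```python\nimport numpy as np\n\nA, X, B = np.random.randn(3, 4), np.random.randn(4, 5), np.random.randn(5, 2)\nvec = lambda M: M.reshape(-1, order='F')  # column-wise vectorization\n\nlhs = vec(A @ X @ B)\nrhs = np.kron(B.T, A) @ vec(X)\nprint(np.allclose(lhs, rhs))  # True\n```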
", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy as sp\nimport scipy.sparse\nfrom scipy.sparse import csc_matrix, csr_matrix\nimport matplotlib.pyplot as plt\nimport scipy.linalg\nimport scipy.sparse.linalg\n%matplotlib inline\nn = 5\nex = np.ones(n);\nlp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \ne = sp.sparse.eye(n)\nA = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)\nA = csc_matrix(A)\nplt.spy(A, aspect='equal', marker='.', markersize=5)", "_____no_output_____" ] ], [ [ "### Sparsity pattern\n\n- The ```spy``` command draws the sparsity pattern of a given matrix: pixel $(i, j)$ is shown in the figure if the corresponding matrix element is nonzero.\n\n- The sparsity pattern is really very important for understanding the complexity of linear algebra algorithms for sparse matrices. \n\n- Often the sparsity pattern alone is enough to analyse how \"hard\" it is to work with a given matrix.", "_____no_output_____" ], [ "### Definition of sparse matrices\n\n- Sparse matrices are matrices in which the number of nonzero elements is substantially smaller than the total number of elements in the matrix. \n\n- Thanks to this you can perform basic linear algebra operations (first of all, solving linear systems) much faster than with dense matrices.", "_____no_output_____" ], [ "## What we need in order to see how this works\n\n- **Question 1:** How do we store sparse matrices in memory?\n\n- **Question 2:** How do we multiply a sparse matrix by a vector quickly?\n\n- **Question 3:** How do we solve linear systems with sparse matrices quickly?", "_____no_output_____" ], [ "### Storage of sparse matrices\n\nThere are many formats for storing sparse matrices; the most important ones are:\n\n- COO (coordinate format)\n- LIL (list of lists)\n- CSR (compressed sparse row)\n- CSC (compressed sparse column)\n- block variants\n\n```scipy``` provides constructors for each of these formats, e.g.\n\n```scipy.sparse.lil_matrix(A)```.", "_____no_output_____" ], [ "#### Coordinate format (COO)\n\n- The simplest sparse matrix storage format is the coordinate one. \n- In this format a sparse matrix is a collection of indices and of the values at these indices.\n\n```python\ni, j, val\n```\n\nwhere ```i, j``` are the index arrays and ```val``` is the array of matrix elements. <br>\n\n- Hence we need to store $3\\cdot$**nnz** elements, where **nnz** denotes the number of nonzero elements in the matrix.
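\n\nA tiny sketch of this layout with SciPy, where the three arrays play exactly the roles of ```i, j, val``` (the matrix itself is an arbitrary example):\n\n```python\nimport numpy as np\nfrom scipy.sparse import coo_matrix\n\ni = np.array([0, 0, 1, 2])         # row indices of the nonzeros\nj = np.array([0, 2, 1, 2])         # column indices\nval = np.array([1., 2., 3., 4.])   # the values themselves\n\nA = coo_matrix((val, (i, j)), shape=(3, 3))\nprint(A.toarray())\n```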
", "_____no_output_____" ], [ "#### Compressed sparse row (CSR)\n\nВ формате CSR матрица хранится также с помощью трёх массивов, но других:\n\n```python\nia, ja, sa\n```\n\nгде:\n\n- **ia** (начало строк) массив целых чисел длины $n+1$ \n- **ja** (индексы столбцов) массив целых чисел длины **nnz** \n- **sa** (элементы матрицы) массив действительных чисел длины **nnz**\n\n<img src=\"https://www.karlrupp.net/wp-content/uploads/2016/02/csr_storage_sparse_marix.png\" width=60% />\n\nИтак, всего необходимо хранить $2\\cdot{\\bf nnz} + n+1$ элементов.", "_____no_output_____" ], [ "### Разреженные матрицы в PyTorch и Tensorflow\n\n- PyTorch поддерживает разреженные матрицы в формате COO\n- Неполная поддержка вычисления градиентов в операциях с такими матрицами, список и обсуждение см. [тут](https://github.com/pytorch/pytorch/issues/9674)\n- Tensorflow также поддерживает разреженные матрицы в COO формате\n- Список поддерживаемых операций приведён [здесь](https://www.tensorflow.org/api_docs/python/tf/sparse) и поддержка вычисления градиентов также ограничена", "_____no_output_____" ], [ "### CSR формат позволяет быстро умножить разреженную матрицу на вектор (SpMV)\n\n```python\n\n for i in range(n):\n \n for k in range(ia[i]:ia[i+1]):\n \n y[i] += sa[k] * x[ja[k]]\n```", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy as sp\nimport scipy.sparse\nimport scipy.sparse.linalg\nfrom scipy.sparse import csc_matrix, csr_matrix, coo_matrix\nimport matplotlib.pyplot as plt\n%matplotlib inline\nn = 1000\nex = np.ones(n);\nlp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \ne = sp.sparse.eye(n)\nA = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)\nA = csr_matrix(A)\nrhs = np.ones(n * n)\nB = coo_matrix(A)\n%timeit A.dot(rhs)\n%timeit B.dot(rhs)", "7.91 ms ± 453 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n10.8 ms ± 536 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] ], [ [ "Видно, что **CSR** быстрее, и чем менее структурирован шаблон разреженности, тем выше выигрыш в скорости.", "_____no_output_____" ], [ "### Разреженные матрицы и эффективность\n\n- Использование разреженных матриц приводит к уменьшению сложности\n- Но они не очень подходят для параллельных/GPU реализаций \n- Они не показывают максимальную эффективность из-за случайного доступа к данным. \n- Обычно, пиковая производительность порядка $10\\%-15\\%$ считается хорошей. 
", "_____no_output_____" ], [ "### Вспомним как измеряется эффективность операций\n\n- Стандартный способ измерения эффективности операций линейной алгебры – это использование **flops** (число опраций с плавающей точкой в секунду)\n\n- Измерим эффективность умножения матрицы на вектор в случае плотной и разреженной матрицы", "_____no_output_____" ] ], [ [ "import numpy as np\nimport time\nn = 4000\na = np.random.randn(n, n)\nv = np.random.randn(n)\nt = time.time()\nnp.dot(a, v)\nt = time.time() - t\nprint('Time: {0: 3.1e}, Efficiency: {1: 3.1e} Gflops'.\\\n format(t, ((2 * n ** 2)/t) / 10 ** 9))", "Time: 1.2e-02, Efficiency: 2.7e+00 Gflops\n" ], [ "n = 4000\nex = np.ones(n);\na = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \nrhs = np.random.randn(n)\nt = time.time()\na.dot(rhs)\nt = time.time() - t\nprint('Time: {0: 3.1e}, Efficiency: {1: 3.1e} Gflops'.\\\n format(t, (3 * n) / t / 10 ** 9))", "Time: 1.3e-04, Efficiency: 9.0e-02 Gflops\n" ] ], [ [ "### Случайный доступ к данным и промахи в обращении к кешу\n\n- Сначала все элементы матрицы и вектора хранятся в оперативной памяти (RAM – Random Access Memory)\n- Если вы хотите вычислить произведение матрицы на вектор, часть элементов матрицы и вектора перемещаются в кеш (быстрой памяти малого объёма), см. [лекцию об алгоритме Штрассена и умножении матриц](https://github.com/amkatrutsa/nla2020_ozon/blob/master/lectures/lecture4/lecture4.ipynb)\n- После этого CPU берёт данные из кеша, обрабатывает их и возвращает результат снова в кеш\n- Если CPU требуются данные, которых ещё нет в кеше, это называется промах в обращении к кешу (cache miss)\n- Если случается промах в обращении к кешу, необходимые данные перемещаются из оперативной памяти в кеш\n\n**Q**: что если в кеше нет свободного места?\n\n\n- Чем больше промахов в обращении к кешу, тем медленнее выполняются вычисления", "_____no_output_____" ], [ "### План кеша и LRU\n\n<img src=\"./cache_scheme.png\" width=\"500\">", "_____no_output_____" ], [ "#### Умножение матрицы в CSR формате на вектор\n\n```python\n\n for i in range(n):\n \n for k in range(ia[i]:ia[i+1]):\n \n y[i] += sa[k] * x[ja[k]]\n \n```\n\n- Какая часть операций приводит к промахам в обращении к кешу?\n- Как эту проблему можно решить?", "_____no_output_____" ], [ "### Переупорядочивание уменьшает количество промахов в обращении к кешу\n\n- Если ```ja``` хранит последовательно элементы, тогда они могут быть перемещены в кеш одновременно и количество промахов в обращении к кешу уменьшится\n- Так происходит, когда разреженная матрица является **ленточной** или хотя бы блочно-диагональной\n- Мы можем превратить данную разреженную матрицу в ленточную или блочно-диагональную с помощью *перестановок* \n\n- Пусть $P$ матрица перестановок строк матрицы и $Q$ матрица перестановок столбцов матрицы\n- $A_1 = PAQ$ – матрица с шириной ленты меньшей, чем у матрицы $A$\n- $y = Ax \\to \\tilde{y} = A_1 \\tilde{x}$, где $\\tilde{x} = Q^{\\top}x$ и $\\tilde{y} = Py$\n- [Separated block diagonal form](http://albert-jan.yzelman.net/PDFs/yzelman09-rev.pdf) призван минимизировать количество промахов в обращении к кешу\n- Он также может быть расширен на двумерный случай, где разделяются не только строки, но и столбцы", "_____no_output_____" ], [ "#### Пример\n\n- SBD в одномерном случае\n<img src=\"./sbd.png\" width=\"400\">", "_____no_output_____" ], [ "## Методы решения линейных систем с разреженными матрицами\n\n- Прямые методы\n - LU разложение\n - Различные методы переупорядочивания для минимизации заполнения 
", "_____no_output_____" ], [ "## Methods for solving linear systems with sparse matrices\n\n- Direct methods\n - LU decomposition\n - Various reordering methods for minimizing the fill-in of the factors\n- Krylov methods", "_____no_output_____" ] ], [ [ "n = 10\nex = np.ones(n);\nlp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \ne = sp.sparse.eye(n)\nA = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)\nA = csr_matrix(A)\nrhs = np.ones(n * n)\nsol = sp.sparse.linalg.spsolve(A, rhs)\n_, (ax1, ax2) = plt.subplots(1, 2)\nax1.plot(sol)\nax1.set_title('Not reshaped solution')\nax2.contourf(sol.reshape((n, n), order='f'))\nax2.set_title('Reshaped solution')", "_____no_output_____" ] ], [ [ "## LU decomposition of a sparse matrix\n\n- Why can a sparse linear system be solved faster than a dense one? By which method? \n\n- In the LU decomposition of the matrix $A$ the factors $L$ and $U$ can also be sparse:\n\n$$A = L U$$\n\n- And the solution of a linear system with a sparse triangular matrix can be computed very fast. \n\n<font color='red'> Note that the inverse of a sparse matrix is NOT sparse! </font>\n", "_____no_output_____" ] ], [ [ "n = 7\nex = np.ones(n);\na = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \nb = np.array(np.linalg.inv(a.toarray()))\nprint(a.toarray())\nprint(b)", "[[-2. 1. 0. 0. 0. 0. 0.]\n [ 1. -2. 1. 0. 0. 0. 0.]\n [ 0. 1. -2. 1. 0. 0. 0.]\n [ 0. 0. 1. -2. 1. 0. 0.]\n [ 0. 0. 0. 1. -2. 1. 0.]\n [ 0. 0. 0. 0. 1. -2. 1.]\n [ 0. 0. 0. 0. 0. 1. -2.]]\n[[-0.875 -0.75 -0.625 -0.5 -0.375 -0.25 -0.125]\n [-0.75 -1.5 -1.25 -1. -0.75 -0.5 -0.25 ]\n [-0.625 -1.25 -1.875 -1.5 -1.125 -0.75 -0.375]\n [-0.5 -1. -1.5 -2. -1.5 -1. -0.5 ]\n [-0.375 -0.75 -1.125 -1.5 -1.875 -1.25 -0.625]\n [-0.25 -0.5 -0.75 -1. -1.25 -1.5 -0.75 ]\n [-0.125 -0.25 -0.375 -0.5 -0.625 -0.75 -0.875]]\n" ] ], [ [ "## And the factors...\n\n- $L$ and $U$ are usually sparse\n- For a tridiagonal matrix they are even bidiagonal!", "_____no_output_____" ] ], [ [ "from scipy.sparse.linalg import splu\nT = splu(a.tocsc(), permc_spec=\"NATURAL\")\nplt.spy(T.L)", "_____no_output_____" ] ], [ [ "Note that ```splu``` with the default value of the ```permc_spec``` parameter gives a permutation that does not yield bidiagonal factors:", "_____no_output_____" ] ], [ [ "from scipy.sparse.linalg import splu\nT = splu(a.tocsc())\nplt.spy(T.L)\nprint(T.perm_c)", "[0 1 2 3 5 4 6]\n" ] ], [ [ "## Two-dimensional case\n\nIn the two-dimensional case everything is much worse:", "_____no_output_____" ] ], [ [ "n = 20\nex = np.ones(n);\nlp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \ne = sp.sparse.eye(n)\nA = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)\nA = csc_matrix(A)\nT = scipy.sparse.linalg.spilu(A)\nplt.spy(T.L, marker='.', color='k', markersize=8)", "_____no_output_____" ] ], [ [ "For the right permutation, in the two-dimensional case the number of nonzero elements in $L$ grows as $\\mathcal{O}(N \\log N)$. The complexity, however, is $\\mathcal{O}(N^{3/2})$.", "_____no_output_____" ], [ "## Sparse matrices and graph theory\n\n- The number of nonzeros in the factors of the LU decomposition is closely related to graph theory.\n\n- The ``networkx`` package can be used to visualize graphs given only the adjacency matrix. 
", "_____no_output_____" ] ], [ [ "import networkx as nx\nn = 10\nex = np.ones(n);\nlp1 = sp.sparse.spdiags(np.vstack((ex, -2*ex, ex)), [-1, 0, 1], n, n, 'csr'); \ne = sp.sparse.eye(n)\nA = sp.sparse.kron(lp1, e) + sp.sparse.kron(e, lp1)\nA = csc_matrix(A)\nG = nx.Graph(A)\nnx.draw(G, pos=nx.spectral_layout(G), node_size=10)", "_____no_output_____" ] ], [ [ "## Заполнение (fill-in)\n\n- Заполнение матрицы – это элементы, которые были **нулями**, но стали **ненулями** в процессе выполнения алгоритма.\n\n- Заполнение может быть различным для различных перестановок. Итак, до того как делать факторизацию матрицы нам необходимо переупорядочить её элементы так, чтобы заполнение факторов было наименьшим.\n\n**Пример**\n\n$$A = \\begin{bmatrix} * & * & * & * & *\\\\ * & * & 0 & 0 & 0 \\\\ * & 0 & * & 0 & 0 \\\\ * & 0 & 0& * & 0 \\\\ * & 0 & 0& 0 & * \\end{bmatrix} $$\n\n- Если мы исключаем элементы сверху вниз, тогда мы получим плотную матрицу.\n- Однако мы можем сохранить разреженность, если исключение будет проводиться снизу вверх.\n- Подробности на следующих слайдах", "_____no_output_____" ], [ "## Метод Гаусса для разреженных матриц\n\n- Дана матрица $A$ такая что $A=A^*>0$. \n- Вычислим её разложение Холецкого $A = LL^*$.\n\nФактор $L$ может быть плотным даже если $A$ разреженная:\n\n$$\n\\begin{bmatrix} * & * & * & * \\\\ * & * & & \\\\ * & & * & \\\\ * & & & * \\end{bmatrix} = \n\\begin{bmatrix} * & & & \\\\ * & * & & \\\\ * & * & * & \\\\ * & * & * & * \\end{bmatrix}\n\\begin{bmatrix} * & * & * & * \\\\ & * & * & * \\\\ & & * & * \\\\ & & & * \\end{bmatrix}\n$$\n\n**Q**: как сделать факторы разреженными, то есть минимизировать заполнение?", "_____no_output_____" ], [ "## Метод Гаусса и перестановка\n\n- Нам нужно найти перестановку индексов такую что факторы будут разреженными, то есть мы будем вычислять разложение Холецкого для матрицы $PAP^\\top$, где $P$ – матрица перестановки.\n\n- Для примера с предыдущего слайда\n\n$$\nP \\begin{bmatrix} * & * & * & * \\\\ * & * & & \\\\ * & & * & \\\\ * & & & * \\end{bmatrix} P^\\top = \n\\begin{bmatrix} * & & & * \\\\ & * & & * \\\\ & & * & * \\\\ * & * & * & * \\end{bmatrix} = \n\\begin{bmatrix} * & & & \\\\ & * & & \\\\ & & * & \\\\ * & * & * & * \\end{bmatrix}\n\\begin{bmatrix} * & & & * \\\\ & * & & * \\\\ & & * & * \\\\ & & & * \\end{bmatrix}\n$$\n\nгде\n\n$$\nP = \\begin{bmatrix} & & & 1 \\\\ & & 1 & \\\\ & 1 & & \\\\ 1 & & & \\end{bmatrix}\n$$\n\n- Такая форма матрицы даёт разреженные факторы в LU разложении", "_____no_output_____" ] ], [ [ "import numpy as np\nimport scipy.sparse as spsp\nimport scipy.sparse.linalg as spsplin\nimport scipy.linalg as splin\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nA = spsp.coo_matrix((np.random.randn(10), ([0, 0, 0, 0, 1, 1, 2, 2, 3, 3], \n [0, 1, 2, 3, 0, 1, 0, 2, 0, 3])))\nprint(\"Original matrix\")\nplt.spy(A)\nplt.show()\nlu = spsplin.splu(A.tocsc(), permc_spec=\"NATURAL\")\nprint(\"L factor\")\nplt.spy(lu.L)\nplt.show()\nprint(\"U factor\")\nplt.spy(lu.U)\nplt.show()\nprint(\"Column permutation:\", lu.perm_c)\nprint(\"Row permutation:\", lu.perm_r)", "Original matrix\n" ] ], [ [ "### Блочный случай\n\n$$\nPAP^\\top = \\begin{bmatrix} A_{11} & & A_{13} \\\\ & A_{22} & A_{23} \\\\ A_{31} & A_{32} & A_{33}\\end{bmatrix}\n$$\n\nтогда\n\n$$\nPAP^\\top = \\begin{bmatrix} A_{11} & 0 & 0 \\\\ 0 & A_{22} & 0 \\\\ A_{31} & A_{32} & A_{33} - A_{31}A_{11}^{-1} A_{13} - A_{32}A_{22}^{-1}A_{23} \\end{bmatrix} \\begin{bmatrix} I & 0 & A_{11}^{-1}A_{13} \\\\ 0 & I & A_{22}^{-1}A_{23} \\\\ 0 & 0 & 
I\\end{bmatrix}\n$$\n\n- The block $ A_{33} - A_{31}A_{11}^{-1} A_{13} - A_{32}A_{22}^{-1}A_{23}$ is the Schur complement with respect to the block-diagonal matrix $\\begin{bmatrix} A_{11} & 0 \\\\ 0 & A_{22} \\end{bmatrix}$\n- We have reduced the problem to solving smaller linear systems with the matrices $A_{11}$ and $A_{22}$ 
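\n\nA small NumPy sanity check of this block factorization on random dense blocks (the block sizes are arbitrary):\n\n```python\nimport numpy as np\n\nn1, n2, n3 = 3, 3, 2\nA11, A22, A33 = np.random.randn(n1, n1), np.random.randn(n2, n2), np.random.randn(n3, n3)\nA13, A23 = np.random.randn(n1, n3), np.random.randn(n2, n3)\nA31, A32 = np.random.randn(n3, n1), np.random.randn(n3, n2)\nZ12, Z21 = np.zeros((n1, n2)), np.zeros((n2, n1))\n\nPAP = np.block([[A11, Z12, A13], [Z21, A22, A23], [A31, A32, A33]])\n\n# Schur complement with respect to the block-diagonal part\nS = A33 - A31 @ np.linalg.solve(A11, A13) - A32 @ np.linalg.solve(A22, A23)\n\nleft = np.block([[A11, Z12, np.zeros((n1, n3))],\n                 [Z21, A22, np.zeros((n2, n3))],\n                 [A31, A32, S]])\nright = np.block([[np.eye(n1), Z12, np.linalg.solve(A11, A13)],\n                  [Z21, np.eye(n2), np.linalg.solve(A22, A23)],\n                  [np.zeros((n3, n1)), np.zeros((n3, n2)), np.eye(n3)]])\nprint(np.allclose(left @ right, PAP))  # True\n```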
", "_____no_output_____" ], [ "### How do we find the permutation?\n\n- The main idea comes from graph theory\n- A sparse matrix can be treated as the **adjacency matrix** of some graph: the vertices $(i, j)$ are connected by an edge if the corresponding matrix element is nonzero.\n", "_____no_output_____" ], [ "### Example\n\nThe graphs for the matrix $\\begin{bmatrix} * & * & * & * \\\\ * & * & & \\\\ * & & * & \\\\ * & & & * \\end{bmatrix}$ and for the matrix $\\begin{bmatrix} * & & & * \\\\ & * & & * \\\\ & & * & * \\\\ * & * & * & * \\end{bmatrix}$ look as follows:\n\n<img src=\"./graph_dense.png\" width=300 align=\"center\"> and <img src=\"./graph_sparse.png\" width=300 align=\"center\">\n\n* Why is the second ordering better than the first one?", "_____no_output_____" ], [ "### Graph separator\n\n**Definition.** A separator of a graph $G$ is a set of vertices $S$ such that its removal leaves at least two connected components.\n\nA separator $S$ gives the following method for numbering the vertices of the graph $G$:\n- Find a separator $S$ whose removal leaves connected components $T_1$, $T_2$, $\\ldots$, $T_k$\n- Number the vertices in $S$ from $N − |S| + 1$ to $N$\n- Recursively, number the vertices in each component: \n - in $T_1$ from $1$ to $|T_1|$\n - in $T_2$ from $|T_1| + 1$ to $|T_1| + |T_2|$\n - and so on\n- If a component is small enough, the numbering inside it is arbitrary", "_____no_output_____" ], [ "### Separator and matrix structure: example\n\nThe separator for the matrix of the two-dimensional Laplacian\n\n$$\n A_{2D} = I \\otimes A_{1D} + A_{1D} \\otimes I, \\quad A_{1D} = \\mathrm{tridiag}(-1, 2, -1),\n$$\n\nlooks as follows\n\n<img src='./separator.png' width=300> </img>", "_____no_output_____" ], [ "If we first number the indices in $\\alpha$, then in $\\beta$, and finally the indices in the separator $\\sigma$, we obtain the following matrix\n\n$$\nPAP^\\top = \\begin{bmatrix} A_{\\alpha\\alpha} & & A_{\\alpha\\sigma} \\\\ & A_{\\beta\\beta} & A_{\\beta\\sigma} \\\\ A_{\\sigma\\alpha} & A_{\\sigma\\beta} & A_{\\sigma\\sigma}\\end{bmatrix},\n$$\n\nwhich has a suitable structure.\n\n- Hence, the problem of finding the permutation has been reduced to the problem of finding a graph separator!", "_____no_output_____" ], [ "### Nested dissection\n\n- For the blocks $A_{\\alpha\\alpha}$, $A_{\\beta\\beta}$ we can continue the partitioning recursively\n\n- After the recursion is finished, we need to eliminate the blocks $A_{\\sigma\\alpha}$ and $A_{\\sigma\\beta}$. \n\n- This makes the block in position $A_{\\sigma\\sigma}\\in\\mathbb{R}^{n\\times n}$ **dense**.\n\n- Computing the Cholesky decomposition of this block costs $\\mathcal{O}(n^3) = \\mathcal{O}(N^{3/2})$, where $N = n^2$ is the total number of vertices.\n\n- So the overall complexity is $\\mathcal{O}(N^{3/2})$", "_____no_output_____" ], [ "## Packages for nested dissection\n\n- MUltifrontal Massively Parallel sparse direct Solver ([MUMPS](http://mumps.enseeiht.fr/))\n- [Pardiso](https://www.pardiso-project.org/)\n- [Umfpack as part of the SuiteSparse package](http://faculty.cse.tamu.edu/davis/suitesparse.html)\n\nThey have interfaces for C/C++, Fortran and Matlab ", "_____no_output_____" ], [ "### Summary on nested dissection\n\n- The numbering has been reduced to finding a separator\n- A divide-and-conquer approach\n- Continues recursively on two (or more) subsets of vertices after the split\n- In theory, nested dissection gives the optimal complexity (why?)\n- In practice this method beats the others only on very large problems", "_____no_output_____" ], [ "## Separators in practice\n\n- Computing a separator is a **nontrivial problem!**\n\n- Developing graph partitioning methods has been an active area of research for many years\n\nExisting approaches:\n\n- Spectral partitioning (uses the eigenvectors of the **graph Laplacian**), details below\n- Geometric partitioning (for meshes with given vertex coordinates) [overview and analysis](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.31.4886&rep=rep1&type=pdf)\n- Iterative swapping ([(Kernighan-Lin, 1970)](http://xilinx.asia/_hdl/4/eda.ee.ucla.edu/EE201A-04Spring/kl.pdf), [(Fiduccia-Matheysses, 1982)](https://dl.acm.org/citation.cfm?id=809204))\n- Breadth-first search [(Lipton, Tarjan 1979)](http://www.cs.princeton.edu/courses/archive/fall06/cos528/handouts/sepplanar.pdf)\n- Multilevel recursive bisection (the most practical heuristic) ([overview](https://people.csail.mit.edu/jshun/6886-s18/lectures/lecture13-1.pdf) and [paper](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.499.4130&rep=rep1&type=pdf)). The package for this kind of partitioning is called METIS, it is written in C and available [here](http://glaros.dtc.umn.edu/gkhome/views/metis)", "_____no_output_____" ], [ "## Spectral graph partitioning\n\n- The idea of spectral partitioning goes back to the works of Miroslav Fiedler, who studied the connectivity of graphs ([paper](https://dml.cz/bitstream/handle/10338.dmlcz/101168/CzechMathJ_23-1973-2_11.pdf)).\n\n- We need to split the vertices of the graph into 2 sets\n\n- Consider +1/-1 vertex labels and the **loss function**\n\n$$E_c(x) = \\sum_{j} \\sum_{i \\in N(j)} (x_i - x_j)^2, \\quad N(j) \\text{ denotes the set of neighbours of vertex } j. 
$$\n\nWe need a balanced partition, therefore\n\n$$\\sum_i x_i = 0 \\quad \\Longleftrightarrow \\quad x^\\top e = 0, \\quad e = \\begin{bmatrix}1 & \\dots & 1\\end{bmatrix}^\\top,$$\n\nand since we introduced +1/-1 labels, it holds that\n\n$$\\sum_i x^2_i = n \\quad \\Longleftrightarrow \\quad \\|x\\|_2^2 = n.$$", "_____no_output_____" ], [ "## Graph Laplacian\n\nThe loss function $E_c$ can be written as (check why)\n\n$$E_c = (Lx, x)$$\n\nwhere $L$ is the **graph Laplacian**, which is defined as the symmetric matrix with entries\n\n$$L_{ii} = \\mbox{degree of vertex $i$},$$\n\n$$L_{ij} = -1, \\quad \\mbox{if $i \\ne j$ and an edge exists},$$\n\nand $0$ otherwise.\n\n- The row sums of the matrix $L$ are zero, so there is an eigenvalue $0$ whose eigenvector is the vector of all ones.\n- The eigenvalues are nonnegative (why?).
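\n\nA minimal sketch of building $L$ from an adjacency matrix and checking both facts (the 4-cycle graph is an arbitrary example):\n\n```python\nimport numpy as np\n\n# adjacency matrix of a small undirected graph (a cycle on 4 vertices)\nAdj = np.array([[0, 1, 0, 1],\n                [1, 0, 1, 0],\n                [0, 1, 0, 1],\n                [1, 0, 1, 0]], dtype=float)\n\nL = np.diag(Adj.sum(axis=1)) - Adj  # degrees on the diagonal, -1 for edges\n\nprint(L @ np.ones(4))         # zero vector: the row sums vanish\nprint(np.linalg.eigvalsh(L))  # all eigenvalues are nonnegative\n```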
", "_____no_output_____" ], [ "## Partitioning as an optimization problem\n\n- Minimizing $E_c$ with the constraints mentioned above leads to a partition that minimizes the number of vertices in the separator while keeping the partition balanced\n\n- We now write the relaxation of the integer quadratic program in the form of a continuous quadratic program\n\n$$E_c(x) = (Lx, x)\\to \\min_{\\substack{x^\\top e =0, \\\\ \\|x\\|_2^2 = n}}$$", "_____no_output_____" ], [ "## Fiedler vector\n\n- The solution of this minimization problem is the eigenvector of the matrix $L$ corresponding to the **second** smallest eigenvalue (it is called the Fiedler vector)\n- Indeed,\n\n$$\n \\min_{\\substack{x^\\top e =0, \\\\ \\|x\\|_2^2 = n}} (Lx, x) = n \\cdot \\min_{{x^\\top e =0}} \\frac{(Lx, x)}{(x, x)} = n \\cdot \\min_{{x^\\top e =0}} R(x), \\quad R(x) \\text{ is the Rayleigh quotient}\n$$\n\n- Since $e$ is the eigenvector corresponding to the smallest eigenvalue, on the subspace $x^\\top e =0$ we get the second smallest eigenvalue.\n\n- The sign of $x_i$ defines the partition of the graph.\n\n- It remains to understand how to compute this vector. We know the power method, but it finds the eigenvector of the eigenvalue that is largest in absolute value.\n- Iterative methods for eigenvalue problems will be discussed later in the course...\n", "_____no_output_____" ] ], [ [ "import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport networkx as nx\nkn = nx.read_gml('karate.gml')\nprint(\"Number of vertices = {}\".format(kn.number_of_nodes()))\nprint(\"Number of edges = {}\".format(kn.number_of_edges()))\nnx.draw_networkx(kn, node_color=\"red\") #Draw the graph", "Number of vertices = 34\nNumber of edges = 78\n" ], [ "Laplacian = nx.laplacian_matrix(kn).asfptype()\nplt.spy(Laplacian, markersize=5)\nplt.title(\"Graph laplacian\")\nplt.axis(\"off\")\nplt.show()\neigval, eigvec = spsplin.eigsh(Laplacian, k=2, which=\"SM\")\nprint(\"The 2 smallest eigenvalues =\", eigval)", "_____no_output_____" ], [ "plt.scatter(np.arange(len(eigvec[:, 1])), np.sign(eigvec[:, 1]))\nplt.show()\nprint(\"Sum of elements in Fiedler vector = {}\".format(np.sum(eigvec[:, 1].real)))", "_____no_output_____" ], [ "nx.draw_networkx(kn, node_color=np.sign(eigvec[:, 1]))", "_____no_output_____" ] ], [ [ "### Summary of the spectral graph partitioning example\n\n- We called a SciPy function that finds a fixed number of eigenvectors and eigenvalues that are the smallest ones (other options are possible)\n- The details of the methods implemented in these functions will be discussed soon\n- The Fiedler vector gives a simple way of partitioning a graph\n- To split a graph into more parts, one should use the eigenvectors of the Laplacian as feature vectors and run some clustering algorithm, e.g. $k$-means", "_____no_output_____" ], [ "### Fiedler vector and the algebraic connectivity of a graph\n\n**Definition.** The algebraic connectivity of a graph is the second smallest eigenvalue of its graph Laplacian matrix.\n\n**Proposition.** The algebraic connectivity of a graph is greater than 0 if and only if the graph is connected.", "_____no_output_____" ], [ "## Minimal degree orderings\n\n- The idea is to eliminate the rows and/or columns with a small number of nonzeros, update the fill-in and repeat.\n\n- An efficient implementation is a separate problem in itself (adding/removing elements).\n\n- In practice it is often the best choice for medium-size problems\n\n- SciPy [uses](https://docs.scipy.org/doc/scipy-1.3.0/reference/generated/scipy.sparse.linalg.splu.html) this approach for different matrices ($A^{\\top}A$, $A + A^{\\top}$) ", "_____no_output_____" ], [ "## Key points of today's lecture\n\n- Large dense matrices and distributed computations\n- Sparse matrices, their applications and storage formats\n- Efficient ways of multiplying a sparse matrix by a vector\n- LU decomposition of a sparse matrix: fill-in and row permutations\n- Fill-in minimization: separators and graph partitioning\n- Nested dissection\n- Spectral graph partitioning: the graph Laplacian and the Fiedler vector", "_____no_output_____" ] ], [ [ "from IPython.core.display import HTML\ndef css_styling():\n    styles = open(\"./styles/custom.css\", \"r\").read()\n    return HTML(styles)\ncss_styling()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
d0dd144b6b4cbd8cb3cefff2fe3ed879aae27952
256,962
ipynb
Jupyter Notebook
grain_size_tools/example_notebooks/stereology_module_examples.ipynb
jwestraadt/GrainSizeTools
81e8694421b1bd4b8ccd73355ee2394c85c10a89
[ "Apache-2.0" ]
48
2015-08-06T14:06:12.000Z
2022-03-08T16:39:12.000Z
grain_size_tools/example_notebooks/stereology_module_examples.ipynb
jwestraadt/GrainSizeTools
81e8694421b1bd4b8ccd73355ee2394c85c10a89
[ "Apache-2.0" ]
12
2018-07-11T15:09:14.000Z
2021-05-21T00:32:31.000Z
grain_size_tools/example_notebooks/stereology_module_examples.ipynb
jwestraadt/GrainSizeTools
81e8694421b1bd4b8ccd73355ee2394c85c10a89
[ "Apache-2.0" ]
16
2016-07-19T16:38:59.000Z
2021-09-22T08:24:21.000Z
679.793651
110,469
0.754664
[ [ [ "# The stereology module\n\nThe main purpose of stereology is to extract quantitative information from microscope images relating two-dimensional measures obtained on sections to three-dimensional parameters defining the structure. The aim of stereology is not to reconstruct the 3D geometry of the material (as in tomography) but to estimate a particular 3D feature. In this case, we aim to approximate the actual (3D) grain size distribution from the apparent (2D) grain size distribution obtained in sections.\n\nGrainSizeTools script includes two stereological methods: 1) the Saltykov, and 2) the two-step methods. Before looking at its functionalities, applications and limitations, let's import the example dataset.", "_____no_output_____" ] ], [ [ "# Load the script first (change the path to GrainSizeTools_script.py accordingly!)\n%run C:/Users/marco/Documents/GitHub/GrainSizeTools/grain_size_tools/GrainSizeTools_script.py", "module plot imported\nmodule averages imported\nmodule stereology imported\nmodule piezometers imported\nmodule template imported\n\n======================================================================================\nWelcome to GrainSizeTools script\n======================================================================================\nA free open-source cross-platform script to visualize and characterize grain size\npopulation and estimate differential stress via paleopizometers.\n\nVersion: v3.0.2 (2020-12-30)\nDocumentation: https://marcoalopez.github.io/GrainSizeTools/\n\nType get.functions_list() to get a list of the main methods\n\n" ], [ "# Import the example dataset\nfilepath = 'C:/Users/marco/Documents/GitHub/GrainSizeTools/grain_size_tools/DATA/data_set.txt'\ndataset = pd.read_csv(filepath, sep='\\t')\ndataset['diameters'] = 2 * np.sqrt(dataset['Area'] / np.pi) # estimate ECD", "_____no_output_____" ] ], [ [ "## The Saltykov method\n\n> **What is it?**\n>\n> It is a stereological method that approximates the actual grain size distribution from the histogram of the apparent grain size distribution. The method is distribution-free, meaning that no assumption is made upon the type of statistical distribution, making the method very versatile.\n>\n> **What do I use it for?**\n>\n> Its main use (in geosciences) is to estimate the volume fraction of a specific range of grain sizes.\n>\n> **What are its limitations?**\n>\n> The method presents several limitations for its use in rocks\n>\n> - It assumes that grains are non-touching spheres uniformly distributed in a matrix (e.g. bubbles within a piece of glass). This never holds for polycrystalline rocks. To apply the method, the grains should be at least approximately equiaxed, which is normally fulfilled in recrystallized grains.\n> - Due to the use of the histogram, the number of classes determines the accuracy and success of the method. There is a trade-off here because the smaller the number of classes, the better the numerical stability of the method, but the worse the approximation of the targeted distribution and vice versa. The issue is that no method exists to find an optimal number of classes and this has to be set by the user. The use of the histogram also implies that we cannot obtain a complete description of the grain size distribution.\n> - The method lacks a formulation for estimating errors during the unfolding procedure.\n> - You cannot obtain an estimate of the actual average grain size (3D) as individual data is lost when using the histogram (i.e. 
The Saltykov method reconstructs the 3D histogram, not every apparent diameter in the actual one as this is mathematically impossible).\n>\n\nTODO: explain the details of the method", "_____no_output_____" ] ], [ [ "fig1, (ax1, ax2) = stereology.Saltykov(dataset['diameters'], numbins=11, calc_vol=50)", "=======================================\nvolume fraction (up to 50 microns) = 41.65 %\n=======================================\n=======================================\nbin size = 14.24\n=======================================\n" ], [ "fig1.savefig(\"saltykov_plot.png\", dpi=150)", "_____no_output_____" ] ], [ [ "Now let's assume that we want to use the class densities estimated by Saltykov's method to calculate the specific volume of each or one of the classes. We have two options here.", "_____no_output_____" ] ], [ [ "stereology.Saltykov?", "\u001b[1;31mSignature:\u001b[0m\n\u001b[0mstereology\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mSaltykov\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mdiameters\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mnumbins\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mcalc_vol\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mtext_file\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mNone\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mreturn_data\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m \u001b[0mleft_edge\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\n\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mDocstring:\u001b[0m\nEstimate the actual (3D) distribution of grain size from the population\nof apparent diameters measured in a thin section using a Saltykov-type\nalgorithm (Saltykov 1967; Sahagian and Proussevitch 1998).\n\nThe Saltykov method is optimal to estimate the volume of a particular grain\nsize fraction as well as to obtain a qualitative view of the appearance of\nthe actual 3D grain size population, either in uni- or multimodal populations.\n\nParameters\n----------\ndiameters : array_like\n the apparent diameters of the grains.\n\nnumbins : positive integer, optional\n the number of bins/classes of the histogram. If not declared,\n is set to 10 by default.\n\ncalc_vol : positive scalar or None, optional\n if the user specifies a diameter, the function will return the volume\n occupied by the grain fraction up to that diameter.\n\ntext_file : string or None, optional\n if the user specifies a name, the function will store a csv file\n with that name containing the Saltykov output.\n\nreturn_data : bool, optional\n if True the function will return the position of the midpoints and\n the frequencies.\n\nleft_edge : positive scalar or 'min', optional\n set the left edge of the histogram. 
Default is zero.\n\nCall functions\n--------------\n- unfold_population\n- Saltykov_plot\n\nExamples\n--------\n>>> Saltykov(diameters)\n>>> Saltykov(diameters, numbins=16, calc_vol=40)\n>>> Saltykov(diameters, text_file='foo.csv')\n>>> mid_points, frequencies = Saltykov(diameters, return_data=True)\n\nReferences\n----------\nSaltykov SA (1967) http://doi.org/10.1007/978-3-642-88260-9_31\nSahagian and Proussevitch (1998) https://doi.org/10.1016/S0377-0273(98)00043-2\n\nReturn\n------\nStatistical descriptors, a plot, and/or a file with the data (optional)\n\u001b[1;31mFile:\u001b[0m c:\\users\\marco\\documents\\github\\grainsizetools\\grain_size_tools\\stereology.py\n\u001b[1;31mType:\u001b[0m function\n" ] ], [ [ "The input parameter ``text_file`` allows you to save a text file with the data in tabular format, you only have to declare the name of the file and the file type, either txt or csv (as in the function documentation example). Alternatively, you can use the Saltykov function to directly return the density and the midpoint values of the classes as follows:", "_____no_output_____" ] ], [ [ "mid_points, densities = stereology.Saltykov(dataset['diameters'], numbins=11, return_data=True)\nprint(densities)", "[1.31536871e-03 2.17302235e-02 2.25631643e-02 1.45570771e-02\n 6.13586532e-03 2.24830266e-03 1.29306084e-03 3.60326809e-04\n 0.00000000e+00 0.00000000e+00 4.11071036e-05]\n" ] ], [ [ "As you may notice, these density values do not add up to 1 or 100.", "_____no_output_____" ] ], [ [ "np.sum(densities)", "_____no_output_____" ] ], [ [ "This is because the script normalized the frequencies of the different classes so that the integral over the range (not the sum) is one (see FAQs for an explanation on this). If you want to calculate the relative proportion for each class you must multiply the value of the densities by the bin size. After doing this, you can check that the relative densities sum one (i.e. they are proportions relative to one).", "_____no_output_____" ] ], [ [ "corrected_densities = densities * 14.236\nnp.sum(corrected_densities)", "_____no_output_____" ] ], [ [ "So for example if you have a volume of rock of say 100 cm2 and you want to estimate what proportion of that volume is occupied by each grain size class/range, you could estimate it as follows:", "_____no_output_____" ] ], [ [ "# I use np.around to round the values\nnp.around(corrected_densities * 100, 2)", "_____no_output_____" ] ], [ [ "## The two-step method\n\n> **What is it?**\n>\n> It is a stereological method that approximates the actual grain size distribution. The method is distribution-dependent, meaning that it is assumed that the distribution of grain sizes follows a lognormal distribution. The method fit a lognormal distribution on top of the Saltykov output, hence the name two-step method.\n>\n> **What do I use it for?**\n>\n> Its main use is to estimate the shape of the lognormal distribution, the average grain size (3D), and the volume fraction of a specific range of grain sizes (not yet implemented).\n>\n> **What are its limitations?**\n>\n> The method is partially based on the Saltykov method and therefore inherits some of its limitations. 
The method, however, does not require defining a specific number of classes.", "_____no_output_____" ] ], [ [ "fig2, ax = stereology.calc_shape(dataset['diameters'])", "=======================================\nPREDICTED OPTIMAL VALUES\nNumber of classes: 11\nMSD (lognormal shape) = 1.63 ± 0.06\nGeometric mean (scale) = 36.05 ± 1.27\n=======================================\n" ], [ "fig2.savefig(\"2step_plot.png\", dpi=150)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0dd15c74e86f449a2eb3bb97c5b8214772fee55
87,101
ipynb
Jupyter Notebook
TensorFlow2.0__03.01_Convolutional_Neural_Network.ipynb
Gautamtverma/TensorFlow2.0_Notebooks
8e87025843e82b12f7d8fc192f4a6f997355dadb
[ "MIT" ]
29
2020-02-24T22:04:11.000Z
2022-03-23T07:09:12.000Z
TensorFlow2.0__03.01_Convolutional_Neural_Network.ipynb
IvanBongiorni/TensorFlow2.0_Tutorial
e3c18362fca602d291c06f91ddb5aa919ba00102
[ "MIT" ]
null
null
null
TensorFlow2.0__03.01_Convolutional_Neural_Network.ipynb
IvanBongiorni/TensorFlow2.0_Tutorial
e3c18362fca602d291c06f91ddb5aa919ba00102
[ "MIT" ]
17
2019-10-20T14:47:06.000Z
2021-11-19T10:02:23.000Z
145.897822
37,704
0.870931
[ [ [ "# Convolutional Neural Network\n\n### Author: Ivan Bongiorni, Data Scientist at GfK.\n\n[LinkedIn profile](https://www.linkedin.com/in/ivan-bongiorni-b8a583164/)\n\nIn this Notebook I will implement a **basic CNN in TensorFlow 2.0**. I will use the famous **Fashion MNIST** dataset, [published by Zalando](https://github.com/zalandoresearch/fashion-mnist) and made [available on Kaggle](https://www.kaggle.com/zalando-research/fashionmnist). Images come already preprocessed in 28 x 28 black and white format.\n\nIt is a multiclass classification task on the following labels:\n0. T-shirt/top\n1. Trouser\n2. Pullover\n3. Dress\n4. Coat\n5. Sandal\n6. Shirt\n7. Sneaker\n8. Bag\n9. Ankle boot ", "_____no_output_____" ], [ "![](https://limetta.se/globalassets/nyhetbloggpress-bilder/fashion-mnist.jpg)", "_____no_output_____" ], [ "Summary:\n\n0. Import data + Dataprep\n0. CNN architecture\n0. Training with Mini Batch Gradient Descent\n0. Test", "_____no_output_____" ] ], [ [ "# Import necessary modules\n\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\nprint(tf.__version__)\n\nfrom sklearn.utils import shuffle\n\nfrom matplotlib import pyplot as plt\nimport seaborn", "2.0.0-beta0\n" ] ], [ [ "# 0. Import data + Dataprep\n\nThe dataset comes already divided in 60k and 10k Train and Test images. I will now import Training data, and leave Test for later. In order to dataprep image data, I need to reshape the pixel into `(, 28, 28, 1)` arrays; the 1 at the end represents the channel: 1 for black and white images, 3 (red, green, blue) for colored images. Pixel data are also scaled to the `[0, 1]` interval.", "_____no_output_____" ] ], [ [ "df = pd.read_csv('fashion-mnist_train.csv')\n\n# extract labels, one-hot encode them\nlabel = df.label\nlabel = pd.get_dummies(label)\nlabel = label.values\nlabel = label.astype(np.float32)\n\ndf.drop('label', axis = 1, inplace = True)\ndf = df.values\ndf = df.astype(np.float32)\n\n# reshape and scale data\ndf = df.reshape((len(df), 28, 28, 1))\ndf = df / 255.", "_____no_output_____" ] ], [ [ "# 1. CNN architecture\n\nI will feed images into a set of **convolutional** and **max-pooling layers**:\n\n- Conv layers are meant to extract relevant informations from pixel data. A number of *filters* scroll through the image, learning the most relevant informations to extract.\n- Max-Pool layers instead are meant to drastically reduce the number of pixel data. For each (2, 2) window size, Max-Pool will save only the pixel with the highest activation value. Max-Pool is meant to make the model lighter by removing the least relevant observations, at the cost of course of loosing a lot of data!\n\nSince I'm focused on the implementation, rather than on the theory behind it, please refer to [this good article](https://towardsdatascience.com/types-of-convolutions-in-deep-learning-717013397f4d) on how Conv and Max-Pool work in practice. If you are a die-hard, check [this awesome page from a CNN Stanford Course](http://cs231n.github.io/convolutional-networks/?source=post_page---------------------------#overview).\n\nCon and Max-Pool will extract and reduce the size of the input, so that the following feed-forward part could process it. 
The first convolutional layer requires a specification of the input shape, corresponding to the shape of each image.\n\nSince it's a multiclass classification task, softmax activation must be placed at the output layer in order to transform the Network's output into a probability distribution over the ten target categories.", "_____no_output_____" ] ], [ [ "from tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten, Dense, BatchNormalization, Dropout\nfrom tensorflow.keras.activations import relu, elu, softmax\n\n\nCNN = Sequential([\n    \n    Conv2D(32, kernel_size = (3, 3), activation = elu, \n           kernel_initializer = 'he_normal', input_shape = (28, 28, 1)), \n    MaxPool2D((2, 2)), \n    \n    Conv2D(64, kernel_size = (3, 3), kernel_initializer = 'he_normal', activation = elu), \n    BatchNormalization(), \n    \n    Conv2D(128, kernel_size = (3, 3), kernel_initializer = 'he_normal', activation = elu), \n    BatchNormalization(), \n    Dropout(0.2), \n    \n    \n    Flatten(), \n    \n    \n    Dense(400, activation = elu), \n    BatchNormalization(), \n    Dropout(0.2), \n    \n    Dense(400, activation = elu), \n    BatchNormalization(), \n    Dropout(0.2), \n    \n    Dense(10, activation = softmax)\n    \n])\n", "_____no_output_____" ], [ "CNN.summary()", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 26, 26, 32) 320 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 13, 13, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 11, 11, 64) 18496 \n_________________________________________________________________\nbatch_normalization (BatchNo (None, 11, 11, 64) 256 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 9, 9, 128) 73856 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, 9, 9, 128) 512 \n_________________________________________________________________\ndropout (Dropout) (None, 9, 9, 128) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 10368) 0 \n_________________________________________________________________\ndense (Dense) (None, 400) 4147600 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, 400) 1600 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 400) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 400) 160400 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 400) 1600 \n_________________________________________________________________\ndropout_2 (Dropout) (None, 400) 0 \n_________________________________________________________________\ndense_2 (Dense) (None, 10) 4010 \n=================================================================\nTotal params: 4,408,650\nTrainable params: 4,406,666\nNon-trainable params: 1,984\n_________________________________________________________________\n" ] ], [ [ "# 2. Training with Mini Batch Gradient Descent\n\nThe training part is no different from mini batch gradient descent training of feed-forward classifiers.
I wrote [a Notebook on this technique](https://github.com/IvanBongiorni/TensorFlow2.0_Tutorial/blob/master/TensorFlow2.0_02_MiniBatch_Gradient_Descent.ipynb) in which I explain it in more detail.\n\nAssuming you already know how it works, I will define a function to fetch mini batches into the Network and proceed with training in eager execution.", "_____no_output_____" ] ], [ [ "@tf.function\ndef fetch_batch(X, y, batch_size, epoch):\n    start = epoch*batch_size\n    \n    X_batch = X[start:start+batch_size, :, :]\n    y_batch = y[start:start+batch_size, :]\n    \n    return X_batch, y_batch", "_____no_output_____" ], [ "# To measure execution time\nimport time\nstart = time.time()", "_____no_output_____" ] ], [ [ "There is one big difference with respect to previous training exercises. Since this Network has a high number of parameters (approx 4.4 million) it will require comparatively longer training times. For this reason, I will measure training not just in *epochs*, but also in *cycles*.\n\nAt each training cycle, I will shuffle the dataset and feed it into the Network in mini batches until it's completed. At the following cycle, I will reshuffle the data using a different random seed and repeat the process. (What I called cycles are nothing but Keras' \"epochs\".)\n\nUsing 50 cycles and batches of size 120 on a 60,000-image dataset, I will be able to train my CNN for an overall number of 25,000 epochs.", "_____no_output_____" ] ], [ [ "from tensorflow.keras.losses import CategoricalCrossentropy\nfrom tensorflow.keras.metrics import CategoricalAccuracy\n\nloss = tf.keras.losses.CategoricalCrossentropy()\naccuracy = tf.keras.metrics.CategoricalAccuracy()\n\noptimizer = tf.optimizers.Adam(learning_rate = 0.0001)\n\n\n### TRAINING\n\ncycles = 50\nbatch_size = 120\n\nloss_history = []\naccuracy_history = []\n\nfor cycle in range(cycles):\n    \n    df, label = shuffle(df, label, random_state = cycle**2)\n    \n    for epoch in range(len(df) // batch_size):\n        \n        X_batch, y_batch = fetch_batch(df, label, batch_size, epoch)\n        \n        with tf.GradientTape() as tape:\n            current_loss = loss(CNN(X_batch), y_batch)\n        \n        gradients = tape.gradient(current_loss, CNN.trainable_variables)\n        optimizer.apply_gradients(zip(gradients, CNN.trainable_variables))\n    \n    loss_history.append(current_loss.numpy())\n    \n    current_accuracy = accuracy(CNN(X_batch), y_batch).numpy()\n    accuracy_history.append(current_accuracy)\n    accuracy.reset_states()\n    \n    print(str(cycle + 1) + '.\tTraining Loss: ' + str(current_loss.numpy()) \n          + ',\tAccuracy: ' + str(current_accuracy))    \n#\nprint('\\nTraining complete.')\nprint('Final Loss: ' + str(current_loss.numpy()) + '. 
Final accuracy: ' + str(current_accuracy))", "1.\tTraining Loss: 2.963158,\tAccuracy: 0.825\n2.\tTraining Loss: 2.2184157,\tAccuracy: 0.875\n3.\tTraining Loss: 1.6165245,\tAccuracy: 0.90833336\n4.\tTraining Loss: 1.8334527,\tAccuracy: 0.89166665\n5.\tTraining Loss: 1.2200023,\tAccuracy: 0.93333334\n6.\tTraining Loss: 1.9942344,\tAccuracy: 0.89166665\n7.\tTraining Loss: 2.1937194,\tAccuracy: 0.8833333\n8.\tTraining Loss: 1.8208188,\tAccuracy: 0.89166665\n9.\tTraining Loss: 1.6057307,\tAccuracy: 0.9\n10.\tTraining Loss: 1.036152,\tAccuracy: 0.94166666\n11.\tTraining Loss: 1.1800997,\tAccuracy: 0.925\n12.\tTraining Loss: 0.9550361,\tAccuracy: 0.9583333\n13.\tTraining Loss: 0.71287704,\tAccuracy: 0.96666664\n14.\tTraining Loss: 1.1355927,\tAccuracy: 0.93333334\n15.\tTraining Loss: 0.42137903,\tAccuracy: 0.975\n16.\tTraining Loss: 1.2537751,\tAccuracy: 0.925\n17.\tTraining Loss: 0.6678084,\tAccuracy: 0.96666664\n18.\tTraining Loss: 0.67579913,\tAccuracy: 0.96666664\n19.\tTraining Loss: 0.93005073,\tAccuracy: 0.95\n20.\tTraining Loss: 0.9756758,\tAccuracy: 0.95\n21.\tTraining Loss: 0.5439663,\tAccuracy: 0.96666664\n22.\tTraining Loss: 0.48298022,\tAccuracy: 0.975\n23.\tTraining Loss: 0.71461415,\tAccuracy: 0.96666664\n24.\tTraining Loss: 0.59260106,\tAccuracy: 0.96666664\n25.\tTraining Loss: 0.52771163,\tAccuracy: 0.975\n26.\tTraining Loss: 0.70353717,\tAccuracy: 0.96666664\n27.\tTraining Loss: 0.8188721,\tAccuracy: 0.95\n28.\tTraining Loss: 0.5126863,\tAccuracy: 0.975\n29.\tTraining Loss: 0.45719293,\tAccuracy: 0.975\n30.\tTraining Loss: 0.54228246,\tAccuracy: 0.96666664\n31.\tTraining Loss: 0.55225444,\tAccuracy: 0.96666664\n32.\tTraining Loss: 0.9566125,\tAccuracy: 0.94166666\n33.\tTraining Loss: 0.29893246,\tAccuracy: 0.98333335\n34.\tTraining Loss: 0.70337826,\tAccuracy: 0.9583333\n35.\tTraining Loss: 0.5612643,\tAccuracy: 0.96666664\n36.\tTraining Loss: 0.59767526,\tAccuracy: 0.96666664\n37.\tTraining Loss: 0.67508984,\tAccuracy: 0.96666664\n38.\tTraining Loss: 0.67434996,\tAccuracy: 0.9583333\n39.\tTraining Loss: 0.0228432,\tAccuracy: 1.0\n40.\tTraining Loss: 0.2526902,\tAccuracy: 0.9916667\n41.\tTraining Loss: 0.17682979,\tAccuracy: 0.9916667\n42.\tTraining Loss: 0.4235972,\tAccuracy: 0.975\n43.\tTraining Loss: 0.13609551,\tAccuracy: 0.9916667\n44.\tTraining Loss: 0.5388846,\tAccuracy: 0.96666664\n45.\tTraining Loss: 0.293708,\tAccuracy: 0.98333335\n46.\tTraining Loss: 0.04402418,\tAccuracy: 1.0\n47.\tTraining Loss: 0.5923631,\tAccuracy: 0.96666664\n48.\tTraining Loss: 0.7310451,\tAccuracy: 0.9583333\n49.\tTraining Loss: 0.41430146,\tAccuracy: 0.975\n50.\tTraining Loss: 0.14125405,\tAccuracy: 0.9916667\n\nTraining complete.\nFinal Loss: 0.14125405. Final accuracy: 0.9916667\n" ], [ "end = time.time()\nprint(end - start) # around 3.5 hours :(", "12431.102645158768\n" ], [ "plt.figure(figsize = (15, 4)) # adjust figures size\nplt.subplots_adjust(wspace=0.2) # adjust distance\n\nfrom scipy.signal import savgol_filter\n\n# loss plot\nplt.subplot(1, 2, 1)\nplt.plot(loss_history)\nplt.plot(savgol_filter(loss_history, len(loss_history)//3, 3))\nplt.title('Loss')\nplt.xlabel('epochs')\nplt.ylabel('Categorical Cross-Entropy')\n\n# accuracy plot\nplt.subplot(1, 2, 2)\nplt.plot(accuracy_history)\nplt.plot(savgol_filter(accuracy_history, len(loss_history)//3, 3))\nplt.title('Accuracy')\nplt.xlabel('epochs')\nplt.ylabel('Accuracy')\n\nplt.show()", "_____no_output_____" ] ], [ [ "# 3. Test\n\nLet's now test the model on the Test set. 
I will repeat the dataprep part on it:", "_____no_output_____" ] ], [ [ "# Dataprep Test data\n\ntest = pd.read_csv('fashion-mnist_test.csv')\n\ntest_label = pd.get_dummies(test.label)\ntest_label = test_label.values\ntest_label = test_label.astype(np.float32)\n\ntest.drop('label', axis = 1, inplace = True)\ntest = test.values\ntest = test.astype(np.float32)\n\ntest = test / 255.\n\ntest = test.reshape((len(test), 28, 28, 1))\n\nprediction = CNN.predict(test)\n\nprediction = np.argmax(prediction, axis=1)\ntest_label = np.argmax(test_label, axis=1) # reverse one-hot encoding", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\n\nCM = confusion_matrix(prediction, test_label)\nprint(CM)\nprint('\\nTest Accuracy: ' + str(np.sum(np.diag(CM)) / np.sum(CM)))", "[[855 2 13 13 4 1 92 0 4 0]\n [ 0 980 2 2 0 2 4 0 1 0]\n [ 19 1 848 6 23 0 41 0 2 0]\n [ 18 13 9 926 9 0 23 0 1 0]\n [ 2 1 74 35 936 0 74 1 3 0]\n [ 0 0 0 0 0 963 1 9 0 6]\n [ 92 2 50 16 26 0 757 0 4 0]\n [ 1 0 0 0 0 17 0 966 2 32]\n [ 13 1 4 1 2 3 8 0 983 5]\n [ 0 0 0 1 0 14 0 24 0 957]]\n\nTest Accuracy: 0.9171\n" ], [ "seaborn.heatmap(CM, annot=True)\nplt.show()", "_____no_output_____" ] ], [ [ "My Convolutional Neural Network classified 91.7% of Test data correctly. The confusion matrix showed that category no. 6 (Shirt) has been misclassified the most. The next goal is to correct it; one possible solution would be to increase regularization, another to build an ensemble of models.", "_____no_output_____" ], [ "Thanks for coming thus far. In the next Notebooks I will implement more advanced Convolutional models, among other things.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ] ]
d0dd1fcea6a6282a3c97a43ce1261b2dc1064240
14,749
ipynb
Jupyter Notebook
data-processing.ipynb
ryubidragonfire/python-machine-learning-101
679a0e245c60a23f212b1f23b72a22012004a690
[ "MIT" ]
1
2019-06-19T02:50:29.000Z
2019-06-19T02:50:29.000Z
data-processing.ipynb
ryubidragonfire/python-machine-learning-101
679a0e245c60a23f212b1f23b72a22012004a690
[ "MIT" ]
null
null
null
data-processing.ipynb
ryubidragonfire/python-machine-learning-101
679a0e245c60a23f212b1f23b72a22012004a690
[ "MIT" ]
null
null
null
22.760802
353
0.393993
[ [ [ "# Data Processing", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "df_orig = pd.DataFrame([['big', 2.00, 'black', 'coffee'],\n ['medium', 1.90, 'black', 'coffee'],\n ['small', 1.80, 'black', 'coffee'],\n ['small', 1.20, 'brown', 'tea'],\n ['medium', 1.30, 'brown', 'tea']], \n columns=['size', 'price', 'colour', 'drink'])\ndf_orig", "_____no_output_____" ] ], [ [ "## Categorical Data\n- **nominal**\n- **ordinal**", "_____no_output_____" ], [ "## Encoding Norminal: `LabelEncoder`", "_____no_output_____" ] ], [ [ "df = df_orig.copy()", "_____no_output_____" ], [ "from sklearn.preprocessing import LabelEncoder\nclass_le = LabelEncoder()\ny = class_le.fit_transform(df['drink']) # y can now be used as labels\nprint(y[0:5])\nclass_le.classes_", "[0 0 0 1 1]\n" ] ], [ [ "## Encoding Norminal into One-Hot: `LabelEncoder`", "_____no_output_____" ] ], [ [ "df = df_orig.copy()\nfrom sklearn.preprocessing import OneHotEncoder\ncolour_le = LabelEncoder()\ncolour_le.fit_transform(df['colour'].values)\ncolour_le.classes_", "_____no_output_____" ], [ "df['colour']", "_____no_output_____" ], [ "X = pd.get_dummies(df[['price', 'colour']])\nX", "_____no_output_____" ] ], [ [ "## Hmm ... Encoding Nominal: `OneHotEncoder` ??", "_____no_output_____" ] ], [ [ "df = df_orig.copy()\n\nfrom sklearn.preprocessing import OneHotEncoder\ncolour_le = LabelEncoder()\ndf['colour'] = colour_le.fit_transform(df['colour'].values)\ncolour_le.classes_", "_____no_output_____" ], [ "df['colour']", "_____no_output_____" ], [ "from sklearn.preprocessing import OneHotEncoder\n#ohe = OneHotEncoder(categorical_features=[2], sparse=False) # feature column number 0. ['size'] won't work\n#ohe.fit_transform(df['colour'].values)\n\nohe = OneHotEncoder(categorical_features=[0], sparse=True) # feature column number 0. ['size'] won't work\nohe.fit_transform(df['colour'].values).toarray()\nX = pd.get_dummies(df[['price', 'colour']])\nX", "C:\\WinPython-64bit-3.4.4.3Qt5\\python-3.4.4.amd64\\lib\\site-packages\\sklearn\\utils\\validation.py:386: DeprecationWarning: Passing 1d arrays as data is deprecated in 0.17 and willraise ValueError in 0.19. Reshape your data either using X.reshape(-1, 1) if your data has a single feature or X.reshape(1, -1) if it contains a single sample.\n DeprecationWarning)\n" ] ], [ [ "## Encoding Ordinal: `.map`\nOrder does has a meaning.", "_____no_output_____" ] ], [ [ "df = df_orig.copy()\nsize_map = {'big':3, 'medium':2, 'small':1}\ndf['size'] = df['size'].map(size_map)\ndf", "_____no_output_____" ] ], [ [ "## Below won't preserve the intended order!!", "_____no_output_____" ] ], [ [ "df = df_orig.copy()\nnp.unique(df['size'])", "_____no_output_____" ], [ "size_map = {category:idx for idx, category in enumerate(np.unique(df['size']))}\nsize_map", "_____no_output_____" ] ], [ [ "## Data Scaling 1 of 2: Normalisation\nSee `classification-k-nn.ipynb`", "_____no_output_____" ], [ "## Data Scaling 2 of 2: Standardization\nsee `classification-knn.ipynb`", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ] ]
d0dd20bdbacbe44275590c9b54ccce7f5f0e92cb
47,750
ipynb
Jupyter Notebook
career/career_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
11
2018-05-02T22:12:14.000Z
2021-11-18T01:07:33.000Z
career/career_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
null
null
null
career/career_solutions_py.ipynb
indextrading/my
0e8719becef0a9526619885afe547cc9a34c5512
[ "BSD-3-Clause" ]
13
2017-11-11T22:38:22.000Z
2022-02-21T20:33:03.000Z
150.630915
26,662
0.868168
[ [ [ "# quant-econ Solutions: Modeling Career Choice", "_____no_output_____" ], [ "Solutions for http://quant-econ.net/py/career.html", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom quantecon import DiscreteRV, compute_fixed_point\nfrom career import CareerWorkerProblem", "_____no_output_____" ] ], [ [ "## Exercise 1", "_____no_output_____" ], [ "\nSimulate job / career paths. \n\nIn reading the code, recall that `optimal_policy[i, j]` = policy at\n$(\\theta_i, \\epsilon_j)$ = either 1, 2 or 3; meaning 'stay put', 'new job' and\n'new life'.\n\n", "_____no_output_____" ] ], [ [ "wp = CareerWorkerProblem()\nv_init = np.ones((wp.N, wp.N))*100\nv = compute_fixed_point(wp.bellman_operator, v_init, verbose=False)\noptimal_policy = wp.get_greedy(v)\nF = DiscreteRV(wp.F_probs)\nG = DiscreteRV(wp.G_probs)\n\ndef gen_path(T=20):\n i = j = 0 \n theta_index = []\n epsilon_index = []\n for t in range(T):\n if optimal_policy[i, j] == 1: # Stay put\n pass\n elif optimal_policy[i, j] == 2: # New job\n j = int(G.draw())\n else: # New life\n i, j = int(F.draw()), int(G.draw())\n theta_index.append(i)\n epsilon_index.append(j)\n return wp.theta[theta_index], wp.epsilon[epsilon_index]\n\ntheta_path, epsilon_path = gen_path()\n\nfig, axes = plt.subplots(2, 1, figsize=(10, 8))\nfor ax in axes:\n ax.plot(epsilon_path, label='epsilon')\n ax.plot(theta_path, label='theta')\n ax.legend(loc='lower right')\n\nplt.show()\n\n", "_____no_output_____" ] ], [ [ "## Exercise 2", "_____no_output_____" ], [ "The median for the original parameterization can be computed as follows", "_____no_output_____" ] ], [ [ "\nwp = CareerWorkerProblem()\nv_init = np.ones((wp.N, wp.N))*100\nv = compute_fixed_point(wp.bellman_operator, v_init)\noptimal_policy = wp.get_greedy(v)\nF = DiscreteRV(wp.F_probs)\nG = DiscreteRV(wp.G_probs)\n\ndef gen_first_passage_time():\n t = 0\n i = j = 0\n while 1:\n if optimal_policy[i, j] == 1: # Stay put\n return t\n elif optimal_policy[i, j] == 2: # New job\n j = int(G.draw())\n else: # New life\n i, j = int(F.draw()), int(G.draw())\n t += 1\n\nM = 25000 # Number of samples\nsamples = np.empty(M)\nfor i in range(M): \n samples[i] = gen_first_passage_time()\nprint(np.median(samples))\n", "Iteration Distance Elapsed (seconds)\n---------------------------------------------\n5 4.073e+00 9.388e-02 \n10 3.151e+00 1.871e-01 \n15 2.438e+00 2.811e-01 \n20 1.887e+00 3.741e-01 \n25 1.460e+00 4.675e-01 \n30 1.130e+00 5.603e-01 \n35 8.741e-01 6.537e-01 \n40 6.764e-01 7.467e-01 \n45 5.234e-01 8.400e-01 \n50 4.050e-01 9.336e-01 \n7.0\n" ] ], [ [ "To compute the median with $\\beta=0.99$ instead of the default value $\\beta=0.95$,\nreplace `wp = CareerWorkerProblem()` with `wp = CareerWorkerProblem(beta=0.99)`\n\nThe medians are subject to randomness, but should be about 7 and 11\nrespectively. 
Not surprisingly, more patient workers will wait longer to settle down to their final job.\n\n", "_____no_output_____" ], [ "## Exercise 3", "_____no_output_____" ], [ "Here’s the code to reproduce the original figure.", "_____no_output_____" ] ], [ [ "from matplotlib import cm\n\nwp = CareerWorkerProblem()\nv_init = np.ones((wp.N, wp.N))*100\nv = compute_fixed_point(wp.bellman_operator, v_init)\noptimal_policy = wp.get_greedy(v)\n\nfig, ax = plt.subplots(figsize=(6,6))\ntg, eg = np.meshgrid(wp.theta, wp.epsilon)\nlvls=(0.5, 1.5, 2.5, 3.5)\nax.contourf(tg, eg, optimal_policy.T, levels=lvls, cmap=cm.winter, alpha=0.5)\nax.contour(tg, eg, optimal_policy.T, colors='k', levels=lvls, linewidths=2)\nax.set_xlabel('theta', fontsize=14)\nax.set_ylabel('epsilon', fontsize=14)\nax.text(1.8, 2.5, 'new life', fontsize=14)\nax.text(4.5, 2.5, 'new job', fontsize=14, rotation='vertical')\nax.text(4.0, 4.5, 'stay put', fontsize=14)\n\n", "Iteration Distance Elapsed (seconds)\n---------------------------------------------\n5 4.073e+00 9.415e-02 \n10 3.151e+00 1.866e-01 \n15 2.438e+00 2.793e-01 \n20 1.887e+00 3.721e-01 \n25 1.460e+00 4.647e-01 \n30 1.130e+00 5.568e-01 \n35 8.741e-01 6.499e-01 \n40 6.764e-01 7.424e-01 \n45 5.234e-01 8.348e-01 \n50 4.050e-01 9.276e-01 \n" ] ], [ [ "Now we want to set `G_a = G_b = 100` and generate a new figure with these parameters. \n\nTo do this replace:\n\n    wp = CareerWorkerProblem()\n\nwith:\n\n    wp = CareerWorkerProblem(G_a=100, G_b=100)\n\nIn the new figure, you will see that the region for which the worker will stay put has grown because the distribution for $\\epsilon$ has become more concentrated around the mean, making high-paying jobs less realistic.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d0dd4d67d10f47b7427356b32f2d54148380d938
28,279
ipynb
Jupyter Notebook
main/nbs/mdling/fineTuning/gpt-2_fine_tuning.ipynb
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
main/nbs/mdling/fineTuning/gpt-2_fine_tuning.ipynb
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
main/nbs/mdling/fineTuning/gpt-2_fine_tuning.ipynb
jason424217/Artificial-Code-Gen
a6e2c097c5ffe8cb0929e6703035b526f477e514
[ "MIT" ]
null
null
null
39.113416
189
0.518335
[ [ [ "cd /tf/src/data/gpt-2/", "/tf/src/data/gpt-2\n" ], [ "!pip3 install -r requirements.txt", "Collecting fire>=0.1.3 (from -r requirements.txt (line 1))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d9/69/faeaae8687f4de0f5973694d02e9d6c3eb827636a009157352d98de1129e/fire-0.2.1.tar.gz (76kB)\n\u001b[K |████████████████████████████████| 81kB 1.1MB/s eta 0:00:01\n\u001b[?25hCollecting regex==2017.4.5 (from -r requirements.txt (line 2))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/36/62/c0c0d762ffd4ffaf39f372eb8561b8d491a11ace5a7884610424a8b40f95/regex-2017.04.05.tar.gz (601kB)\n\u001b[K |████████████████████████████████| 604kB 2.7MB/s eta 0:00:01\n\u001b[?25hCollecting requests==2.21.0 (from -r requirements.txt (line 3))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/e3/20f3d364d6c8e5d2353c72a67778eb189176f08e873c9900e10c0287b84b/requests-2.21.0-py2.py3-none-any.whl (57kB)\n\u001b[K |████████████████████████████████| 61kB 6.1MB/s eta 0:00:011\n\u001b[?25hCollecting tqdm==4.31.1 (from -r requirements.txt (line 4))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/6c/4b/c38b5144cf167c4f52288517436ccafefe9dc01b8d1c190e18a6b154cd4a/tqdm-4.31.1-py2.py3-none-any.whl (48kB)\n\u001b[K |████████████████████████████████| 51kB 6.1MB/s eta 0:00:011\n\u001b[?25hCollecting toposort==1.5 (from -r requirements.txt (line 5))\n Downloading https://files.pythonhosted.org/packages/e9/8a/321cd8ea5f4a22a06e3ba30ef31ec33bea11a3443eeb1d89807640ee6ed4/toposort-1.5-py2.py3-none-any.whl\nRequirement already satisfied: six in /usr/lib/python3/dist-packages (from fire>=0.1.3->-r requirements.txt (line 1)) (1.11.0)\nRequirement already satisfied: termcolor in /usr/local/lib/python3.6/dist-packages (from fire>=0.1.3->-r requirements.txt (line 1)) (1.1.0)\nCollecting certifi>=2017.4.17 (from requests==2.21.0->-r requirements.txt (line 3))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/69/1b/b853c7a9d4f6a6d00749e94eb6f3a041e342a885b87340b79c1ef73e3a78/certifi-2019.6.16-py2.py3-none-any.whl (157kB)\n\u001b[K |████████████████████████████████| 163kB 231kB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: idna<2.9,>=2.5 in /usr/lib/python3/dist-packages (from requests==2.21.0->-r requirements.txt (line 3)) (2.6)\nCollecting urllib3<1.25,>=1.21.1 (from requests==2.21.0->-r requirements.txt (line 3))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/01/11/525b02e4acc0c747de8b6ccdab376331597c569c42ea66ab0a1dbd36eca2/urllib3-1.24.3-py2.py3-none-any.whl (118kB)\n\u001b[K |████████████████████████████████| 122kB 3.1MB/s eta 0:00:01\n\u001b[?25hCollecting chardet<3.1.0,>=3.0.2 (from requests==2.21.0->-r requirements.txt (line 3))\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8/chardet-3.0.4-py2.py3-none-any.whl (133kB)\n\u001b[K |████████████████████████████████| 143kB 170kB/s eta 0:00:01\n\u001b[?25hBuilding wheels for collected packages: fire, regex\n Building wheel for fire (setup.py) ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /root/.cache/pip/wheels/31/9c/c0/07b6dc7faf1844bb4688f46b569efe6cafaa2179c95db821da\n Building wheel for regex (setup.py) ... 
\u001b[?25ldone\n\u001b[?25h  Stored in directory: /root/.cache/pip/wheels/75/07/38/3c16b529d50cb4e0cd3dbc7b75cece8a09c132692c74450b01\nSuccessfully built fire regex\nInstalling collected packages: fire, regex, certifi, urllib3, chardet, requests, tqdm, toposort\nSuccessfully installed certifi-2019.6.16 chardet-3.0.4 fire-0.2.1 regex-2017.4.5 requests-2.21.0 toposort-1.5 tqdm-4.31.1 urllib3-1.24.3\n\u001b[33mWARNING: You are using pip version 19.1.1, however version 19.2.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ], [ "import fire\nimport json\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport regex as re\nfrom functools import lru_cache\nimport tqdm\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nimport glob\nimport pickle\n\ntf.__version__", "_____no_output_____" ] ], [ [ "# Encoding", "_____no_output_____" ] ], [ [ "\"\"\"Byte pair encoding utilities\"\"\"\n\n\n@lru_cache()\ndef bytes_to_unicode():\n    \"\"\"\n    Returns list of utf-8 byte and a corresponding list of unicode strings.\n    The reversible bpe codes work on unicode strings.\n    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n    This is a significant percentage of your normal, say, 32K bpe vocab.\n    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n    And avoids mapping to whitespace/control characters the bpe code barfs on.\n    \"\"\"\n    bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"¡\"), ord(\"¬\")+1))+list(range(ord(\"®\"), ord(\"ÿ\")+1))\n    cs = bs[:]\n    n = 0\n    for b in range(2**8):\n        if b not in bs:\n            bs.append(b)\n            cs.append(2**8+n)\n            n += 1\n    cs = [chr(n) for n in cs]\n    return dict(zip(bs, cs))\n\ndef get_pairs(word):\n    \"\"\"Return set of symbol pairs in a word.\n\n    Word is represented as tuple of symbols (symbols being variable-length strings).\n    \"\"\"\n    pairs = set()\n    prev_char = word[0]\n    for char in word[1:]:\n        pairs.add((prev_char, char))\n        prev_char = char\n    return pairs\n\nclass Encoder:\n    def __init__(self, encoder, bpe_merges, errors='replace'):\n        self.encoder = encoder\n        self.decoder = {v:k for k,v in self.encoder.items()}\n        self.errors = errors # how to handle errors in decoding\n        self.byte_encoder = bytes_to_unicode()\n        self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}\n        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n        self.cache = {}\n\n        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions\n        self.pat = re.compile(r\"\"\"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\")\n\n    def bpe(self, token):\n        if token in self.cache:\n            return self.cache[token]\n        word = tuple(token)\n        pairs = get_pairs(word)\n\n        if not pairs:\n            return token\n\n        while True:\n            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n            if bigram not in self.bpe_ranks:\n                break\n            first, second = bigram\n            new_word = []\n            i = 0\n            while i < len(word):\n                try:\n                    j = word.index(first, i)\n                    new_word.extend(word[i:j])\n                    i = j\n                except:\n                    new_word.extend(word[i:])\n                    break\n\n                if word[i] == first and i < len(word)-1 and word[i+1] == second:\n                    new_word.append(first+second)\n                    i += 2\n                else:\n                    new_word.append(word[i])\n                    i += 1\n            new_word = tuple(new_word)\n            word = new_word\n            if len(word) == 1:\n                break\n            else:\n                pairs = get_pairs(word)\n        word = ' '.join(word)\n
self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\n return text\n\ndef get_encoder(model_name, models_dir):\n with open(os.path.join(models_dir, model_name, 'encoder.json'), 'r') as f:\n encoder = json.load(f)\n with open(os.path.join(models_dir, model_name, 'vocab.bpe'), 'r', encoding=\"utf-8\") as f:\n bpe_data = f.read()\n bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\\n')[1:-1]]\n return Encoder(\n encoder=encoder,\n bpe_merges=bpe_merges,\n )", "_____no_output_____" ] ], [ [ "# Model", "_____no_output_____" ] ], [ [ "class HParams():\n n_vocab=50257\n n_ctx=1024\n n_embd=768\n n_head=12\n n_layer=12\n \n def __init__(self, n_vocab, n_ctx, n_embd, n_head, n_layer):\n self.n_vocab = n_vocab\n self.n_ctx = n_ctx\n self.n_embd = n_embd\n self.n_head = n_head\n self.n_layer = n_layer", "_____no_output_____" ], [ "def default_hparams():\n return HParams(\n n_vocab=50257,\n n_ctx=1024,\n n_embd=768,\n n_head=12,\n n_layer=12,\n )\n\ndef shape_list(x):\n \"\"\"Deal with dynamic shape in tensorflow cleanly.\"\"\"\n static = x.shape.as_list()\n dynamic = tf.shape(input=x)\n return [dynamic[i] if s is None else s for i, s in enumerate(static)]\n\ndef gelu(x):\n return 0.5 * x * (1 + tf.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))\n\ndef norm(x, scope, *, axis=-1, epsilon=1e-5):\n \"\"\"Normalize to mean = 0, std = 1, then do a diagonal affine transform.\"\"\"\n with tf.compat.v1.variable_scope(scope):\n n_state = x.shape[-1]\n g = tf.compat.v1.get_variable('g', [n_state], initializer=tf.compat.v1.constant_initializer(1), use_resource=False)\n b = tf.compat.v1.get_variable('b', [n_state], initializer=tf.compat.v1.constant_initializer(0), use_resource=False)\n u = tf.reduce_mean(input_tensor=x, axis=axis, keepdims=True)\n s = tf.reduce_mean(input_tensor=tf.square(x-u), axis=axis, keepdims=True)\n x = (x - u) * tf.math.rsqrt(s + epsilon)\n x = x*g + b\n return x\n\ndef split_states(x, n):\n \"\"\"Reshape the last dimension of x into [n, x.shape[-1]/n].\"\"\"\n *start, m = shape_list(x)\n return tf.reshape(x, start + [n, m//n])\n\ndef merge_states(x):\n \"\"\"Smash the last two dimensions of x into a single dimension.\"\"\"\n *start, a, b = shape_list(x)\n return tf.reshape(x, start + [a*b])\n\ndef conv1d(x, scope, nf, *, w_init_stdev=0.02):\n with tf.compat.v1.variable_scope(scope):\n *start, nx = shape_list(x)\n w = tf.compat.v1.get_variable('w', [1, nx, nf], initializer=tf.compat.v1.random_normal_initializer(stddev=w_init_stdev), use_resource=False)\n b = tf.compat.v1.get_variable('b', [nf], initializer=tf.compat.v1.constant_initializer(0), use_resource=False)\n c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])\n return c\n\ndef attention_mask(nd, ns, *, dtype):\n \"\"\"1's in the lower triangle, counting from the lower right corner.\n\n Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.\n \"\"\"\n i = tf.range(nd)[:,None]\n j = tf.range(ns)\n m = i >= j - ns + nd\n return tf.cast(m, dtype)\n\n\ndef attn(x, scope, n_state, *, past, 
hparams):\n assert x.shape.ndims == 3 # Should be [batch, sequence, features]\n assert n_state % hparams.n_head == 0\n if past is not None:\n assert past.shape.ndims == 5 # Should be [batch, 2, heads, sequence, features], where 2 is [k, v]\n\n def split_heads(x):\n # From [batch, sequence, features] to [batch, heads, sequence, features]\n return tf.transpose(a=split_states(x, hparams.n_head), perm=[0, 2, 1, 3])\n\n def merge_heads(x):\n # Reverse of split_heads\n return merge_states(tf.transpose(a=x, perm=[0, 2, 1, 3]))\n\n def mask_attn_weights(w):\n # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.\n _, _, nd, ns = shape_list(w)\n b = attention_mask(nd, ns, dtype=w.dtype)\n b = tf.reshape(b, [1, 1, nd, ns])\n w = w*b - tf.cast(1e10, w.dtype)*(1-b)\n return w\n\n def multihead_attn(q, k, v):\n # q, k, v have shape [batch, heads, sequence, features]\n w = tf.matmul(q, k, transpose_b=True)\n w = w * tf.math.rsqrt(tf.cast(v.shape[-1], w.dtype))\n\n w = mask_attn_weights(w)\n w = tf.nn.softmax(w, axis=-1)\n a = tf.matmul(w, v)\n return a\n\n with tf.compat.v1.variable_scope(scope):\n c = conv1d(x, 'c_attn', n_state*3)\n q, k, v = map(split_heads, tf.split(c, 3, axis=2))\n present = tf.stack([k, v], axis=1)\n if past is not None:\n pk, pv = tf.unstack(past, axis=1)\n k = tf.concat([pk, k], axis=-2)\n v = tf.concat([pv, v], axis=-2)\n a = multihead_attn(q, k, v)\n a = merge_heads(a)\n a = conv1d(a, 'c_proj', n_state)\n return a, present\n\n\ndef mlp(x, scope, n_state, *, hparams):\n with tf.compat.v1.variable_scope(scope):\n nx = x.shape[-1]\n h = gelu(conv1d(x, 'c_fc', n_state))\n h2 = conv1d(h, 'c_proj', nx)\n return h2\n\ndef block(x, scope, *, past, hparams):\n with tf.compat.v1.variable_scope(scope):\n nx = x.shape[-1]\n a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)\n x = x + a\n m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)\n x = x + m\n return x, present\n\ndef past_shape(*, hparams, batch_size=None, sequence=None):\n return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]\n\ndef expand_tile(value, size):\n \"\"\"Add a new axis of given size.\"\"\"\n value = tf.convert_to_tensor(value=value, name='value')\n ndims = value.shape.ndims\n return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)\n\ndef positions_for(tokens, past_length):\n batch_size = tf.shape(input=tokens)[0]\n nsteps = tf.shape(input=tokens)[1]\n return expand_tile(past_length + tf.range(nsteps), batch_size)\n\n\ndef model(hparams, X, past=None, scope='model', reuse=tf.compat.v1.AUTO_REUSE):\n with tf.compat.v1.variable_scope(scope, reuse=reuse):\n results = {}\n batch, sequence = shape_list(X)\n\n wpe = tf.compat.v1.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],\n initializer=tf.compat.v1.random_normal_initializer(stddev=0.01), use_resource=False)\n wte = tf.compat.v1.get_variable('wte', [hparams.n_vocab, hparams.n_embd],\n initializer=tf.compat.v1.random_normal_initializer(stddev=0.02), use_resource=False)\n past_length = 0 if past is None else tf.shape(input=past)[-2]\n h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))\n# print(h.shape)\n\n # Transformer\n presents = []\n pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer\n assert len(pasts) == hparams.n_layer\n for layer, past in enumerate(pasts):\n h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)\n presents.append(present)\n results['present'] = 
tf.stack(presents, axis=1)\n h = norm(h, 'ln_f')\n results['hidden_state'] = h\n\n # Language model loss. Do tokens <n predict token n?\n h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])\n logits = tf.matmul(h_flat, wte, transpose_b=True)\n logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])\n results['logits'] = logits\n return results", "_____no_output_____" ] ], [ [ "# Sample from Model", "_____no_output_____" ] ], [ [ "def top_k_logits(logits, k):\n if k == 0:\n # no truncation\n return logits\n\n def _top_k():\n values, _ = tf.nn.top_k(logits, k=k)\n min_values = values[:, -1, tf.newaxis]\n return tf.compat.v1.where(\n logits < min_values,\n tf.ones_like(logits, dtype=logits.dtype) * -1e10,\n logits,\n )\n return tf.cond(\n pred=tf.equal(k, 0),\n true_fn=lambda: logits,\n false_fn=lambda: _top_k(),\n )\n\n\ndef sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, past=None, temperature=1, top_k=0):\n if start_token is None:\n assert context is not None, 'Specify exactly one of start_token and context!'\n else:\n assert context is None, 'Specify exactly one of start_token and context!'\n context = tf.fill([batch_size, 1], start_token)\n\n def step(hparams, tokens, past=None):\n lm_output = model(hparams=hparams, X=tokens, past=past, reuse=tf.compat.v1.AUTO_REUSE)\n\n logits = lm_output['logits'][:, :, :hparams.n_vocab]\n presents = lm_output['present']\n presents.set_shape(past_shape(hparams=hparams, batch_size=batch_size))\n return {\n 'logits': logits,\n 'presents': presents,\n 'hidden_state': lm_output['hidden_state']\n }\n\n def body(past, prev, output, embedding):\n next_outputs = step(hparams, prev, past=past)\n logits = next_outputs['logits'][:, -1, :] / tf.cast(temperature, dtype=tf.float32)\n logits = top_k_logits(logits, k=top_k)\n samples = tf.random.categorical(logits=logits, num_samples=1, dtype=tf.int32)\n return [\n next_outputs['presents'] if past is None else tf.concat([past, next_outputs['presents']], axis=-2),\n samples,\n tf.concat([output, samples], axis=1),\n next_outputs['hidden_state']\n ]\n\n past, prev, output, h = body(past, context, context, context)\n\n def cond(*args):\n return True\n\n return output, past, h", "_____no_output_____" ] ], [ [ "# Embedding Methods", "_____no_output_____" ] ], [ [ "import math\nclass Embedder:\n \n def __init__(self, chkpt_path, chunk_size):\n tf.compat.v1.disable_eager_execution()\n self.g = tf.Graph()\n with self.g.as_default():\n self.context = tf.compat.v1.placeholder(tf.int32, [1, None])\n\n self.sess = tf.compat.v1.Session(graph=self.g)\n \n self.MAX_CHUNK = chunk_size\n self.enc = get_encoder(\"117M\", \"models\")\n hparams = default_hparams()\n with self.g.as_default():\n self.output, self.past, self.hidden_state = sample_sequence(\n hparams=hparams, length=None,\n context=self.context,\n past=None,\n batch_size=1,\n temperature=1, top_k=1\n )\n \n if chkpt_path is not None:\n self.restore(chkpt_path)\n \n def restore(self, chkpt_path):\n with self.g.as_default():\n saver = tf.compat.v1.train.Saver()\n chkpt = tf.train.latest_checkpoint(chkpt_path)\n saver.restore(self.sess, chkpt)\n \n def __call__(self, method):\n with self.g.as_default():\n\n p = None\n for i in range(math.ceil(len(method) / self.MAX_CHUNK)):\n chunk = method[i * self.MAX_CHUNK : (i + 1) * self.MAX_CHUNK]\n context_tokens = self.enc.encode(chunk)\n\n if p is None:\n out, p, h = self.sess.run([self.output, self.past, self.hidden_state], feed_dict={\n self.context: [context_tokens]\n }, options = 
tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom = True))\n else:\n out, p, h = self.sess.run([self.output, self.past, self.hidden_state], feed_dict={\n self.context: [context_tokens],\n self.past: p\n }, options = tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom = True))\n\n return h", "_____no_output_____" ], [ "emb = Embedder(\"/tf/src/data/gpt-2/checkpoint/run3\", 1024)\nwith open(\"/tf/src/data/methods/DATA00M_[god-r]/after.java.~186835~\", 'r') as fp:\n method = fp.read()", "_____no_output_____" ], [ "emb(method).shape", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0dd5710bcbcf87b4ed91baa1b17992bd22fa5fd
226,027
ipynb
Jupyter Notebook
assignments/assignment2/PyTorch.ipynb
BearingBall/dlcourse_ai-master
7f3b6ccc9fca6187e11eaa61d64fef7a1d8bff3f
[ "MIT" ]
null
null
null
assignments/assignment2/PyTorch.ipynb
BearingBall/dlcourse_ai-master
7f3b6ccc9fca6187e11eaa61d64fef7a1d8bff3f
[ "MIT" ]
null
null
null
assignments/assignment2/PyTorch.ipynb
BearingBall/dlcourse_ai-master
7f3b6ccc9fca6187e11eaa61d64fef7a1d8bff3f
[ "MIT" ]
1
2021-05-14T17:11:00.000Z
2021-05-14T17:11:00.000Z
262.822093
64,948
0.910281
[ [ [ "# Задание 2.2 - Введение в PyTorch\n\nДля этого задания потребуется установить версию PyTorch 1.0\n\nhttps://pytorch.org/get-started/locally/\n\nВ этом задании мы познакомимся с основными компонентами PyTorch и натренируем несколько небольших моделей.<br>\nGPU нам пока не понадобится.\n\nОсновные ссылки: \nhttps://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html \nhttps://pytorch.org/docs/stable/nn.html \nhttps://pytorch.org/docs/stable/torchvision/index.html ", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nfrom torch.utils.data.sampler import SubsetRandomSampler, Sampler\n\nfrom torchvision import transforms\n\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\nimport numpy as np", "_____no_output_____" ] ], [ [ "## Как всегда, начинаем с загрузки данных\n\nPyTorch поддерживает загрузку SVHN из коробки.", "_____no_output_____" ] ], [ [ "# First, lets load the dataset\ndata_train = dset.SVHN('./data/', split='train',\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n ])\n )\ndata_test = dset.SVHN('./data/', split='test', \n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n ]))", "_____no_output_____" ] ], [ [ "Теперь мы разделим данные на training и validation с использованием классов `SubsetRandomSampler` и `DataLoader`.\n\n`DataLoader` подгружает данные, предоставляемые классом `Dataset`, во время тренировки и группирует их в батчи.\nОн дает возможность указать `Sampler`, который выбирает, какие примеры из датасета использовать для тренировки. Мы используем это, чтобы разделить данные на training и validation.\n\nПодробнее: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html", "_____no_output_____" ] ], [ [ "batch_size = 64\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\ndata_size = data_train.data.shape[0]\nvalidation_split = .2\nsplit = int(np.floor(validation_split * data_size))\nindices = list(range(data_size))\nnp.random.shuffle(indices)\n\ntrain_indices, val_indices = indices[split:], indices[:split]\n\ntrain_sampler = SubsetRandomSampler(train_indices)\nval_sampler = SubsetRandomSampler(val_indices)\n\ntrain_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, \n sampler=train_sampler)\nval_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,\n sampler=val_sampler)", "cuda:0\n" ] ], [ [ "В нашей задаче мы получаем на вход изображения, но работаем с ними как с одномерными массивами. 
To turn a multi-dimensional array into a one-dimensional one, we will use a very simple helper module, `Flattener`.", "_____no_output_____" ] ], [ [ "sample, label = data_train[0]\nprint(\"SVHN data sample shape: \", sample.shape)\n# As you can see, the data is shaped like an image\n\n# We'll use a special helper module to shape it into a tensor\nclass Flattener(nn.Module):\n    def forward(self, x):\n        batch_size, *_ = x.shape\n        return x.view(batch_size, -1)", "SVHN data sample shape:  torch.Size([3, 32, 32])\n" ] ], [ [ "And finally, we create the main PyTorch objects:\n- `nn_model` - the model with the neural network itself\n- `loss` - the loss function, in our case `CrossEntropyLoss`\n- `optimizer` - the optimization algorithm, in our case plain `SGD`", "_____no_output_____" ] ], [ [ "nn_model = nn.Sequential(\n            Flattener(),\n            nn.Linear(3*32*32, 100),\n            nn.ReLU(inplace=True),\n            nn.Linear(100, 10), \n         )\nnn_model.type(torch.FloatTensor)\n\n# We will minimize cross-entropy between the ground truth and\n# network predictions using an SGD optimizer\nloss = nn.CrossEntropyLoss().type(torch.FloatTensor)\noptimizer = optim.SGD(nn_model.parameters(), lr=1e-2, weight_decay=1e-1)", "_____no_output_____" ] ], [ [ "## Let's train!\n\nBelow is the `train_model` function, which implements the main PyTorch training loop.\n\nEvery epoch this function calls `compute_accuracy`, which computes accuracy on the validation set; you are asked to implement this latter function.", "_____no_output_____" ] ], [ [ "# This is how to implement the same main train loop in PyTorch. Pretty easy, right?\n\ndef train_model(model, train_loader, val_loader, loss, optimizer, num_epochs):    \n    loss_history = []\n    train_history = []\n    val_history = []\n    model.to(device)\n    for epoch in range(num_epochs):\n        model.train() # Enter train mode\n        \n        loss_accum = 0\n        correct_samples = 0\n        total_samples = 0\n        for i_step, (x, y) in enumerate(train_loader):\n            x = x.to(device)\n            y = y.to(device)\n            prediction = model(x)    \n            loss_value = loss(prediction, y)\n            optimizer.zero_grad()\n            loss_value.backward()\n            optimizer.step()\n            \n            _, indices = torch.max(prediction, 1)\n            correct_samples += torch.sum(indices == y)\n            total_samples += y.shape[0]\n            \n            loss_accum += loss_value\n\n        ave_loss = loss_accum / (i_step + 1)\n        train_accuracy = float(correct_samples) / total_samples\n        val_accuracy = compute_accuracy(model, val_loader)\n        \n        loss_history.append(float(ave_loss))\n        train_history.append(train_accuracy)\n        val_history.append(val_accuracy)\n        \n        print(\"Average loss: %f, Train accuracy: %f, Val accuracy: %f\" % (ave_loss, train_accuracy, val_accuracy))\n        \n    return loss_history, train_history, val_history\n    \ndef compute_accuracy(model, loader):\n    \"\"\"\n    Computes accuracy on the dataset wrapped in a loader\n    \n    Returns: accuracy as a float value between 0 and 1\n    \"\"\"\n    model.eval() # Evaluation mode\n    trueAnswerCounter = 0 \n    totalAnswerCounter = 0 \n    with torch.no_grad():\n        for i_step, (x, y) in enumerate(loader):\n            x = x.to(device)\n            y = y.to(device)\n            prediction = torch.argmax(model(x) , 1)  \n            for i in range(len(prediction)):\n                if prediction[i] == y[i]:\n                    trueAnswerCounter += float(1)\n            totalAnswerCounter += float(len(prediction))\n            \n            del prediction\n\n    return float(trueAnswerCounter/totalAnswerCounter)\n\nloss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 3)", "Average loss: 1.837043, Train accuracy: 0.405078, Val accuracy: 0.548222\nAverage loss: 1.463647, Train accuracy: 0.577927, Val accuracy: 
0.598457\nAverage loss: 1.383073, Train accuracy: 0.616217, Val accuracy: 0.628217\n" ] ], [ [ "## After the main loop\n\nLet's look at other features and optimizations that PyTorch provides.\n\nAdd one more hidden layer of size 100 to the model", "_____no_output_____" ] ], [ [ "# Since it's so easy to add layers, let's add some!\n\n# TODO: Implement a model with 2 hidden layers of the size 100\nnn_model = nn.Sequential(\n    Flattener(),\n    nn.Linear(3*32*32, 100),\n    nn.ReLU(inplace=True),\n    nn.Linear(100, 100), \n    nn.ReLU(inplace=True),\n    nn.Linear(100, 10), \n    )\nnn_model.type(torch.FloatTensor)\n\noptimizer = optim.SGD(nn_model.parameters(), lr=1e-2, weight_decay=1e-1)\nloss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 5)", "Average loss: 2.179205, Train accuracy: 0.201515, Val accuracy: 0.232885\nAverage loss: 1.987170, Train accuracy: 0.291813, Val accuracy: 0.346939\nAverage loss: 1.779322, Train accuracy: 0.389875, Val accuracy: 0.422292\nAverage loss: 1.700140, Train accuracy: 0.426373, Val accuracy: 0.424886\nAverage loss: 1.678671, Train accuracy: 0.436764, Val accuracy: 0.444884\n" ] ], [ [ "Add a Batch Normalization layer", "_____no_output_____" ] ], [ [ "# We heard batch normalization is powerful, let's use it!\n# TODO: Add batch normalization after each of the hidden layers of the network, before or after non-linearity\n# Hint: check out torch.nn.BatchNorm1d\n\nnn_model = nn.Sequential(\n    Flattener(),\n    nn.Linear(3*32*32, 100),\n    nn.BatchNorm1d(100),\n    nn.ReLU(inplace=True),\n    nn.Linear(100, 100), \n    nn.BatchNorm1d(100),\n    nn.ReLU(inplace=True),\n    nn.Linear(100, 10), \n    nn.BatchNorm1d(10),\n    )\n\noptimizer = optim.SGD(nn_model.parameters(), lr=1e-3, weight_decay=1e-1)\nloss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 5)", "Average loss: 1.825798, Train accuracy: 0.404737, Val accuracy: 0.566446\nAverage loss: 1.395718, Train accuracy: 0.617343, Val accuracy: 0.679612\nAverage loss: 1.298632, Train accuracy: 0.664096, Val accuracy: 0.698246\nAverage loss: 1.250279, Train accuracy: 0.690271, Val accuracy: 0.706641\nAverage loss: 1.218383, Train accuracy: 0.706583, Val accuracy: 0.715924\n" ], [ "def my_train_model(model, train_loader, val_loader, loss, optimizer, num_epochs, scheduler):    \n    loss_history = []\n    train_history = []\n    val_history = []\n    model.to(device)\n    for epoch in range(num_epochs):\n        model.train() # Enter train mode\n        \n        loss_accum = 0\n        correct_samples = 0\n        total_samples = 0\n        for i_step, (x, y) in enumerate(train_loader):\n            x = x.to(device)\n            y = y.to(device)\n            prediction = model(x)    \n            loss_value = loss(prediction, y)\n            optimizer.zero_grad()\n            loss_value.backward()\n            optimizer.step()\n            \n            _, indices = torch.max(prediction, 1)\n            correct_samples += torch.sum(indices == y)\n            total_samples += y.shape[0]\n            \n            loss_accum += loss_value\n\n        ave_loss = loss_accum / (i_step + 1)\n        train_accuracy = float(correct_samples) / total_samples\n        val_accuracy = compute_accuracy(model, val_loader)\n        \n        scheduler.step(ave_loss)\n        del loss_accum, correct_samples, total_samples\n        \n        loss_history.append(float(ave_loss))\n        train_history.append(train_accuracy)\n        val_history.append(val_accuracy)\n        \n        print(\"Average loss: %f, Train accuracy: %f, Val accuracy: %f\" % (ave_loss, train_accuracy, val_accuracy))\n        \n    return loss_history, train_history, val_history", "_____no_output_____" ] ], [ [ "Add learning rate decay over the course of training.", "_____no_output_____" ] ], [ [ "# Learning rate annealing\n# Reduce your learning rate 2x every 2 epochs\n# Hint: look up learning rate schedulers in PyTorch. You might need to extend train_model function a little bit too!\n\nnn_model = nn.Sequential(\n    Flattener(),\n    nn.Linear(3*32*32, 100),\n    nn.BatchNorm1d(100),\n    nn.ReLU(inplace=True),\n    nn.Linear(100, 10), \n    nn.BatchNorm1d(10)\n    )\n\noptimizer = optim.SGD(nn_model.parameters(), lr=1e-3, weight_decay=1e-1)\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer , factor = 0.5 , patience= 3 , verbose= True)\nloss_history, train_history, val_history = my_train_model(nn_model, train_loader, val_loader, loss, optimizer, 5, scheduler)\n", "Average loss: 1.806619, Train accuracy: 0.427038, Val accuracy: 0.588561\nAverage loss: 1.406469, Train accuracy: 0.619988, Val accuracy: 0.684254\nAverage loss: 1.321503, Train accuracy: 0.667935, Val accuracy: 0.712716\nAverage loss: 1.282513, Train accuracy: 0.689452, Val accuracy: 0.717289\nAverage loss: 1.259725, Train accuracy: 0.706190, Val accuracy: 0.725002\n" ] ], [ [ "# Visualizing the model's errors\n\nLet's try to see which images our model gets wrong.\nTo do this, we will get all the model's predictions on the validation set and compare them with the true labels (ground truth).\n\nThe first part is to implement PyTorch code that computes all the model's predictions on the validation set. \nTo make this easier we provide the `SubsetSampler` code, which simply walks through all the given indices sequentially and assembles batches from them. \n\nImplement the `evaluate_model` function, which runs the model over all validation set samples and records the model's predictions and the true labels.", "_____no_output_____" ] ], [ [ "class SubsetSampler(Sampler):\n    r\"\"\"Samples elements with given indices sequentially\n\n    Arguments:\n        indices (ndarray): indices of the samples to take\n    \"\"\"\n\n    def __init__(self, indices):\n        self.indices = indices\n\n    def __iter__(self):\n        return (self.indices[i] for i in range(len(self.indices)))\n\n    def __len__(self):\n        return len(self.indices)\n    \n    \ndef evaluate_model(model, dataset, indices):\n    \"\"\"\n    Computes predictions and ground truth labels for the indices of the dataset\n    \n    Returns: \n    predictions: np array of ints - model predictions\n    ground_truth: np array of ints - actual labels of the dataset\n    \"\"\"\n    model.eval() # Evaluation mode\n    model.to(device)\n    val_set = torch.utils.data.DataLoader(dataset, batch_size=len(indices), sampler=SubsetSampler(indices))\n    \n    # TODO: Evaluate model on the list of indices and capture predictions\n    # and ground truth labels\n    # Hint: SubsetSampler above could be useful!\n    with torch.no_grad():\n        for i_step, (x, y) in enumerate(val_set):\n            x = x.to(device)\n            y = y.to(device)\n            predictions = torch.argmax(model(x) , 1)  \n            ground_truth = y\n    return np.array(predictions.cpu()), np.array(ground_truth.cpu())\n\n# Evaluate model on validation\npredictions, gt = evaluate_model(nn_model, data_train, val_indices)\nassert len(predictions) == len(val_indices)\nassert len(gt) == len(val_indices)\nassert gt[100] == data_train[val_indices[100]][1]\nassert np.any(np.not_equal(gt, predictions))", "_____no_output_____" ] ], [ [ "## Confusion matrix\nThe first part of the visualization is to print the confusion matrix (https://en.wikipedia.org/wiki/Confusion_matrix ).\n\nA confusion matrix is a matrix where each row corresponds to a predicted class and each column to a ground truth class.
The number at coordinates `i,j` is the count of samples of class `j` that the model classifies as class `i`.\n\n![image.png](attachment:image.png)\n\nTo make the task easier, the function `visualize_confusion_matrix`, which visualizes such a matrix, is implemented below. \nWhat remains is to implement the function `build_confusion_matrix`, which computes it.\n\nThe result should be a 10x10 matrix.", "_____no_output_____" ] ], [ [ "def visualize_confusion_matrix(confusion_matrix):\n \"\"\"\n Visualizes confusion matrix\n \n confusion_matrix: np array of ints, x axis - predicted class, y axis - actual class\n [i][j] should have the count of samples that were predicted to be class i,\n but have j in the ground truth\n \n \"\"\"\n # Adapted from \n # https://stackoverflow.com/questions/2897826/confusion-matrix-with-number-of-classified-misclassified-instances-on-it-python\n assert confusion_matrix.shape[0] == confusion_matrix.shape[1]\n size = confusion_matrix.shape[0]\n fig = plt.figure(figsize=(10,10))\n plt.title(\"Confusion matrix\")\n plt.ylabel(\"predicted\")\n plt.xlabel(\"ground truth\")\n res = plt.imshow(confusion_matrix, cmap='GnBu', interpolation='nearest')\n cb = fig.colorbar(res)\n plt.xticks(np.arange(size))\n plt.yticks(np.arange(size))\n for i, row in enumerate(confusion_matrix):\n for j, count in enumerate(row):\n plt.text(j, i, count, fontsize=14, horizontalalignment='center', verticalalignment='center')\n \ndef build_confusion_matrix(predictions, ground_truth):\n \"\"\"\n Builds confusion matrix from predictions and ground truth\n\n predictions: np array of ints, model predictions for all validation samples\n ground_truth: np array of ints, ground truth for all validation samples\n \n Returns:\n np array of ints, (10,10), counts of samples for predicted/ground_truth classes\n \"\"\"\n \n confusion_matrix = np.zeros((10,10), int)\n \n print(predictions)\n print(ground_truth)\n \n for i in range(predictions.shape[0]):\n confusion_matrix[predictions[i]][ground_truth[i]] += 1\n \n return confusion_matrix\n\n\nconfusion_matrix = build_confusion_matrix(predictions, gt)\nvisualize_confusion_matrix(confusion_matrix)", "[3 2 1 ... 1 1 2]\n[0 7 1 ... 1 1 2]\n" ] ], [ [ "Finally, let's look at the images corresponding to some elements of this matrix.\n\nAs before, you are given the function `visualize_images`, which you should use when implementing the function `visualize_predicted_actual`. 
This function should display several examples corresponding to the given element of the matrix.\n\nVisualize the most frequent errors and try to understand why the model makes them.", "_____no_output_____" ] ], [ [ "data_train_images = dset.SVHN('./data/', split='train')\n\ndef visualize_images(indices, data, title='', max_num=10):\n \"\"\"\n Visualizes several images from the dataset\n \n indices: array of indices to visualize\n data: torch Dataset with the images\n title: string, title of the plot\n max_num: int, max number of images to display\n \"\"\"\n to_show = min(len(indices), max_num)\n fig = plt.figure(figsize=(10,1.5))\n fig.suptitle(title)\n for i, index in enumerate(indices[:to_show]):\n plt.subplot(1,to_show, i+1)\n plt.axis('off')\n sample = data[index][0]\n plt.imshow(sample)\n \ndef visualize_predicted_actual(predicted_class, gt_class, predictions, ground_truth, val_indices, data):\n \"\"\"\n Visualizes images of a ground truth class which were predicted as the other class \n \n predicted: int 0-9, index of the predicted class\n gt_class: int 0-9, index of the ground truth class\n predictions: np array of ints, model predictions for all validation samples\n ground_truth: np array of ints, ground truth for all validation samples\n val_indices: np array of ints, indices of validation samples\n \"\"\"\n\n # TODO: Implement visualization using visualize_images above\n # predictions and ground_truth are provided for validation set only, defined by val_indices\n # Hint: numpy index arrays might be helpful\n # https://docs.scipy.org/doc/numpy/user/basics.indexing.html#index-arrays\n # Please make the title meaningful!\n indices = np.where((predictions == predicted_class) & (ground_truth == gt_class))\n visualize_images(val_indices[indices], data)\n\nvisualize_predicted_actual(6, 8, predictions, gt, np.array(val_indices), data_train_images)\nvisualize_predicted_actual(1, 7, predictions, gt, np.array(val_indices), data_train_images)", "_____no_output_____" ] ], [ [ "# On to the open-ended exercises!\n\nTrain the model as well as you can - experiment on your own!\nThings you should definitely try:\n- hyperparameter search using the validation set\n- other optimizers instead of SGD\n- changing the number of layers and their sizes\n- adding Batch Normalization\n\nBut don't stop there!\n\nAccuracy on the test set should be brought up to **80%**", "_____no_output_____" ] ], [ [ "nn_model = nn.Sequential(\n Flattener(),\n nn.Linear(3*32*32, 100),\n nn.BatchNorm1d(100),\n nn.ReLU(inplace=True),\n nn.Linear(100, 100),\n nn.BatchNorm1d(100),\n nn.ReLU(inplace=True),\n nn.Linear(100, 10)\n )\n\noptimizer = optim.Adam(nn_model.parameters(), lr=0.8e-3, weight_decay=1e-4)\nscheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer , factor = 0.5 , patience= 3 , verbose= True)\n\nloss_history, train_history, val_history = my_train_model(nn_model, train_loader, val_loader, loss, optimizer, 30, scheduler)", "Average loss: 1.363839, Train accuracy: 0.557451, Val accuracy: 0.693605\nAverage loss: 0.950863, Train accuracy: 0.701191, Val accuracy: 0.735854\nAverage loss: 0.836577, Train accuracy: 0.738406, Val accuracy: 0.772985\nAverage loss: 0.757244, Train accuracy: 0.762584, Val accuracy: 0.780152\nAverage loss: 0.706240, Train accuracy: 0.778436, Val accuracy: 0.799263\nAverage loss: 0.673274, Train accuracy: 0.789612, Val accuracy: 0.809842\nAverage loss: 0.642621, Train accuracy: 0.798946, Val accuracy: 0.809023\nAverage loss: 0.627442, Train accuracy: 0.801949, Val 
accuracy: 0.802676\nAverage loss: 0.604402, Train accuracy: 0.811589, Val accuracy: 0.815166\nAverage loss: 0.593587, Train accuracy: 0.813074, Val accuracy: 0.821514\nAverage loss: 0.580327, Train accuracy: 0.817493, Val accuracy: 0.823289\nAverage loss: 0.563763, Train accuracy: 0.824574, Val accuracy: 0.828339\nAverage loss: 0.558496, Train accuracy: 0.824370, Val accuracy: 0.815917\nAverage loss: 0.546168, Train accuracy: 0.827526, Val accuracy: 0.831616\nAverage loss: 0.539990, Train accuracy: 0.829215, Val accuracy: 0.824790\nAverage loss: 0.530318, Train accuracy: 0.833362, Val accuracy: 0.831684\nAverage loss: 0.524641, Train accuracy: 0.835051, Val accuracy: 0.828817\nAverage loss: 0.515824, Train accuracy: 0.838703, Val accuracy: 0.837759\nAverage loss: 0.510912, Train accuracy: 0.838481, Val accuracy: 0.831070\nAverage loss: 0.504318, Train accuracy: 0.840614, Val accuracy: 0.832162\nAverage loss: 0.500301, Train accuracy: 0.840682, Val accuracy: 0.838987\nAverage loss: 0.496784, Train accuracy: 0.843173, Val accuracy: 0.840898\nAverage loss: 0.488715, Train accuracy: 0.845357, Val accuracy: 0.840557\nAverage loss: 0.487820, Train accuracy: 0.844965, Val accuracy: 0.840625\nAverage loss: 0.482398, Train accuracy: 0.846756, Val accuracy: 0.834278\nAverage loss: 0.480014, Train accuracy: 0.848173, Val accuracy: 0.835574\nAverage loss: 0.477898, Train accuracy: 0.848787, Val accuracy: 0.840830\nAverage loss: 0.470878, Train accuracy: 0.848463, Val accuracy: 0.834824\nAverage loss: 0.470596, Train accuracy: 0.850322, Val accuracy: 0.841308\nAverage loss: 0.463579, Train accuracy: 0.851397, Val accuracy: 0.833595\n" ], [ "# As always, we evaluate on the test set at the end\ntest_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)\ntest_accuracy = compute_accuracy(nn_model, test_loader)\nprint(\"Test accuracy: %2.4f\" % test_accuracy)", "Test accuracy: 0.8045\n" ] ] ]
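The annealing cell above asks for halving the learning rate every two epochs, while the solution reaches for `ReduceLROnPlateau`. A minimal sketch of the literal fixed schedule with `torch.optim.lr_scheduler.StepLR` — the toy model and hyperparameters here are illustrative stand-ins, not taken from the notebook:

```python
import torch.nn as nn
import torch.optim as optim

# Any model works; a one-layer stand-in keeps the sketch self-contained.
model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=1e-2)

# Halve the learning rate (gamma=0.5) every 2 epochs (step_size=2).
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)

for epoch in range(6):
    # ... run the usual per-batch forward/backward/optimizer.step() here ...
    scheduler.step()  # advance the schedule once per epoch
    print(epoch, optimizer.param_groups[0]["lr"])
```

Unlike `ReduceLROnPlateau`, `StepLR.step()` takes no metric: it decays purely on the epoch counter, so a training loop like `my_train_model` would call it without the loss argument.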
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0dd5f5b840fe3bd845bb9e16449b05936427da4
273,630
ipynb
Jupyter Notebook
Course_2/Regularization.ipynb
karanchawla/DeepLearningSpecialization-Coursera
eb5421cdc4b02f47be99a6a41da68af06c831c81
[ "MIT" ]
3
2017-09-19T19:12:07.000Z
2018-12-20T13:20:37.000Z
Course_2/Regularization.ipynb
karanchawla/DeepLearningSpecialization-Coursera
eb5421cdc4b02f47be99a6a41da68af06c831c81
[ "MIT" ]
null
null
null
Course_2/Regularization.ipynb
karanchawla/DeepLearningSpecialization-Coursera
eb5421cdc4b02f47be99a6a41da68af06c831c81
[ "MIT" ]
5
2018-07-26T14:13:36.000Z
2019-11-20T08:53:46.000Z
239.396325
56,104
0.890882
[ [ [ "# Regularization\n\nWelcome to the second assignment of this week. Deep Learning models have so much flexibility and capacity that **overfitting can be a serious problem**, if the training dataset is not big enough. Sure it does well on the training set, but the learned network **doesn't generalize to new examples** that it has never seen!\n\n**You will learn to:** Use regularization in your deep learning models.\n\nLet's first import the packages you are going to use.", "_____no_output_____" ] ], [ [ "# import packages\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec\nfrom reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters\nimport sklearn\nimport sklearn.datasets\nimport scipy.io\nfrom testCases import *\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'", "/home/jovyan/work/week5/Regularization/reg_utils.py:85: SyntaxWarning: assertion is always true, perhaps remove parentheses?\n assert(parameters['W' + str(l)].shape == layer_dims[l], layer_dims[l-1])\n/home/jovyan/work/week5/Regularization/reg_utils.py:86: SyntaxWarning: assertion is always true, perhaps remove parentheses?\n assert(parameters['W' + str(l)].shape == layer_dims[l], 1)\n" ] ], [ [ "**Problem Statement**: You have just been hired as an AI expert by the French Football Corporation. They would like you to recommend positions where France's goal keeper should kick the ball so that the French team's players can then hit it with their head. \n\n<img src=\"images/field_kiank.png\" style=\"width:600px;height:350px;\">\n<caption><center> <u> **Figure 1** </u>: **Football field**<br> The goal keeper kicks the ball in the air, the players of each team are fighting to hit the ball with their head </center></caption>\n\n\nThey give you the following 2D dataset from France's past 10 games.", "_____no_output_____" ] ], [ [ "train_X, train_Y, test_X, test_Y = load_2D_dataset()", "_____no_output_____" ] ], [ [ "Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.\n- If the dot is blue, it means the French player managed to hit the ball with his/her head\n- If the dot is red, it means the other team's player hit the ball with their head\n\n**Your goal**: Use a deep learning model to find the positions on the field where the goalkeeper should kick the ball.", "_____no_output_____" ], [ "**Analysis of the dataset**: This dataset is a little noisy, but it looks like a diagonal line separating the upper left half (blue) from the lower right half (red) would work well. \n\nYou will first try a non-regularized model. Then you'll learn how to regularize it and decide which model you will choose to solve the French Football Corporation's problem. ", "_____no_output_____" ], [ "## 1 - Non-regularized model\n\nYou will use the following neural network (already implemented for you below). This model can be used:\n- in *regularization mode* -- by setting the `lambd` input to a non-zero value. We use \"`lambd`\" instead of \"`lambda`\" because \"`lambda`\" is a reserved keyword in Python. 
\n- in *dropout mode* -- by setting the `keep_prob` to a value less than one\n\nYou will first try the model without any regularization. Then, you will implement:\n- *L2 regularization* -- functions: \"`compute_cost_with_regularization()`\" and \"`backward_propagation_with_regularization()`\"\n- *Dropout* -- functions: \"`forward_propagation_with_dropout()`\" and \"`backward_propagation_with_dropout()`\"\n\nIn each part, you will run this model with the correct inputs so that it calls the functions you've implemented. Take a look at the code below to familiarize yourself with the model.", "_____no_output_____" ] ], [ [ "def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n \n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)\n learning_rate -- learning rate of the optimization\n num_iterations -- number of iterations of the optimization loop\n print_cost -- If True, print the cost every 10000 iterations\n lambd -- regularization hyperparameter, scalar\n keep_prob - probability of keeping a neuron active during drop-out, scalar.\n \n Returns:\n parameters -- parameters learned by the model. They can then be used to predict.\n \"\"\"\n \n grads = {}\n costs = [] # to keep track of the cost\n m = X.shape[1] # number of examples\n layers_dims = [X.shape[0], 20, 3, 1]\n \n # Initialize parameters dictionary.\n parameters = initialize_parameters(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n if keep_prob == 1:\n a3, cache = forward_propagation(X, parameters)\n elif keep_prob < 1:\n a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)\n \n # Cost function\n if lambd == 0:\n cost = compute_cost(a3, Y)\n else:\n cost = compute_cost_with_regularization(a3, Y, parameters, lambd)\n \n # Backward propagation.\n assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout, \n # but this assignment will only explore one at a time\n if lambd == 0 and keep_prob == 1:\n grads = backward_propagation(X, Y, cache)\n elif lambd != 0:\n grads = backward_propagation_with_regularization(X, Y, cache, lambd)\n elif keep_prob < 1:\n grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)\n \n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n \n # Print the loss every 10000 iterations\n if print_cost and i % 10000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n if print_cost and i % 1000 == 0:\n costs.append(cost)\n \n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (x1,000)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n \n return parameters", "_____no_output_____" ] ], [ [ "Let's train the model without any regularization, and observe the accuracy on the train/test sets.", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y)\nprint (\"On the training set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6557412523481002\nCost after iteration 10000: 0.16329987525724216\nCost after iteration 20000: 
0.13851642423255986\n" ] ], [ [ "The train accuracy is 94.8% while the test accuracy is 91.5%. This is the **baseline model** (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.", "_____no_output_____" ] ], [ [ "plt.title(\"Model without regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Let's now look at two techniques to reduce overfitting.", "_____no_output_____" ], [ "## 2 - L2 Regularization\n\nThe standard way to avoid overfitting is called **L2 regularization**. It consists of appropriately modifying your cost function, from:\n$$J = -\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} \\tag{1}$$\nTo:\n$$J_{regularized} = \\small \\underbrace{-\\frac{1}{m} \\sum\\limits_{i = 1}^{m} \\large{(}\\small y^{(i)}\\log\\left(a^{[L](i)}\\right) + (1-y^{(i)})\\log\\left(1- a^{[L](i)}\\right) \\large{)} }_\\text{cross-entropy cost} + \\underbrace{\\frac{1}{m} \\frac{\\lambda}{2} \\sum\\limits_l\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2} }_\\text{L2 regularization cost} \\tag{2}$$\n\nLet's modify your cost and observe the consequences.\n\n**Exercise**: Implement `compute_cost_with_regularization()` which computes the cost given by formula (2). To calculate $\\sum\\limits_k\\sum\\limits_j W_{k,j}^{[l]2}$ , use :\n```python\nnp.sum(np.square(Wl))\n```\nNote that you have to do this for $W^{[1]}$, $W^{[2]}$ and $W^{[3]}$, then sum the three terms and multiply by $ \\frac{1}{m} \\frac{\\lambda}{2} $.", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: compute_cost_with_regularization\n\ndef compute_cost_with_regularization(A3, Y, parameters, lambd):\n \"\"\"\n Implement the cost function with L2 regularization. See formula (2) above.\n \n Arguments:\n A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n parameters -- python dictionary containing parameters of the model\n \n Returns:\n cost - value of the regularized loss function (formula (2))\n \"\"\"\n m = Y.shape[1]\n W1 = parameters[\"W1\"]\n W2 = parameters[\"W2\"]\n W3 = parameters[\"W3\"]\n \n cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost\n \n ### START CODE HERE ### (approx. 1 line)\n L2_regularization_cost = (lambd/(2*m)) * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))\n ### END CODE HERE ###\n \n cost = cross_entropy_cost + L2_regularization_cost\n \n return cost", "_____no_output_____" ], [ "A3, Y_assess, parameters = compute_cost_with_regularization_test_case()\n\nprint(\"cost = \" + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))", "cost = 1.78648594516\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **cost**\n </td>\n <td>\n 1.78648594516\n </td>\n \n </tr>\n\n</table> ", "_____no_output_____" ], [ "Of course, because you changed the cost, you have to change backward propagation as well! All the gradients have to be computed with respect to this new cost. \n\n**Exercise**: Implement the changes needed in backward propagation to take into account regularization. 
The changes only concern dW1, dW2 and dW3. For each, you have to add the regularization term's gradient ($\\frac{d}{dW} ( \\frac{1}{2}\\frac{\\lambda}{m} W^2) = \\frac{\\lambda}{m} W$).", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation_with_regularization\n\ndef backward_propagation_with_regularization(X, Y, cache, lambd):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added an L2 regularization.\n \n Arguments:\n X -- input dataset, of shape (input size, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation()\n lambd -- regularization hyperparameter, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n \n ### START CODE HERE ### (approx. 1 line)\n dW3 = 1./m * np.dot(dZ3, A2.T) + lambd/m * W3\n ### END CODE HERE ###\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n \n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW2 = 1./m * np.dot(dZ2, A1.T) + lambd/m * W2\n ### END CODE HERE ###\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n ### START CODE HERE ### (approx. 1 line)\n dW1 = 1./m * np.dot(dZ1, X.T) + lambd/m * W1\n ### END CODE HERE ###\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients", "_____no_output_____" ], [ "X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()\n\ngrads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)\nprint (\"dW1 = \"+ str(grads[\"dW1\"]))\nprint (\"dW2 = \"+ str(grads[\"dW2\"]))\nprint (\"dW3 = \"+ str(grads[\"dW3\"]))", "dW1 = [[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\ndW2 = [[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\ndW3 = [[-1.77691347 -0.11832879 -0.09397446]]\n" ] ], [ [ "**Expected Output**:\n\n<table> \n <tr>\n <td>\n **dW1**\n </td>\n <td>\n [[-0.25604646 0.12298827 -0.28297129]\n [-0.17706303 0.34536094 -0.4410571 ]]\n </td>\n </tr>\n <tr>\n <td>\n **dW2**\n </td>\n <td>\n [[ 0.79276486 0.85133918]\n [-0.0957219 -0.01720463]\n [-0.13100772 -0.03750433]]\n </td>\n </tr>\n <tr>\n <td>\n **dW3**\n </td>\n <td>\n [[-1.77691347 -0.11832879 -0.09397446]]\n </td>\n </tr>\n</table> ", "_____no_output_____" ], [ "Let's now run the model with L2 regularization $(\\lambda = 0.7)$. The `model()` function will call: \n- `compute_cost_with_regularization` instead of `compute_cost`\n- `backward_propagation_with_regularization` instead of `backward_propagation`", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y, lambd = 0.7)\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6974484493131264\nCost after iteration 10000: 0.2684918873282239\nCost after iteration 20000: 0.2680916337127301\n" ] ], [ [ "Congrats, the test set accuracy increased to 93%. 
You have saved the French football team!\n\nYou are not overfitting the training data anymore. Let's plot the decision boundary.", "_____no_output_____" ] ], [ [ "plt.title(\"Model with L2-regularization\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "**Observations**:\n- The value of $\\lambda$ is a hyperparameter that you can tune using a dev set.\n- L2 regularization makes your decision boundary smoother. If $\\lambda$ is too large, it is also possible to \"oversmooth\", resulting in a model with high bias.\n\n**What is L2-regularization actually doing?**:\n\nL2-regularization relies on the assumption that a model with small weights is simpler than a model with large weights. Thus, by penalizing the square values of the weights in the cost function you drive all the weights to smaller values. It becomes too costly for the cost to have large weights! This leads to a smoother model in which the output changes more slowly as the input changes. \n\n<font color='blue'>\n**What you should remember** -- the implications of L2-regularization on:\n- The cost computation:\n - A regularization term is added to the cost\n- The backpropagation function:\n - There are extra terms in the gradients with respect to weight matrices\n- Weights end up smaller (\"weight decay\"): \n - Weights are pushed to smaller values.", "_____no_output_____" ], [ "## 3 - Dropout\n\nFinally, **dropout** is a widely used regularization technique that is specific to deep learning. \n**It randomly shuts down some neurons in each iteration.** Watch these two videos to see what this means!\n\n<!--\nTo understand drop-out, consider this conversation with a friend:\n- Friend: \"Why do you need all these neurons to train your network and classify images?\". \n- You: \"Because each neuron contains a weight and can learn specific features/details/shape of an image. The more neurons I have, the more features my model learns!\"\n- Friend: \"I see, but are you sure that your neurons are learning different features and not all the same features?\"\n- You: \"Good point... Neurons in the same layer actually don't talk to each other. It should be definitely possible that they learn the same image features/shapes/forms/details... which would be redundant. There should be a solution.\"\n!--> \n\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout1_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n<br>\n<caption><center> <u> Figure 2 </u>: Drop-out on the second hidden layer. <br> At each iteration, you shut down (= set to zero) each neuron of a layer with probability $1 - keep\\_prob$ or keep it with probability $keep\\_prob$ (50% here). The dropped neurons don't contribute to the training in both the forward and backward propagations of the iteration. </center></caption>\n\n<center>\n<video width=\"620\" height=\"440\" src=\"images/dropout2_kiank.mp4\" type=\"video/mp4\" controls>\n</video>\n</center>\n\n<caption><center> <u> Figure 3 </u>: Drop-out on the first and third hidden layers. <br> $1^{st}$ layer: we shut down on average 40% of the neurons. $3^{rd}$ layer: we shut down on average 20% of the neurons. </center></caption>\n\n\nWhen you shut some neurons down, you actually modify your model. The idea behind drop-out is that at each iteration, you train a different model that uses only a subset of your neurons. 
With dropout, your neurons thus become less sensitive to the activation of one other specific neuron, because that other neuron might be shut down at any time. \n\n### 3.1 - Forward propagation with dropout\n\n**Exercise**: Implement the forward propagation with dropout. You are using a 3 layer neural network, and will add dropout to the first and second hidden layers. We will not apply dropout to the input layer or output layer. \n\n**Instructions**:\nYou would like to shut down some neurons in the first and second layers. To do that, you are going to carry out 4 Steps:\n1. In lecture, we discussed creating a variable $d^{[1]}$ with the same shape as $a^{[1]}$ using `np.random.rand()` to randomly get numbers between 0 and 1. Here, you will use a vectorized implementation, so create a random matrix $D^{[1]} = [d^{[1](1)} d^{[1](2)} ... d^{[1](m)}] $ of the same dimension as $A^{[1]}$.\n2. Set each entry of $D^{[1]}$ to be 0 with probability (`1-keep_prob`) or 1 with probability (`keep_prob`), by thresholding values in $D^{[1]}$ appropriately. Hint: to set all the entries of a matrix X to 0 (if entry is less than 0.5) or 1 (if entry is more than 0.5) you would do: `X = (X < 0.5)`. Note that 0 and 1 are respectively equivalent to False and True.\n3. Set $A^{[1]}$ to $A^{[1]} * D^{[1]}$. (You are shutting down some neurons). You can think of $D^{[1]}$ as a mask, so that when it is multiplied with another matrix, it shuts down some of the values.\n4. Divide $A^{[1]}$ by `keep_prob`. By doing this you are assuring that the result of the cost will still have the same expected value as without drop-out. (This technique is also called inverted dropout.)", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: forward_propagation_with_dropout\n\ndef forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):\n \"\"\"\n Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n parameters -- python dictionary containing your parameters \"W1\", \"b1\", \"W2\", \"b2\", \"W3\", \"b3\":\n W1 -- weight matrix of shape (20, 2)\n b1 -- bias vector of shape (20, 1)\n W2 -- weight matrix of shape (3, 20)\n b2 -- bias vector of shape (3, 1)\n W3 -- weight matrix of shape (1, 3)\n b3 -- bias vector of shape (1, 1)\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n A3 -- last activation value, output of the forward propagation, of shape (1,1)\n cache -- tuple, information stored for computing the backward propagation\n \"\"\"\n \n np.random.seed(1)\n \n # retrieve parameters\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n \n # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID\n Z1 = np.dot(W1, X) + b1\n A1 = relu(Z1)\n ### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above. \n D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)\n D1 = D1 < keep_prob # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)\n A1 = A1*D1 # Step 3: shut down some neurons of A1\n A1 = A1/keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n ### START CODE HERE ### (approx. 
4 lines)\n D2 = np.random.rand(A2.shape[0],A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)\n D2 = (D2 < keep_prob) # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)\n A2 = A2*D2 # Step 3: shut down some neurons of A2\n A2 = A2/keep_prob # Step 4: scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n \n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n \n return A3, cache", "_____no_output_____" ], [ "X_assess, parameters = forward_propagation_with_dropout_test_case()\n\nA3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)\nprint (\"A3 = \" + str(A3))", "A3 = [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **A3**\n </td>\n <td>\n [[ 0.36974721 0.00305176 0.04565099 0.49683389 0.36974721]]\n </td>\n \n </tr>\n\n</table> ", "_____no_output_____" ], [ "### 3.2 - Backward propagation with dropout\n\n**Exercise**: Implement the backward propagation with dropout. As before, you are training a 3 layer network. Add dropout to the first and second hidden layers, using the masks $D^{[1]}$ and $D^{[2]}$ stored in the cache. \n\n**Instruction**:\nBackpropagation with dropout is actually quite easy. You will have to carry out 2 Steps:\n1. You had previously shut down some neurons during forward propagation, by applying a mask $D^{[1]}$ to `A1`. In backpropagation, you will have to shut down the same neurons, by reapplying the same mask $D^{[1]}$ to `dA1`. \n2. During forward propagation, you had divided `A1` by `keep_prob`. In backpropagation, you'll therefore have to divide `dA1` by `keep_prob` again (the calculus interpretation is that if $A^{[1]}$ is scaled by `keep_prob`, then its derivative $dA^{[1]}$ is also scaled by the same `keep_prob`).\n", "_____no_output_____" ] ], [ [ "# GRADED FUNCTION: backward_propagation_with_dropout\n\ndef backward_propagation_with_dropout(X, Y, cache, keep_prob):\n \"\"\"\n Implements the backward propagation of our baseline model to which we added dropout.\n \n Arguments:\n X -- input dataset, of shape (2, number of examples)\n Y -- \"true\" labels vector, of shape (output size, number of examples)\n cache -- cache output from forward_propagation_with_dropout()\n keep_prob - probability of keeping a neuron active during drop-out, scalar\n \n Returns:\n gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables\n \"\"\"\n \n m = X.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n \n dZ3 = A3 - Y\n dW3 = 1./m * np.dot(dZ3, A2.T)\n db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)\n dA2 = np.dot(W3.T, dZ3)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA2 = dA2*D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation\n dA2 = dA2/keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1./m * np.dot(dZ2, A1.T)\n db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)\n \n dA1 = np.dot(W2.T, dZ2)\n ### START CODE HERE ### (≈ 2 lines of code)\n dA1 = dA1*D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation\n dA1 = dA1/keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n ### END CODE HERE ###\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1./m * np.dot(dZ1, 
X.T)\n db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)\n \n gradients = {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3,\"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1, \n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n \n return gradients", "_____no_output_____" ], [ "X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()\n\ngradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)\n\nprint (\"dA1 = \" + str(gradients[\"dA1\"]))\nprint (\"dA2 = \" + str(gradients[\"dA2\"]))", "dA1 = [[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\ndA2 = [[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n" ] ], [ [ "**Expected Output**: \n\n<table> \n <tr>\n <td>\n **dA1**\n </td>\n <td>\n [[ 0.36544439 0. -0.00188233 0. -0.17408748]\n [ 0.65515713 0. -0.00337459 0. -0. ]]\n </td>\n \n </tr>\n <tr>\n <td>\n **dA2**\n </td>\n <td>\n [[ 0.58180856 0. -0.00299679 0. -0.27715731]\n [ 0. 0.53159854 -0. 0.53159854 -0.34089673]\n [ 0. 0. -0.00292733 0. -0. ]]\n </td>\n \n </tr>\n</table> ", "_____no_output_____" ], [ "Let's now run the model with dropout (`keep_prob = 0.86`). It means at every iteration you shut down each neuron of layers 1 and 2 with 14% probability. The function `model()` will now call:\n- `forward_propagation_with_dropout` instead of `forward_propagation`.\n- `backward_propagation_with_dropout` instead of `backward_propagation`.", "_____no_output_____" ] ], [ [ "parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)\n\nprint (\"On the train set:\")\npredictions_train = predict(train_X, train_Y, parameters)\nprint (\"On the test set:\")\npredictions_test = predict(test_X, test_Y, parameters)", "Cost after iteration 0: 0.6543912405149825\n" ] ], [ [ "Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you! \n\nRun the code below to plot the decision boundary.", "_____no_output_____" ] ], [ [ "plt.title(\"Model with dropout\")\naxes = plt.gca()\naxes.set_xlim([-0.75,0.40])\naxes.set_ylim([-0.75,0.65])\nplot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)", "_____no_output_____" ] ], [ [ "**Note**:\n- A **common mistake** when using dropout is to use it both in training and testing. You should use dropout (randomly eliminate nodes) only in training. \n- Deep learning frameworks like [tensorflow](https://www.tensorflow.org/api_docs/python/tf/nn/dropout), [PaddlePaddle](http://doc.paddlepaddle.org/release_doc/0.9.0/doc/ui/api/trainer_config_helpers/attrs.html), [keras](https://keras.io/layers/core/#dropout) or [caffe](http://caffe.berkeleyvision.org/tutorial/layers/dropout.html) come with a dropout layer implementation. Don't stress - you will soon learn some of these frameworks.\n\n<font color='blue'>\n**What you should remember about dropout:**\n- Dropout is a regularization technique.\n- You only use dropout during training. Don't use dropout (randomly eliminate nodes) during test time.\n- Apply dropout both during forward and backward propagation.\n- During training time, divide each dropout layer by keep_prob to keep the same expected value for the activations. 
For example, if keep_prob is 0.5, then we will on average shut down half the nodes, so the output will be scaled by 0.5 since only the remaining half are contributing to the solution. Dividing by 0.5 is equivalent to multiplying by 2. Hence, the output now has the same expected value. You can check that this works even when keep_prob takes values other than 0.5. ", "_____no_output_____" ], [ "## 4 - Conclusions", "_____no_output_____" ], [ "**Here are the results of our three models**: \n\n<table> \n <tr>\n <td>\n **model**\n </td>\n <td>\n **train accuracy**\n </td>\n <td>\n **test accuracy**\n </td>\n\n </tr>\n <tr>\n <td>\n 3-layer NN without regularization\n </td>\n <td>\n 95%\n </td>\n <td>\n 91.5%\n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with L2-regularization\n </td>\n <td>\n 94%\n </td>\n <td>\n 93%\n </td>\n </tr>\n <tr>\n <td>\n 3-layer NN with dropout\n </td>\n <td>\n 93%\n </td>\n <td>\n 95%\n </td>\n </tr>\n</table> ", "_____no_output_____" ], [ "Note that regularization hurts training set performance! This is because it limits the ability of the network to overfit to the training set. But since it ultimately gives better test accuracy, it is helping your system. ", "_____no_output_____" ], [ "Congratulations on finishing this assignment! And also on revolutionizing French football. :-) ", "_____no_output_____" ], [ "<font color='blue'>\n**What we want you to remember from this notebook**:\n- Regularization will help you reduce overfitting.\n- Regularization will drive your weights to lower values.\n- L2 regularization and Dropout are two very effective regularization techniques.", "_____no_output_____" ] ] ]
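A standalone NumPy check of the inverted-dropout scaling the notebook emphasizes: masking activations and then dividing by `keep_prob` leaves the expected activation roughly unchanged. This sketch is independent of the assignment's helper code; the array sizes are arbitrary:

```python
import numpy as np

np.random.seed(0)
keep_prob = 0.86

A = np.random.rand(3, 100000)             # stand-in activations
D = np.random.rand(*A.shape) < keep_prob  # keep each unit with prob keep_prob
A_drop = (A * D) / keep_prob              # shut neurons down, then rescale

print(A.mean())       # ~0.5
print(A_drop.mean())  # ~0.5 as well, thanks to the 1/keep_prob rescaling
```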
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0dd60e90f92b09b9b0c6c33dd039e044fab587c
113,840
ipynb
Jupyter Notebook
hackathons/hackathon1/fipy/1c.ipynb
wd15/chimad-phase-field
b8ead2ef666201b500033052d0a4efb55796c2da
[ "MIT" ]
11
2016-04-19T13:51:02.000Z
2021-05-30T07:52:21.000Z
hackathons/hackathon1/fipy/1c.ipynb
usnistgov/chimad-phase-field
7f07e5ab046b917dfa32d84a68421ed94ec03a3b
[ "MIT" ]
417
2015-03-20T16:39:11.000Z
2018-01-16T16:33:53.000Z
hackathons/hackathon1/fipy/1c.ipynb
wd15/chimad-phase-field
b8ead2ef666201b500033052d0a4efb55796c2da
[ "MIT" ]
15
2015-03-20T21:44:06.000Z
2017-12-05T23:39:36.000Z
205.117117
74,464
0.898278
[ [ [ "# Table of Contents\n* [1c. Fixed flux spinodal decomposition on a T shaped domain](#1c.-Fixed-flux-spinodal-decomposition-on-a-T-shaped-domain)\n\t* [Use Binder For Live Examples](#Use-Binder-For-Live-Examples)\n\t* [Define $f_0$](#Define-$f_0$)\n\t* [Define the Equation](#Define-the-Equation)\n\t* [Solve the Equation](#Solve-the-Equation)\n\t* [Run the Example Locally](#Run-the-Example-Locally)\n\t* [Movie of Evolution](#Movie-of-Evolution)\n", "_____no_output_____" ], [ "# 1c. Fixed flux spinodal decomposition on a T shaped domain", "_____no_output_____" ], [ "## Use Binder For Live Examples", "_____no_output_____" ], [ "[![Binder](http://mybinder.org/badge.svg)](http://mybinder.org/repo/wd15/fipy-hackathon1)", "_____no_output_____" ], [ "The free energy is given by,\n\n$$ f_0\\left[ c \\left( \\vec{r} \\right) \\right] =\n - \\frac{A}{2} \\left(c - c_m\\right)^2\n + \\frac{B}{4} \\left(c - c_m\\right)^4\n + \\frac{c_{\\alpha}}{4} \\left(c - c_{\\alpha} \\right)^4\n + \\frac{c_{\\beta}}{4} \\left(c - c_{\\beta} \\right)^4 $$\n\nIn FiPy we write the evolution equation as \n\n$$ \\frac{\\partial c}{\\partial t} = \\nabla \\cdot \\left[\n D \\left( c \\right) \\left( \\frac{ \\partial^2 f_0 }{ \\partial c^2} \\nabla c - \\kappa \\nabla \\nabla^2 c \\right)\n \\right] $$\n\nLet's start by calculating $ \\frac{ \\partial^2 f_0 }{ \\partial c^2} $ using sympy. It's easy for this case, but useful in the general case for taking care of difficult book keeping in phase field problems.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport sympy\nimport fipy as fp\nimport numpy as np", "_____no_output_____" ], [ "A, c, c_m, B, c_alpha, c_beta = sympy.symbols(\"A c_var c_m B c_alpha c_beta\")", "_____no_output_____" ], [ "f_0 = - A / 2 * (c - c_m)**2 + B / 4 * (c - c_m)**4 + c_alpha / 4 * (c - c_alpha)**4 + c_beta / 4 * (c - c_beta)**4", "_____no_output_____" ], [ "print f_0", "-A*(-c_m + c_var)**2/2 + B*(-c_m + c_var)**4/4 + c_alpha*(-c_alpha + c_var)**4/4 + c_beta*(-c_beta + c_var)**4/4\n" ], [ "sympy.diff(f_0, c, 2)", "_____no_output_____" ] ], [ [ "The first step in implementing any problem in FiPy is to define the mesh. For [Problem 1a]({{ site.baseurl }}/hackathons/hackathon1/problems.ipynb/#1.a-Square-Periodic) the solution domain is just a square domain, but the boundary conditions are periodic, so a `PeriodicGrid2D` object is used. No other boundary conditions are required.", "_____no_output_____" ] ], [ [ "mesh = fp.Grid2D(dx=0.5, dy=0.5, nx=40, ny=200) + (fp.Grid2D(dx=0.5, dy=0.5, nx=200, ny=40) + [[-40],[100]])", "_____no_output_____" ] ], [ [ "The next step is to define the parameters and create a solution variable.", "_____no_output_____" ] ], [ [ "c_alpha = 0.05\nc_beta = 0.95\nA = 2.0\nkappa = 2.0\nc_m = (c_alpha + c_beta) / 2.\nB = A / (c_alpha - c_m)**2\nD = D_alpha = D_beta = 2. / (c_beta - c_alpha)\nc_0 = 0.45\nq = np.sqrt((2., 3.))\nepsilon = 0.01\n\nc_var = fp.CellVariable(mesh=mesh, name=r\"$c$\", hasOld=True)", "_____no_output_____" ] ], [ [ "Now we need to define the initial conditions given by,\n\nSet $c\\left(\\vec{r}, t\\right)$ such that\n\n$$ c\\left(\\vec{r}, 0\\right) = \\bar{c}_0 + \\epsilon \\cos \\left( \\vec{q} \\cdot \\vec{r} \\right) $$", "_____no_output_____" ] ], [ [ "r = np.array((mesh.x, mesh.y))\nc_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))\nviewer = fp.Viewer(c_var)", "_____no_output_____" ] ], [ [ "## Define $f_0$", "_____no_output_____" ], [ "To define the equation with FiPy first define `f_0` in terms of FiPy. 
Recall `f_0` from above calculated using Sympy. Here we use the string representation and set it equal to `f_0_var` using the `exec` command.", "_____no_output_____" ] ], [ [ "out = sympy.diff(f_0, c, 2)", "_____no_output_____" ], [ "exec \"f_0_var = \" + repr(out)", "_____no_output_____" ], [ "#f_0_var = -A + 3*B*(c_var - c_m)**2 + 3*c_alpha*(c_var - c_alpha)**2 + 3*c_beta*(c_var - c_beta)**2\nf_0_var", "_____no_output_____" ] ], [ [ "## Define the Equation", "_____no_output_____" ] ], [ [ "eqn = fp.TransientTerm(coeff=1.) == fp.DiffusionTerm(D * f_0_var) - fp.DiffusionTerm((D, kappa))\neqn", "_____no_output_____" ] ], [ [ "## Solve the Equation", "_____no_output_____" ], [ "To solve the equation a simple time stepping scheme is used which is decreased or increased based on whether the residual decreases or increases. A time step is recalculated if the required tolerance is not reached.", "_____no_output_____" ] ], [ [ "elapsed = 0.0\nsteps = 0\ndt = 0.01\ntotal_sweeps = 2\ntolerance = 1e-1\ntotal_steps = 10", "_____no_output_____" ], [ "c_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))\nc_var.updateOld()\nfrom fipy.solvers.pysparse import LinearLUSolver as Solver\nsolver = Solver()\n\nwhile steps < total_steps:\n res0 = eqn.sweep(c_var, dt=dt, solver=solver)\n\n for sweeps in range(total_sweeps):\n res = eqn.sweep(c_var, dt=dt, solver=solver)\n\n if res < res0 * tolerance:\n steps += 1\n elapsed += dt\n dt *= 1.1\n c_var.updateOld()\n else:\n dt *= 0.8\n c_var[:] = c_var.old\n\nviewer.plot()\nprint 'elapsed_time:',elapsed", "_____no_output_____" ] ], [ [ "## Run the Example Locally", "_____no_output_____" ], [ "The following cell will dump a file called `fipy_hackathon_1c.py` to the local file system to be run. The images are saved out at each time step.", "_____no_output_____" ] ], [ [ "%%writefile fipy_hackathon_1c.py\n\nimport fipy as fp\nimport numpy as np\n\nmesh = fp.Grid2D(dx=0.5, dy=0.5, nx=40, ny=200) + (fp.Grid2D(dx=0.5, dy=0.5, nx=200, ny=40) + [[-40],[100]])\n\nc_alpha = 0.05\nc_beta = 0.95\nA = 2.0\nkappa = 2.0\nc_m = (c_alpha + c_beta) / 2.\nB = A / (c_alpha - c_m)**2\nD = D_alpha = D_beta = 2. / (c_beta - c_alpha)\nc_0 = 0.45\nq = np.sqrt((2., 3.))\nepsilon = 0.01\n\nc_var = fp.CellVariable(mesh=mesh, name=r\"$c$\", hasOld=True)\n\nr = np.array((mesh.x, mesh.y))\nc_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))\n\nf_0_var = -A + 3*B*(c_var - c_m)**2 + 3*c_alpha*(c_var - c_alpha)**2 + 3*c_beta*(c_var - c_beta)**2\n\neqn = fp.TransientTerm(coeff=1.) 
== fp.DiffusionTerm(D * f_0_var) - fp.DiffusionTerm((D, kappa))\n\nelapsed = 0.0\nsteps = 0\ndt = 0.01\ntotal_sweeps = 2\ntolerance = 1e-1\ntotal_steps = 600\n\nc_var[:] = c_0 + epsilon * np.cos((q[:, None] * r).sum(0))\n\nc_var.updateOld()\n\nfrom fipy.solvers.pysparse import LinearLUSolver as Solver\n\nsolver = Solver()\n\nviewer = fp.Viewer(c_var)\nwhile steps < total_steps:\n res0 = eqn.sweep(c_var, dt=dt, solver=solver)\n\n for sweeps in range(total_sweeps):\n res = eqn.sweep(c_var, dt=dt, solver=solver)\n\n print ' '\n print 'steps',steps\n print 'res',res\n print 'sweeps',sweeps\n print 'dt',dt\n\n\n if res < res0 * tolerance:\n steps += 1\n elapsed += dt\n dt *= 1.1\n if steps % 1 == 0:\n viewer.plot('image{0}.png'.format(steps))\n c_var.updateOld()\n else:\n dt *= 0.8\n c_var[:] = c_var.old", "Overwriting fipy_hackathon_1c.py\n" ] ], [ [ "## Movie of Evolution", "_____no_output_____" ], [ "The movie of the evolution for 600 steps.\n\nThe movie was generated with the output files of the form `image*.png` using the following commands,\n\n $ rename 's/\\d+/sprintf(\"%05d\",$&)/e' image*\n $ ffmpeg -f image2 -r 6 -i 'image%05d.png' output.mp4", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\nscale = 1.5\nYouTubeVideo('aZk38E7OxcQ', width=420 * scale, height=315 * scale, rel=0)", "_____no_output_____" ] ] ]
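A Python 3-friendly alternative to the `exec` trick in the notebook is `sympy.lambdify`, which turns the symbolic second derivative of the free energy into a plain numeric callable. A sketch reusing the notebook's symbol definitions; the substituted constants mirror the parameter cell above:

```python
import numpy as np
import sympy

A, c, c_m, B, c_alpha, c_beta = sympy.symbols("A c_var c_m B c_alpha c_beta")
f_0 = (-A / 2 * (c - c_m)**2 + B / 4 * (c - c_m)**4
       + c_alpha / 4 * (c - c_alpha)**4 + c_beta / 4 * (c - c_beta)**4)

# d^2 f_0 / dc^2, evaluated numerically without building a source string
d2f = sympy.diff(f_0, c, 2)
d2f_func = sympy.lambdify(
    c, d2f.subs({A: 2.0, c_alpha: 0.05, c_beta: 0.95, c_m: 0.5,
                 B: 2.0 / 0.45**2}))

print(d2f_func(np.linspace(0.0, 1.0, 5)))
```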
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0dd6c9e1c977efb0f0febb962d3e257bcbb83cc
34,076
ipynb
Jupyter Notebook
web/00_quiz/crawling_quiz.ipynb
hyelansgithub/fastcampus-programming
a3e15a0ac30249f85df6939562997b514d9fba90
[ "MIT" ]
null
null
null
web/00_quiz/crawling_quiz.ipynb
hyelansgithub/fastcampus-programming
a3e15a0ac30249f85df6939562997b514d9fba90
[ "MIT" ]
null
null
null
web/00_quiz/crawling_quiz.ipynb
hyelansgithub/fastcampus-programming
a3e15a0ac30249f85df6939562997b514d9fba90
[ "MIT" ]
null
null
null
32.828516
123
0.301356
[ [ [ "##### 아래 URL의 NBA 데이터를 크롤링하여 판다스 데이터 프레임으로 나타내세요. \n- http://stats.nba.com/teams/traditional/?sort=GP&dir=-1", "_____no_output_____" ] ], [ [ "from selenium import webdriver", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "def Nba():\n\n try:\n\n driver = webdriver.Chrome()\n driver.get(\"http://stats.nba.com/teams/traditional/?sort=GP&dir=-1\")\n\n # column_list\n columns = driver.find_elements_by_css_selector(\".nba-stat-table__overflow > table > \\\n thead > tr > th\")[1:28]\n column_list = [\" \",]\n for column in columns:\n column = column.text\n column_list.append(column)\n\n # making dataframe\n df = pd.DataFrame(columns=column_list, index = [\" \" for i in range(30)])\n\n # data\n data = driver.find_elements_by_css_selector\\\n (\".nba-stat-table__overflow > table > tbody > tr > td\")\n data_list = []\n\n for datum in data:\n datum = datum.text.replace(\"\\n\", \" \")\n data_list.append(datum)\n\n grouped = list(zip(*[iter(data_list)] * 28))\n\n for i in range(30):\n for j in range(28):\n df.iloc[i, j] = grouped[i][j]\n\n driver.quit()\n\n except:\n\n driver.quit()\n\n return df\n", "_____no_output_____" ], [ "Nba()", "_____no_output_____" ] ], [ [ "##### 셀레니움을 이용하여 네이버 IT/과학 기사의 10 페이지 까지의 최신 제목 리스트를 크롤링하세요.\n- http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105", "_____no_output_____" ] ], [ [ "def News():\n \n try:\n\n driver = webdriver.Chrome()\n driver.get(\"http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105\")\n\n # news = pd.DataFrame(columns = [\"Number of Articles\",\"Article Headline\"])\n\n for i in range(10):\n results = driver.find_elements_by_css_selector(\"#section_body > ul > li > dl > dt:nth-child(2) > a\")\n for result in results:\n result = result.text\n print(result)\n \n driver.find_element_by_css_selector(\"#paging > a:nth-child({})\".format (i+2)).click()\n\n driver.quit()\n \n except:\n driver.quit()\n", "_____no_output_____" ], [ "News()", "웹젠, ‘일하기 좋은 회사’로 바뀐다\n권영수 LGU+ “5G장비, 화웨이 가장 앞서…5G 킬러서비스는 고민”\n권영수 \"5G 장비서 中 화웨이 가장 앞서\"\nLG유플러스, 화웨이 5G 장비 '호평'…도입 유력\n'화웨이' 만난 권영수 LGU+ 부회장 \"기술력 가장 앞선다\"\n권영수 LG유플러스 부회장, \"이변 없는 한 화웨이 도입\"...5G 서비스 구현 고민\n[전문]한경바이오헬스포럼 제5차 조찬간담회 토론 내용\nLGU+, 5G 화웨이 장비 도입 유력해졌다\n브랜든 김 삼성넥스트 투자총괄 “탈중앙화 시대, 제2의 우버·페이스북 나올 것”\n카카오M '이병헌·김태리' 소속사 지분 사들인 까닭은?\n권영수 LGU+ 부회장 “5G 장비, 화웨이가 가장 앞서”\n\"10년 넘게 걸리는 신약개발, AI로 단축… 의료 빅데이터 활용이 관건\"\nKTㆍLG 앞에서…5G 야심 드러낸 中 화웨이\n“알뜰폰 구하기”… 올해도 전파사용료 면제 유력\n[MWC 상하이 2018] 드론·VR게임… ‘5G의 미래’ 엿본다\n[알아봅시다] 보이지 않는 위협 전자파 막는 차폐 소재\n황창규 - 권영수, 5G 상용화 `리더십 행보`\n삼성이냐 화웨이냐… 5G 장비 도입 앞두고 고민 깊은 이통3사\n삼성디스플레이 '와이옥타' 패널 中 오포에 첫 판매\n휴대폰 명의도용 피해 '뚝'···부정가입방지시스템 빛봤다\n" ], [ "# 다음 페이지로 넘어가는 것에서 문제가 있음", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0dd6f3651661f4444978f5c323b92599dcf3858
299,260
ipynb
Jupyter Notebook
Laptop_Price_Predict/laptop_price_main.ipynb
Andy666Fox/TINY_DS_PROJECTS
777edd709062acf673c02575d0a0433fc897584f
[ "MIT" ]
1
2021-09-22T16:37:27.000Z
2021-09-22T16:37:27.000Z
Laptop_Price_Predict/laptop_price_main.ipynb
Andy666Fox/TINY_DS_PROJECTS
777edd709062acf673c02575d0a0433fc897584f
[ "MIT" ]
null
null
null
Laptop_Price_Predict/laptop_price_main.ipynb
Andy666Fox/TINY_DS_PROJECTS
777edd709062acf673c02575d0a0433fc897584f
[ "MIT" ]
null
null
null
95.824528
51,346
0.74578
[ [ [ "# IMPORTS", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np \nimport seaborn as sns \nimport matplotlib.pyplot as plt ", "_____no_output_____" ] ], [ [ "# READ THE DATA", "_____no_output_____" ] ], [ [ "data = pd.read_csv('./input/laptops.csv', encoding='latin-1')\ndata.head(10)", "_____no_output_____" ] ], [ [ "# MAIN EDA BLOCK", "_____no_output_____" ] ], [ [ "print(f'Data Shape\\nRows: {data.shape[0]}\\nColumns: {data.shape[1]}')\nprint('=' * 30)\ndata.info()", "Data Shape\nRows: 1303\nColumns: 13\n==============================\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1303 entries, 0 to 1302\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 1303 non-null int64 \n 1 Company 1303 non-null object \n 2 Product 1303 non-null object \n 3 TypeName 1303 non-null object \n 4 Inches 1303 non-null float64\n 5 ScreenResolution 1303 non-null object \n 6 Cpu 1303 non-null object \n 7 Ram 1303 non-null object \n 8 Memory 1303 non-null object \n 9 Gpu 1303 non-null object \n 10 OpSys 1303 non-null object \n 11 Weight 1303 non-null object \n 12 Price_euros 1303 non-null float64\ndtypes: float64(2), int64(1), object(10)\nmemory usage: 132.5+ KB\n" ], [ "data.describe()", "_____no_output_____" ], [ "data['Product'] = data['Product'].str.split('(').apply(lambda x: x[0])", "_____no_output_____" ], [ "data['CPu_Speed'] = data['Cpu'].str.split(' ').apply(lambda x: x[-1]).str.replace('GHz', '')\ndata['Cpu_Vender'] = data['Cpu'].str.split(' ').apply(lambda x: x[0])\ndata['Cpu_Type'] = data['Cpu'].str.split(' ').apply(lambda x: x[1:4] if x[1]=='Celeron' and 'Pentium' and 'Xeon' else (x[1:3] if (x[1]=='Core' or x[0]=='AMD') else x[0]))\ndata['Cpu_Type'] = data['Cpu_Type'].apply(lambda x: ' '.join(x))\ndata['Cpu_Type']\ndata.head(10)", "_____no_output_____" ], [ "split_mem = data['Memory'].str.split(' ', 1, expand=True)\ndata['Storage_Type'] = split_mem[1]\ndata['Memory'] = split_mem[0]\ndata['Memory'].unique()\ndata.head(10)", "_____no_output_____" ], [ "data['Ram'] = data['Ram'].str.replace('GB', '')\n\ndf_mem = data['Memory'].str.split('(\\d+)', expand=True)\ndata['Memory'] = pd.to_numeric(df_mem[1])\ndata.rename(columns={'Memory': 'Memory (GB or TB)'}, inplace=True)", "_____no_output_____" ], [ "def mem(x):\n if x == 1:\n return 1024\n elif x == 2:\n return 2048", "_____no_output_____" ], [ "data['Memory (GB or TB)'] = data['Memory (GB or TB)'].apply(lambda x: 1024 if x==1 else x)\ndata['Memory (GB or TB)'] = data['Memory (GB or TB)'].apply(lambda x: 2048 if x==2 else x)\ndata.rename(columns={'Memory (GB or TB)': 'Storage (GB)'}, inplace=True)\ndata.head(10)", "_____no_output_____" ], [ "data['Weight'] = data['Weight'].str.replace('kg', '')\ndata.head(10)", "_____no_output_____" ], [ "gpu_distr_list = data['Gpu'].str.split(' ')\ndata['Gpu_Vender'] = data['Gpu'].str.split(' ').apply(lambda x: x[0])\ndata['Gpu_Type'] = data['Gpu'].str.split(' ').apply(lambda x: x[1:])\ndata['Gpu_Type'] = data['Gpu_Type'].apply(lambda x: ' '.join(x))\ndata.head(10)", "_____no_output_____" ], [ "data['Touchscreen'] = data['ScreenResolution'].apply(lambda x: 1 if 'Touchscreen' in x else 0)\ndata['Ips'] = data['ScreenResolution'].apply(lambda x: 1 if 'IPS' in x else 0)", "_____no_output_____" ], [ "def cat_os(op_s):\n if op_s =='Windows 10' or op_s == 'Windows 7' or op_s == 'Windows 10 S':\n return 'Windows'\n elif op_s =='macOS' or op_s == 'Mac OS X':\n return 'Mac'\n else:\n return 'Other/No OS/Linux'\n \ndata['OpSys'] = 
data['OpSys'].apply(cat_os)", "_____no_output_____" ], [ "data = data.reindex(columns=[\"Company\", \"TypeName\", \"Inches\", \"Touchscreen\", \n \"Ips\", \"Cpu_Vender\", \"Cpu_Type\",\"Ram\", \"Storage (GB)\", \n \"Storage Type\", \"Gpu_Vender\", \"Gpu_Type\", \"Weight\", \"OpSys\", \"Price_euros\" ])", "_____no_output_____" ], [ "data.head(10)", "_____no_output_____" ], [ "data['Ram'] = data['Ram'].astype('int')\ndata['Storage (GB)'] = data['Storage (GB)'].astype('int')\ndata['Weight'] = data['Weight'].astype('float')", "_____no_output_____" ], [ "data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1303 entries, 0 to 1302\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Company 1303 non-null object \n 1 TypeName 1303 non-null object \n 2 Inches 1303 non-null float64\n 3 Touchscreen 1303 non-null int64 \n 4 Ips 1303 non-null int64 \n 5 Cpu_Vender 1303 non-null object \n 6 Cpu_Type 1303 non-null object \n 7 Ram 1303 non-null int32 \n 8 Storage (GB) 1303 non-null int32 \n 9 Storage Type 0 non-null float64\n 10 Gpu_Vender 1303 non-null object \n 11 Gpu_Type 1303 non-null object \n 12 Weight 1303 non-null float64\n 13 OpSys 1303 non-null object \n 14 Price_euros 1303 non-null float64\ndtypes: float64(4), int32(2), int64(2), object(7)\nmemory usage: 142.6+ KB\n" ], [ "sns.set(rc={'figure.figsize': (9,5)})", "_____no_output_____" ], [ "data['Company'].value_counts().plot(kind='bar')", "_____no_output_____" ], [ "sns.barplot(x=data['Company'], y=data['Price_euros'])", "_____no_output_____" ], [ "data['TypeName'].value_counts().plot(kind='bar')", "_____no_output_____" ], [ "sns.barplot(x=data['TypeName'], y=data['Price_euros'])", "_____no_output_____" ], [ "cpu_distr = data['Cpu_Type'].value_counts()[:10].reset_index()\ncpu_distr", "_____no_output_____" ], [ "sns.barplot(x=cpu_distr['index'], y=cpu_distr['Cpu_Type'], hue='Cpu_Vender', data=data)", "_____no_output_____" ], [ "gpu_distr = data['Gpu_Type'].value_counts()[:10].reset_index()\ngpu_distr", "_____no_output_____" ], [ "sns.barplot(x=gpu_distr['index'], y=gpu_distr['Gpu_Type'], hue='Gpu_Vender', data=data)", "_____no_output_____" ], [ "sns.barplot(x=data['OpSys'], y=data['Price_euros'])", "_____no_output_____" ], [ "corr_data = data.corr()\ncorr_data['Price_euros'].sort_values(ascending=False)", "_____no_output_____" ], [ "sns.heatmap(data.corr(), annot=True)", "_____no_output_____" ], [ "X = data.drop(columns=['Price_euros'])\ny = np.log(data['Price_euros'])", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15, random_state=42)", "_____no_output_____" ], [ "from sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.metrics import r2_score, mean_absolute_error\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor\nfrom xgboost import XGBRegressor\nfrom sklearn.ensemble import VotingRegressor, StackingRegressor", "_____no_output_____" ], [ "step1 = ColumnTransformer(transformers=[\n ('col_inf', OneHotEncoder(sparse=False, handle_unknown='ignore'), [0,1,5,6,9,10,11,13])\n],remainder='passthrough')", "_____no_output_____" ], [ "rf = RandomForestRegressor(n_estimators=350, random_state=3, max_samples=0.5, max_features=0.75, max_depth=15)\ngbdt = GradientBoostingRegressor(n_estimators=100, max_features=0.5)\nxgb 
= XGBRegressor(n_estimators=25, learning_rate=0.3, max_depth=5)\net = ExtraTreesRegressor(n_estimators=100, random_state=3, max_samples=0.5, max_features=0.75, max_depth=10)", "_____no_output_____" ], [ "step2 = VotingRegressor([('rf', rf), ('gbdt', gbdt), ('xgb', xgb), ('et', et)], weights=[5,1,1,1])", "_____no_output_____" ], [ "pipe = Pipeline([ \n ('step1', step1),\n ('step2', step2)])", "_____no_output_____" ], [ "pipe.fit(X_train, y_train)", "_____no_output_____" ], [ "y_pred = pipe.predict(X_test)", "_____no_output_____" ], [ "print('R2 score', r2_score(y_test, y_pred))\nprint('MAE', mean_absolute_error(y_test, y_pred))", "R2 score 0.8578750665087866\nMAE 0.16211150811805436\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dda391c13e538f46ed63a66dfa2b38a6dafcf3
7,076
ipynb
Jupyter Notebook
examples/ALBERTBinaryClassifier.ipynb
Anychnn/unif
7b25ba0aac28c4e60dc4f9973d4338ad09c94177
[ "Apache-2.0" ]
93
2020-10-08T10:20:22.000Z
2022-03-09T02:37:17.000Z
examples/ALBERTBinaryClassifier.ipynb
Anychnn/unif
7b25ba0aac28c4e60dc4f9973d4338ad09c94177
[ "Apache-2.0" ]
9
2020-10-20T03:16:10.000Z
2022-02-21T02:33:28.000Z
examples/ALBERTBinaryClassifier.ipynb
Anychnn/unif
7b25ba0aac28c4e60dc4f9973d4338ad09c94177
[ "Apache-2.0" ]
23
2020-10-13T01:10:16.000Z
2022-03-01T01:45:45.000Z
32.163636
281
0.588609
[ [ [ "import uf\n\nprint(uf.__version__)", "beta v2.8.0\n" ], [ "model = uf.ALBERTBinaryClassifier('../demo/albert_config.json', '../demo/vocab.txt')\nprint(model)", "uf.ALBERTBinaryClassifier(config_file='../demo/albert_config.json', vocab_file='../demo/vocab.txt', max_seq_length=128, label_size=None, label_weight=None, init_checkpoint=None, output_dir=None, gpu_ids=None, drop_pooler=False, do_lower_case=True, truncate_method='LIFO')\n" ], [ "X = ['天亮以前说再见', '笑着泪流满面', '去迎接应该你的', '更好的明天']\ny = [[0, 2], [1], [1], []]", "_____no_output_____" ] ], [ [ "# 训练", "_____no_output_____" ] ], [ [ "model.fit(X, y, total_steps=20)", "WARNING:tensorflow:From /Users/geyingli/Library/Python/3.8/lib/python/site-packages/tensorflow/python/util/dispatch.py:201: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n" ] ], [ [ "# 推理", "_____no_output_____" ] ], [ [ "model.predict(X)", "INFO:tensorflow:Time usage 0m-2.16s, 0.46 steps/sec, 1.85 examples/sec\n" ] ], [ [ "# 评分", "_____no_output_____" ] ], [ [ "model.score(X, y)", "INFO:tensorflow:Time usage 0m-0.98s, 1.02 steps/sec, 4.10 examples/sec\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0ddb2e5e053a6c391fafdcdf2ce4039599d6550
557,787
ipynb
Jupyter Notebook
Lab 2 - Acoustic Noise Classification/[SVA]_Lab_2_Acoustic_Noise_Classification.ipynb
northuraldude/ETU-2021-SVA
3e98a791fc08d1f37010edcf3cd09a5a3d5b7117
[ "MIT" ]
null
null
null
Lab 2 - Acoustic Noise Classification/[SVA]_Lab_2_Acoustic_Noise_Classification.ipynb
northuraldude/ETU-2021-SVA
3e98a791fc08d1f37010edcf3cd09a5a3d5b7117
[ "MIT" ]
null
null
null
Lab 2 - Acoustic Noise Classification/[SVA]_Lab_2_Acoustic_Noise_Classification.ipynb
northuraldude/ETU-2021-SVA
3e98a791fc08d1f37010edcf3cd09a5a3d5b7117
[ "MIT" ]
null
null
null
143.611483
39,162
0.763639
[ [ [ "# <center>АНАЛИЗ ЗВУКА И ГОЛОСА</center>\n\n**Преподаватель**: Рыбин Сергей Витальевич\n\n**Группа**: 6304\n\n**Студент**: Белоусов Евгений Олегович", "_____no_output_____" ], [ "## <center>Классификация акустических шумов</center>\n\n*Необоходимый результат: неизвестно*", "_____no_output_____" ] ], [ [ "import os\nimport IPython\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\nimport librosa\nimport librosa.display\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ], [ "from tqdm.notebook import tqdm\n\nfrom tensorflow.keras import losses, models, optimizers\nfrom tensorflow.keras.activations import relu, softmax\nfrom tensorflow.keras.callbacks import (EarlyStopping, ModelCheckpoint, TensorBoard)\nfrom tensorflow.keras.layers import (Input, Dense, Convolution2D, BatchNormalization,\n Flatten, MaxPool2D, Activation)\nfrom tensorflow.keras.utils import Sequence\nfrom tensorflow.keras import backend as K\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold, train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix", "_____no_output_____" ], [ "# from google.colab import drive\n# drive.mount('/content/drive')", "_____no_output_____" ], [ "# Ручная часть работы - директория с набором аудиофайлов, набор меток классов, учёт разновидностей имён файлов\npredictions = \"predictions\"\ndirectory = \"./content/drive/MyDrive/Training\"\n\nlabels = [\"background\",\n \"bags\",\n \"door\",\n \"keyboard\",\n \"knocking_door\",\n \"ring\",\n \"speech\",\n \"tool\"]\n\nnum_classes = len(labels)\n\nfilename_search = {\"background\": [\"background_\"],\n \"bags\": [\"bags_\", \"bg_\", \"t_bags_\"],\n \"door\": [\"door_\", \"d_\", \"t_door_\"],\n \"keyboard\": [\"keyboard_\", \"t_keyboard_\", \"k_\"],\n \"knocking_door\": [\"knocking_door_\", \"tt_kd_\", \"t_knocking_door_\"],\n \"ring\": [\"ring_\", \"t_ring_\"],\n \"speech\": [\"speech_\"],\n \"tool\": [\"tool_\"]}", "_____no_output_____" ], [ "# Параметры конфигурации для будущей модели нейросети\nclass Config(object):\n def __init__(self,\n sampling_rate=16000, audio_duration=7, n_classes=10, use_mfcc=True,\n n_mfcc=20, n_folds=10, n_features=100, learning_rate=0.0001, max_epochs=50):\n self.sampling_rate = sampling_rate\n self.audio_duration = audio_duration\n self.n_classes = n_classes\n self.use_mfcc = use_mfcc\n self.n_mfcc = n_mfcc\n self.n_folds = n_folds\n self.learning_rate = learning_rate\n self.max_epochs = max_epochs\n self.n_features = n_features\n self.audio_length = self.sampling_rate * self.audio_duration\n if self.use_mfcc:\n self.dim = (self.n_mfcc, 1 + int(np.floor(self.audio_length / 512)), 1)\n else:\n self.dim = (self.audio_length, 1)", "_____no_output_____" ], [ "# Извлечение метки аудиошума из названия аудиофайла\ndef get_label_from_filename(filename):\n for key, value in filename_search.items():\n for val in value:\n if (filename.find(val) == 0):\n return key", "_____no_output_____" ], [ "# Подготовка датафрейма\ndef prepare_dataframe(directory):\n files = ([f.path for f in os.scandir(directory) if f.is_file()])\n # Создание датафрейма по предоставленной в условии задачи схеме\n df = pd.DataFrame(columns=[\"filename\", \"label\"])\n\n # Проход по всем аудиофайлам в наборе\n for path in tqdm(files[:]):\n filename = os.path.splitext(os.path.basename(path).strip())[0]\n label = get_label_from_filename(filename)\n \n # 
Append the processed audio file to the dataframe\n row = pd.Series([filename, label], index = df.columns)\n df = df.append(row, ignore_index=True)\n \n return df", "_____no_output_____" ], [ "# Extract features from the audio file set\ndef prepare_data(config, directory, df):\n X = np.empty(shape=(df.shape[0], config.dim[0], config.dim[1], 1))\n files = ([f.path for f in os.scandir(directory) if f.is_file()])\n\n # Set the audio file duration\n input_length = config.audio_length\n\n i = 0\n # Iterate over all audio files in the set\n for path in tqdm(files[:]):\n filename = os.path.splitext(os.path.basename(path).strip())[0]\n\n data, sr = librosa.load(path, sr=config.sampling_rate)\n\n # Trim/pad the audio file duration to the one given in the configuration parameters\n if len(data) > input_length:\n max_offset = len(data) - input_length\n offset = np.random.randint(max_offset)\n data = data[offset:(input_length+offset)]\n else:\n if input_length > len(data):\n max_offset = input_length - len(data)\n offset = np.random.randint(max_offset)\n else:\n offset = 0\n data = np.pad(data, (offset, input_length - len(data) - offset), \"constant\")\n \n # Extract MFCC features with the librosa library\n data = librosa.feature.mfcc(data, sr=config.sampling_rate, n_mfcc=config.n_mfcc)\n data = np.expand_dims(data, axis=-1)\n X[i,] = data\n i = i + 1\n\n return X", "_____no_output_____" ], [ "# Convolutional neural network model\ndef get_2d_conv_model(config):\n num_classes = config.n_classes\n \n inp = Input(shape=(config.dim[0], config.dim[1], 1))\n \n x = Convolution2D(32, (4,10), padding=\"same\")(inp)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n \n x = Convolution2D(32, (4,10), padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n \n x = Convolution2D(32, (4,10), padding=\"same\")(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n x = MaxPool2D()(x)\n\n x = Flatten()(x)\n x = Dense(64)(x)\n x = BatchNormalization()(x)\n x = Activation(\"relu\")(x)\n \n out = Dense(num_classes, activation=softmax)(x)\n \n model = models.Model(inputs=inp, outputs=out)\n opt = optimizers.Adam(config.learning_rate)\n model.compile(optimizer=opt, loss=losses.SparseCategoricalCrossentropy(), metrics=['acc'])\n \n return model", "_____no_output_____" ], [ "# Classification confusion matrix\ndef plot_confusion_matrix(predictions, y):\n max_test = y\n max_predictions = np.argmax(predictions, axis=1)\n matrix = confusion_matrix(max_test, max_predictions)\n plt.figure(figsize=(12, 8))\n sns.heatmap(matrix, xticklabels=labels, yticklabels=labels, annot=True,\n linewidths = 0.1, fmt=\"d\", cmap = 'YlGnBu');\n plt.title(\"Classification confusion matrix\", fontsize = 15)\n plt.ylabel(\"True class\")\n plt.xlabel(\"Predicted class\")\n plt.show()", "_____no_output_____" ], [ "# Prepare the dataframe\ndf = prepare_dataframe(directory)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# Serialize the dataframe to save time later on\ndf.to_pickle(\"./content/drive/MyDrive/SVA_lab_2_dataframe.pkl\")", "_____no_output_____" ], [ "# Deserialize the previously saved dataframe\ndf = pd.read_pickle(\"./content/drive/MyDrive/SVA_lab_2_dataframe.pkl\")", "_____no_output_____" ], [ "# Count the number of recordings of each class\ndf[\"label\"].value_counts()", "_____no_output_____" ], [ "# Represent the class label values as integers\nencode = LabelEncoder()\nencoded_labels = 
encode.fit_transform(df['label'].to_numpy())\ndf = df.assign(label=encoded_labels)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "# Set the configuration parameters\nconfig = Config(n_classes=num_classes, n_folds=10, n_mfcc=20)", "_____no_output_____" ], [ "X_train = prepare_data(config, directory, df)\nprint(X_train.shape)", "_____no_output_____" ], [ "# Data normalization\n\nmean = np.mean(X_train, axis=0)\nstd = np.std(X_train, axis=0)\n\nX_train = (X_train - mean)/std", "_____no_output_____" ], [ "X_train", "_____no_output_____" ], [ "# EVALUATION ON THE TEST DATASET\n\nfiles = ([f.path for f in os.scandir(\"./content/drive/MyDrive/Test\") if f.is_file()])\n# Create a dataframe following the schema given in the task description\nsubmission = pd.DataFrame(columns=[\"fname\"])\n\n# Iterate over all audio files in the set\nfor path in tqdm(files[:]):\n filename = os.path.splitext(os.path.basename(path).strip())[0]\n \n # Append the audio file name to the dataframe\n row = pd.Series([filename], index = submission.columns)\n submission = submission.append(row, ignore_index=True)", "_____no_output_____" ], [ "submission.head()", "_____no_output_____" ], [ "X_test = prepare_data(config, \"./content/drive/MyDrive/Test\", submission)", "_____no_output_____" ], [ "# Data normalization\n\nmean = np.mean(X_test, axis=0)\nstd = np.std(X_test, axis=0)\n\nX_test = (X_test - mean)/std", "_____no_output_____" ], [ "X_test", "_____no_output_____" ], [ "import shutil\n\nif not os.path.exists(predictions):\n os.mkdir(predictions)\nif os.path.exists(\"./content/drive/MyDrive/\" + predictions):\n shutil.rmtree(\"./content/drive/MyDrive/\" + predictions)", "_____no_output_____" ], [ "# Cross-validation uses StratifiedKFold - a variant of the KFold algorithm that returns\n# stratified folds of the data: each fold contains approximately the same percentage of samples of each target class\n# as the full set.\nskf = StratifiedKFold(n_splits=config.n_folds)\ny_train = df[\"label\"].values\ny_train = np.stack(y_train[:])\nmodel = get_2d_conv_model(config)\ni = 0\nfor train_split, val_split in skf.split(X_train, y_train):\n K.clear_session()\n \n # Split the available dataset into training and validation subsets\n X, y, X_val, y_val = X_train[train_split], y_train[train_split], X_train[val_split], y_train[val_split]\n \n # Callback functions for the Keras model\n # During training, save the weights of the best model for potential further use\n checkpoint = ModelCheckpoint('best_%d.h5'%i, monitor='val_loss', verbose=1, save_best_only=True)\n early = EarlyStopping(monitor=\"val_loss\", mode=\"min\", patience=5)\n callbacks_list = [checkpoint, early]\n print(\"#\"*50)\n print(\"Fold: \", i)\n model = get_2d_conv_model(config)\n history = model.fit(X, y, validation_data=(X_val, y_val), callbacks=callbacks_list, batch_size=256, epochs=config.max_epochs)\n model.load_weights('best_%d.h5'%i)\n \n # Save the model predictions on the training data\n print(\"TRAIN PREDICTIONS: \", i)\n predictions = model.predict(X_train, batch_size=256)\n save_train_preds_path = \"./predictions/train_predictions_{:d}.npy\".format(i)\n np.save(save_train_preds_path, predictions)\n plot_confusion_matrix(predictions, y_train)\n \n # Save the model predictions on the test data\n print(\"TEST PREDICTIONS: \", i)\n predictions = model.predict(X_test, batch_size=256)\n save_test_preds_path = \"./predictions/test_predictions_{:d}.npy\".format(i)\n np.save(save_test_preds_path, predictions)\n\n # # 
Create the results file (submission)\n # top_3 = np.array(labels)[np.argsort(-predictions, axis=1)[:, :3]]\n # predicted_labels = [' '.join(list(x)) for x in top_3]\n # df_test['label'] = predicted_labels\n # save_preds_path = \"./predictions/predictions_{:d}.npy\".format(i)\n # df_test[['label']].to_csv(save_preds_path)\n \n j = 0\n for prob in predictions:\n #print(prob)\n #print(np.argmax(prob))\n submission.loc[j,'score'] = max(prob)\n prob_index = list(prob).index(max(prob))\n #print(prob_index)\n submission.loc[j,'label'] = prob_index\n j += 1\n\n submission_result = submission.copy()\n submission_result['label'] = encode.inverse_transform(np.array(submission['label']).astype(int))\n submission = submission_result\n save_submission_path = \"./predictions/submission_{:d}.npy\".format(i)\n submission.to_csv(save_submission_path.format(i), index=False)\n \n i += 1", "##################################################\nFold: 0\nEpoch 1/50\n13/13 [==============================] - ETA: 0s - loss: 1.9035 - acc: 0.2924\nEpoch 00001: val_loss improved from inf to 2.05545, saving model to best_0.h5\n13/13 [==============================] - 14s 1s/step - loss: 1.9035 - acc: 0.2924 - val_loss: 2.0554 - val_acc: 0.1945\nEpoch 2/50\n13/13 [==============================] - ETA: 0s - loss: 1.3596 - acc: 0.5897\nEpoch 00002: val_loss improved from 2.05545 to 1.82722, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 1.3596 - acc: 0.5897 - val_loss: 1.8272 - val_acc: 0.5479\nEpoch 3/50\n13/13 [==============================] - ETA: 0s - loss: 1.1423 - acc: 0.6893\nEpoch 00003: val_loss improved from 1.82722 to 1.74156, saving model to best_0.h5\n13/13 [==============================] - 13s 995ms/step - loss: 1.1423 - acc: 0.6893 - val_loss: 1.7416 - val_acc: 0.6274\nEpoch 4/50\n13/13 [==============================] - ETA: 0s - loss: 0.9896 - acc: 0.7441\nEpoch 00004: val_loss did not improve from 1.74156\n13/13 [==============================] - 13s 986ms/step - loss: 0.9896 - acc: 0.7441 - val_loss: 1.7512 - val_acc: 0.5534\nEpoch 5/50\n13/13 [==============================] - ETA: 0s - loss: 0.8792 - acc: 0.7810\nEpoch 00005: val_loss did not improve from 1.74156\n13/13 [==============================] - 13s 990ms/step - loss: 0.8792 - acc: 0.7810 - val_loss: 1.7438 - val_acc: 0.4932\nEpoch 6/50\n13/13 [==============================] - ETA: 0s - loss: 0.7968 - acc: 0.8115\nEpoch 00006: val_loss improved from 1.74156 to 1.68326, saving model to best_0.h5\n13/13 [==============================] - 13s 992ms/step - loss: 0.7968 - acc: 0.8115 - val_loss: 1.6833 - val_acc: 0.5123\nEpoch 7/50\n13/13 [==============================] - ETA: 0s - loss: 0.7258 - acc: 0.8352\nEpoch 00007: val_loss improved from 1.68326 to 1.62308, saving model to best_0.h5\n13/13 [==============================] - 13s 991ms/step - loss: 0.7258 - acc: 0.8352 - val_loss: 1.6231 - val_acc: 0.5178\nEpoch 8/50\n13/13 [==============================] - ETA: 0s - loss: 0.6704 - acc: 0.8541\nEpoch 00008: val_loss improved from 1.62308 to 1.58114, saving model to best_0.h5\n13/13 [==============================] - 13s 993ms/step - loss: 0.6704 - acc: 0.8541 - val_loss: 1.5811 - val_acc: 0.5123\nEpoch 9/50\n13/13 [==============================] - ETA: 0s - loss: 0.6143 - acc: 0.8724\nEpoch 00009: val_loss improved from 1.58114 to 1.57706, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.6143 - acc: 0.8724 - val_loss: 1.5771 - val_acc: 0.5068\nEpoch 
10/50\n13/13 [==============================] - ETA: 0s - loss: 0.5652 - acc: 0.8861\nEpoch 00010: val_loss improved from 1.57706 to 1.54073, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.5652 - acc: 0.8861 - val_loss: 1.5407 - val_acc: 0.5096\nEpoch 11/50\n13/13 [==============================] - ETA: 0s - loss: 0.5188 - acc: 0.8967\nEpoch 00011: val_loss did not improve from 1.54073\n13/13 [==============================] - 13s 995ms/step - loss: 0.5188 - acc: 0.8967 - val_loss: 1.5598 - val_acc: 0.4877\nEpoch 12/50\n13/13 [==============================] - ETA: 0s - loss: 0.4777 - acc: 0.9108\nEpoch 00012: val_loss did not improve from 1.54073\n13/13 [==============================] - 13s 991ms/step - loss: 0.4777 - acc: 0.9108 - val_loss: 1.5500 - val_acc: 0.4959\nEpoch 13/50\n13/13 [==============================] - ETA: 0s - loss: 0.4423 - acc: 0.9205\nEpoch 00013: val_loss did not improve from 1.54073\n13/13 [==============================] - 13s 985ms/step - loss: 0.4423 - acc: 0.9205 - val_loss: 1.5495 - val_acc: 0.5205\nEpoch 14/50\n13/13 [==============================] - ETA: 0s - loss: 0.4081 - acc: 0.9287\nEpoch 00014: val_loss improved from 1.54073 to 1.51611, saving model to best_0.h5\n13/13 [==============================] - 13s 997ms/step - loss: 0.4081 - acc: 0.9287 - val_loss: 1.5161 - val_acc: 0.5260\nEpoch 15/50\n13/13 [==============================] - ETA: 0s - loss: 0.3793 - acc: 0.9440\nEpoch 00015: val_loss improved from 1.51611 to 1.44754, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.3793 - acc: 0.9440 - val_loss: 1.4475 - val_acc: 0.5397\nEpoch 16/50\n13/13 [==============================] - ETA: 0s - loss: 0.3528 - acc: 0.9513\nEpoch 00016: val_loss improved from 1.44754 to 1.42411, saving model to best_0.h5\n13/13 [==============================] - 13s 998ms/step - loss: 0.3528 - acc: 0.9513 - val_loss: 1.4241 - val_acc: 0.5479\nEpoch 17/50\n13/13 [==============================] - ETA: 0s - loss: 0.3261 - acc: 0.9552\nEpoch 00017: val_loss improved from 1.42411 to 1.41346, saving model to best_0.h5\n13/13 [==============================] - 13s 1000ms/step - loss: 0.3261 - acc: 0.9552 - val_loss: 1.4135 - val_acc: 0.5507\nEpoch 18/50\n13/13 [==============================] - ETA: 0s - loss: 0.3064 - acc: 0.9580\nEpoch 00018: val_loss improved from 1.41346 to 1.32375, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.3064 - acc: 0.9580 - val_loss: 1.3237 - val_acc: 0.5699\nEpoch 19/50\n13/13 [==============================] - ETA: 0s - loss: 0.2879 - acc: 0.9656\nEpoch 00019: val_loss did not improve from 1.32375\n13/13 [==============================] - 13s 993ms/step - loss: 0.2879 - acc: 0.9656 - val_loss: 1.3336 - val_acc: 0.5753\nEpoch 20/50\n13/13 [==============================] - ETA: 0s - loss: 0.2670 - acc: 0.9701\nEpoch 00020: val_loss improved from 1.32375 to 1.30981, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.2670 - acc: 0.9701 - val_loss: 1.3098 - val_acc: 0.5644\nEpoch 21/50\n13/13 [==============================] - ETA: 0s - loss: 0.2514 - acc: 0.9756\nEpoch 00021: val_loss improved from 1.30981 to 1.16497, saving model to best_0.h5\n13/13 [==============================] - 13s 994ms/step - loss: 0.2514 - acc: 0.9756 - val_loss: 1.1650 - val_acc: 0.5890\nEpoch 22/50\n13/13 [==============================] - ETA: 0s - loss: 0.2358 - acc: 0.9784\nEpoch 00022: val_loss did 
not improve from 1.16497\n13/13 [==============================] - 13s 993ms/step - loss: 0.2358 - acc: 0.9784 - val_loss: 1.1731 - val_acc: 0.5918\nEpoch 23/50\n13/13 [==============================] - ETA: 0s - loss: 0.2233 - acc: 0.9829\nEpoch 00023: val_loss did not improve from 1.16497\n13/13 [==============================] - 13s 991ms/step - loss: 0.2233 - acc: 0.9829 - val_loss: 1.2243 - val_acc: 0.5836\nEpoch 24/50\n13/13 [==============================] - ETA: 0s - loss: 0.2116 - acc: 0.9839\nEpoch 00024: val_loss improved from 1.16497 to 1.12985, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.2116 - acc: 0.9839 - val_loss: 1.1299 - val_acc: 0.5945\nEpoch 25/50\n13/13 [==============================] - ETA: 0s - loss: 0.2017 - acc: 0.9823\nEpoch 00025: val_loss improved from 1.12985 to 1.01099, saving model to best_0.h5\n13/13 [==============================] - 13s 997ms/step - loss: 0.2017 - acc: 0.9823 - val_loss: 1.0110 - val_acc: 0.6219\nEpoch 26/50\n13/13 [==============================] - ETA: 0s - loss: 0.1913 - acc: 0.9854\nEpoch 00026: val_loss improved from 1.01099 to 0.98364, saving model to best_0.h5\n13/13 [==============================] - 13s 1s/step - loss: 0.1913 - acc: 0.9854 - val_loss: 0.9836 - val_acc: 0.6137\nEpoch 27/50\n13/13 [==============================] - ETA: 0s - loss: 0.1779 - acc: 0.9903\nEpoch 00027: val_loss improved from 0.98364 to 0.89237, saving model to best_0.h5\n13/13 [==============================] - 13s 1000ms/step - loss: 0.1779 - acc: 0.9903 - val_loss: 0.8924 - val_acc: 0.6466\nEpoch 28/50\n13/13 [==============================] - ETA: 0s - loss: 0.1726 - acc: 0.9893\nEpoch 00028: val_loss improved from 0.89237 to 0.83660, saving model to best_0.h5\n13/13 [==============================] - 13s 995ms/step - loss: 0.1726 - acc: 0.9893 - val_loss: 0.8366 - val_acc: 0.6685\nEpoch 29/50\n13/13 [==============================] - ETA: 0s - loss: 0.1605 - acc: 0.9918\nEpoch 00029: val_loss did not improve from 0.83660\n13/13 [==============================] - 13s 987ms/step - loss: 0.1605 - acc: 0.9918 - val_loss: 0.8951 - val_acc: 0.6438\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0ddc817cc53d03c2d0b42685ce58ff5bac82cb0
723,490
ipynb
Jupyter Notebook
group86_other_algorithms.ipynb
dukeNashor/ChessMaster
0b1f7b75a76e5c9129e73e0722af9e5b3b76f033
[ "MIT" ]
null
null
null
group86_other_algorithms.ipynb
dukeNashor/ChessMaster
0b1f7b75a76e5c9129e73e0722af9e5b3b76f033
[ "MIT" ]
null
null
null
group86_other_algorithms.ipynb
dukeNashor/ChessMaster
0b1f7b75a76e5c9129e73e0722af9e5b3b76f033
[ "MIT" ]
null
null
null
242.375209
424,308
0.904137
[ [ [ "# COMP5318 - Machine Learning and Data Mining: Assignment 2\n<div style=\"text-align: right\"> Group 86 </div>\n<div style=\"text-align: right\"> tlin4302 | 470322974 | Jenny Tsai-chen Lin </div>\n<div style=\"text-align: right\"> jsun4242 | 500409987 | Jiawei Sun </div>\n<div style=\"text-align: right\"> jyan2937 | 480546614 | Jinxuan Yang </div>", "_____no_output_____" ], [ "## The notebook includes sections :\n Section 0. Hardware and software specifications \n Section 1. Library and general functions \n Section 2. Data pre-processing\n Section 3. Implement algorithms\n 3.1 AdaBoost Classifier\n 3.2 Convolutional Neural Network Classifier\n 3.3 Support-Vector-Machine Classifier\n Section 4. Compare result between algorithms in train dataset \n Section 5: Best perfroming algorithms in testing data (we will submit this in seperate notebook as well)", "_____no_output_____" ], [ "## CODE RUNNING INSTRUCTIONS:\n Instruction:\n Simply change the switches in **0.Switches** blocks and run all.\n \n Dataset directory:\n Same directory as the jupyter notebook, in the format of :\n ---[current dir]\n |----[This file]\n |----[dataset]\n |----test\n |----train\n \n \n The default parameters will used the saved model to run simple tests,\n and load confusion matrices, accuracies, etc. from disk and plot them\n for display purpose.", "_____no_output_____" ], [ "### Hardware and software specifcations\nhardware:\n 1. CPU: Intel i7-8700K @ 3.70GHz\n 2. RAM: 64G DDR4 3000MHz\n 3. Graphics: NVidia GeForce GTX 1080Ti\n 4. Chipset: Z370", "_____no_output_____" ], [ "### Software specifications", "_____no_output_____" ] ], [ [ "import os, platform\nprint('OS name:', os.name, ', system:', platform.system(), ', release:', platform.release())\nimport sys\nprint(\"Anaconda version:\")\n#!conda list anaconda\nprint(\"Python version: \", sys.version)\nprint(\"Python version info: \", sys.version_info)\nimport PIL\nfrom PIL import Image\nprint(\"PIL version: \", PIL.__version__)\nimport matplotlib\nimport matplotlib.pyplot as plt\nprint(\"Matplotlib version: \", matplotlib.__version__)\n#import tensorflow as tf\n#print(\"Keras version:\", tf.keras.__version__)\nimport cv2\nprint(\"OpenCV version: \", cv2.__version__)\nimport numpy as np\nprint(\"nump version: \", np.__version__)", "OS name: nt , system: Windows , release: 10\nAnaconda version:\nPython version: 3.8.3 (default, Jul 2 2020, 17:30:36) [MSC v.1916 64 bit (AMD64)]\nPython version info: sys.version_info(major=3, minor=8, micro=3, releaselevel='final', serial=0)\nPIL version: 7.2.0\nMatplotlib version: 3.2.2\nOpenCV version: 4.4.0\nnump version: 1.18.5\n" ] ], [ [ "## Section 0. Switches (Default settings is great for demo purpose)", "_____no_output_____" ], [ "#### Load saved model or run training?", "_____no_output_____" ] ], [ [ "load_saved_model = True", "_____no_output_____" ] ], [ [ "#### Run preprocessing benchmark or not?", "_____no_output_____" ] ], [ [ "run_preprocessing_benchmark = True", "_____no_output_____" ] ], [ [ "#### Run test code for 3 classifiers?", "_____no_output_____" ] ], [ [ "run_test_code_for_classifiers = True", "_____no_output_____" ] ], [ [ "#### number of threads when preprocessing images", "_____no_output_____" ] ], [ [ "g_thread_num = 6", "_____no_output_____" ] ], [ [ "#### Run hyper parameter tuning? (Slow if turned on!)", "_____no_output_____" ] ], [ [ "# Caution: Slow if turned on.\ndo_hyper_parameter_tuning = False", "_____no_output_____" ] ], [ [ "#### Run 10-fold cross validation? 
(Slow if turned on)", "_____no_output_____" ] ], [ [ "# Caution: Slow if turned on.\nrun_ten_fold = False", "_____no_output_____" ] ], [ [ "## Section 1. Library and general functions ", "_____no_output_____" ] ], [ [ "# Go to the anaconda prompt to install the package imblearn\n# anaconda: conda install -c glemaitre imbalanced-learn\n#pip install kmeans-smote\n\nfrom skimage import io, transform\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport cv2\n\nimport time", "_____no_output_____" ] ], [ [ "### global variables", "_____no_output_____" ] ], [ [ "# choose one of the two lines below, depending on the file location\n\ng_dataset_dir = \"./dataset/\" \n#g_dataset_dir = \"../dataset/\" \n\n\na_random_file = \"./dataset/train/1b1B1b2-2pK2q1-4p1rB-7k-8-8-3B4-3rb3.jpeg\" \n#a_random_file = \"../dataset/train/1b1B1b2-2pK2q1-4p1rB-7k-8-8-3B4-3rb3.jpeg\" \n\nsaved_model_path = \"./saved_model/\"\nabc_model_file = saved_model_path + \"abc_dump.pkl\"\nsvc_model_file = saved_model_path + \"svc_dump.pkl\"\ncnn_model_file = saved_model_path + \"cnn_weights\"\n\nten_fold_result_path = \"./ten_fold_results/\"\n\n\n# define global variables \n\ng_train_dir = g_dataset_dir + \"/train/\"\ng_test_dir = g_dataset_dir + \"/test/\"\n\ng_image_size = 400\n\ng_grid_row = 8\ng_grid_col = 8\n\ng_grid_num = g_grid_row * g_grid_col\ng_grid_size = int(g_image_size / g_grid_row)\n\n\n#Processing 1 - scale down \ng_down_sampled_size = 200\ng_down_sampled_grid_size = int(g_grid_size / (g_image_size / g_down_sampled_size))\n\n# global instance of mapping of char vs chess pieces\n# reference: Forsyth–Edwards Notation, https://en.wikipedia.org/wiki/Forsyth%E2%80%93Edwards_Notation\n# \n# pawn = \"P\", knight = \"N\", bishop = \"B\", rook = \"R\", queen = \"Q\" and king = \"K\"\n# White pieces are designated using upper-case letters (\"PNBRQK\") while black pieces use lowercase (\"pnbrqk\")\n# we use 0 to note an empty grid.\n# 13 items in total.\n\ng_piece_mapping = {\n \"P\" : \"pawn\",\n \"N\" : \"knight\",\n \"B\" : \"bishop\",\n \"R\" : \"rook\",\n \"Q\" : \"queen\",\n \"K\" : \"king\",\n\n \"p\" : \"pawn\",\n \"n\" : \"knight\",\n \"b\" : \"bishop\",\n \"r\" : \"rook\",\n \"q\" : \"queen\",\n \"k\" : \"king\",\n\n \"0\" : \"empty_grid\"\n}\n\ng_num_labels = len(g_piece_mapping)\n\ng_labels = [\"P\",\n\"N\",\n\"B\",\n\"R\",\n\"Q\",\n\"K\",\n\"p\",\n\"n\",\n\"b\",\n\"r\",\n\"q\",\n\"k\",\n\"0\"]\n", "_____no_output_____" ] ], [ [ "### Helper codes for label & board", "_____no_output_____" ] ], [ [ "#DataHelper.py\n\nimport os\n\nimport cv2\nfrom skimage import io\nimport numpy as np\n\nimport glob\nimport h5py\n\n# get clean name by a path, where in our case this gets the FEN conveniently\ndef GetCleanNameByPath(file_name):\n return os.path.splitext(os.path.basename(file_name))[0]\n\n# get full paths to the files in a directory.\ndef GetFileNamesInDir(path_name, extension=\"*\", num_return = 0):\n if num_return == 0:\n return glob.glob(path_name + \"/*.\" + extension)\n else:\n return glob.glob(path_name + \"/*.\" + extension)[:num_return]\n\n# get name list\ndef GetCleanNamesInDir(path_name, extension = \"*\", num_return = 0):\n names = GetFileNamesInDir(path_name, extension)\n offset = len(extension) + 1\n clean_names = [os.path.basename(x)[:-offset] for x in names]\n if num_return == 0:\n return clean_names\n else:\n return clean_names[:num_return]\n\n# read dataset\ndef ReadImages(file_names, path = \"\", format = cv2.IMREAD_COLOR):\n if path == \"\":\n return [cv2.imread(f, format) for f in 
file_names]\n else:\n return [cv2.imread(path + \"/\" + f, format) for f in file_names]\n\n# read image by name\ndef ReadImage(file_name, gray = False):\n return io.imread(file_name, as_gray = gray)\n\n\n# h5py functions\n \n# read h5py file\n# we assume the labels and \ndef ReadH5pyFile(file_name, data_name):\n h5_buffer = h5py.File(file_name)\n return h5_buffer[data_name].copy()\n\n# write h5py file\ndef WriteH5pyFile(file_name, mat, data_name = \"dataset\"):\n with h5py.File(file_name, 'w') as f:\n f.create_dataset(data_name, data = mat)\n", "_____no_output_____" ], [ "#BoardHelper.py\n\nimport re\nimport string\nfrom collections import OrderedDict \n\nimport numpy as np\nimport skimage.util\nfrom skimage.util.shape import view_as_blocks\n\n#from ChessGlobalDefs import *\n\n#FEN TO LABELS OF SQUARES\ndef FENtoL(fen): \n rules = {\n r\"-\": r\"\",\n r\"1\": r\"0\",\n r\"2\": r\"00\",\n r\"3\": r\"000\",\n r\"4\": r\"0000\",\n r\"5\": r\"00000\",\n r\"6\": r\"000000\",\n r\"7\": r\"0000000\",\n r\"8\": r\"00000000\",\n }\n\n for key in rules.keys():\n fen = re.sub(key, rules[key], fen)\n\n return list(fen)\n\n\n# Label array to char list:\ndef LabelArrayToL(arr):\n rules = {\n 0 : \"P\",\n 1 : \"N\",\n 2 : \"B\",\n 3 : \"R\",\n 4 : \"Q\",\n 5 : \"K\",\n\n 6 : \"p\",\n 7 : \"n\",\n 8 : \"b\",\n 9 : \"r\",\n 10 : \"q\",\n 11 : \"k\",\n\n 12 : \"0\"\n }\n\n flattened = arr.flatten(order = \"C\")\n\n L = []\n\n for x in flattened:\n L.append(rules[x])\n\n return L\n\n# char list to FEN\ndef LtoFEN(L):\n\n FEN = \"\"\n \n for y in range(8):\n counter = 0\n for x in range(8):\n idx = x + y * 8\n char = L[idx]\n\n if char == \"0\":\n counter += 1\n if x == 7:\n FEN += str(counter)\n else:\n if counter:\n FEN += str(counter)\n counter = 0\n\n FEN += char\n if y != 7:\n FEN += \"-\"\n \n \n return FEN\n\n\n\n# FEN to one-hot encoding, in our case, it returns an 64 by 13 array, with each row as a one-hot to a grid.\ndef FENtoOneHot(fen):\n\n # this rule is in the same format as g_piece_mapping\n #rules = {\n # \"P\" : np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n # \"N\" : np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n # \"B\" : np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n # \"R\" : np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]),\n # \"Q\" : np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]),\n # \"K\" : np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]),\n # \n # \"p\" : np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]),\n # \"n\" : np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]),\n # \"b\" : np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]),\n # \"r\" : np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0]),\n # \"q\" : np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]),\n # \"k\" : np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0]),\n # \n # \"0\" : np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n #}\n rules = {\n \"P\" : 0,\n \"N\" : 1,\n \"B\" : 2,\n \"R\" : 3,\n \"Q\" : 4,\n \"K\" : 5,\n\n \"p\" : 6,\n \"n\" : 7,\n \"b\" : 8,\n \"r\" : 9,\n \"q\" : 10,\n \"k\" : 11,\n\n \"0\" : 12\n }\n\n L = FENtoL(fen)\n one_hot_array = np.zeros((g_grid_num, g_num_labels), dtype = np.int32) # 64 by 13\n for i, c in enumerate(L):\n one_hot_array[i, rules[c]] = 1\n\n return one_hot_array\n\n# get 8*8 char matrix\ndef LtoCharMat(l):\n if type(l) == list:\n return np.array(l).reshape((8,8))\n if type(l) == str:\n return np.array([l]).reshape((8,8))\n\ndef GetBoardCell(board_image, row = 0, col = 0, size = 50):\n return np.array(board_image)[row*size:(row+1)*size,col*size:(col+1)*size]\n\n# 
get grids of image\ndef ImageToGrids(image, grid_size_x, grid_size_y):\n return skimage.util.shape.view_as_blocks(image, block_shape = (grid_size_y, grid_size_x, 3)).squeeze(axis = 2)\n\n# get grids of image\ndef ImageToGrids_grey(image, grid_size_x, grid_size_y):\n return skimage.util.shape.view_as_blocks(image, block_shape = (grid_size_y, grid_size_x, 1)).squeeze(axis = 2)\n", "_____no_output_____" ] ], [ [ "## Section 2. Data pre-processing", "_____no_output_____" ], [ "### Pre-processing - generic", "_____no_output_____" ] ], [ [ "# split into 64 small square from 1 board\n# image resized to 400x 400 to 200x 200. 64 square at 25x 25 each\n\ndef PreprocessImage(image):\n image = transform.resize(image, (g_down_sampled_size, g_down_sampled_size), mode='constant')\n \n # 1st and 2nd dim is 8\n grids = ImageToGrids(image, g_down_sampled_grid_size, g_down_sampled_grid_size)\n\n return grids.reshape(g_grid_row * g_grid_col, g_down_sampled_grid_size, g_down_sampled_grid_size, 3)\n\n# split into 64 small square from 1 board -\n# output of x: number of image x 64 x 25 x 25 x 3 , y: number of image x 64 x 13\ndef func_generator(train_file_names):\n x = []\n y = []\n for image_file_name in train_file_names:\n img = ReadImage(image_file_name)\n x.append(PreprocessImage(img))\n y.append(np.array(FENtoOneHot(GetCleanNameByPath(image_file_name))))\n \n return np.array(x), np.array(y)", "_____no_output_____" ], [ "# Example output for a file - generic\nnum_train = 1\ntrain_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n\nx, y = func_generator(train_file_names)\n\nprint(\"x type :\", type(x))\nprint(\"x shape:\", x.shape)\nprint(\"y type :\", type(y))\nprint(\"y shape:\", y.shape)\nprint()\nplt.imshow(x[0][1])", "x type : <class 'numpy.ndarray'>\nx shape: (1, 64, 25, 25, 3)\ny type : <class 'numpy.ndarray'>\ny shape: (1, 64, 13)\n\n" ] ], [ [ "### Pre-processing - canny", "_____no_output_____" ] ], [ [ "# Processing image with canny\nimport cv2\n\ndef PreprocessImage_canny(image):\n image = cv2.Canny(image,100,200)\n image = transform.resize(image, (g_down_sampled_size, g_down_sampled_size), mode='constant')\n # 1st and 2nd dim is 8\n image = image[..., np.newaxis]\n grids = ImageToGrids_grey(image, g_down_sampled_grid_size, g_down_sampled_grid_size)\n return grids.reshape(g_grid_row * g_grid_col, g_down_sampled_grid_size, g_down_sampled_grid_size)\n\n# atomic func:\ndef func_canny(file_name):\n img = ReadImage(file_name)\n x = PreprocessImage_canny(img)\n y = np.array(FENtoL(GetCleanNameByPath(file_name)))\n return x, y\n \n# split into 64 small square from 1 - output of x: number of image x64 x 25 x25 , y:number of image x 64\ndef func_generator_canny(image_file_names):\n xs = []\n ys = []\n for image_file_name in image_file_names:\n x, y = func_canny(image_file_name)\n xs.append(x)\n ys.append(y)\n \n return xs, ys\n", "_____no_output_____" ], [ "# Example output for a file - canny\n\nnum_train = 1\ntrain_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n\nx, y = func_generator_canny(train_file_names)\n\nprint(\"x type :\", type(x))\nprint(\"x[0] type :\", type(x[0]))\nprint(\"x[0] shape:\", x[0].shape)\nprint(\"y type :\", type(y))\nprint(\"y[0] type :\", type(y[0]))\nprint(\"y[0] shape:\", y[0].shape)\n\nplt.imshow(x[0][1])", "x type : <class 'list'>\nx[0] type : <class 'numpy.ndarray'>\nx[0] shape: (64, 25, 25)\ny type : <class 'list'>\ny[0] type : <class 'numpy.ndarray'>\ny[0] shape: (64,)\n" ] ], [ [ "### 
Pre-processing - SIFT ", "_____no_output_____" ] ], [ [ "# Processing image with sift\nimport cv2\n\ndef ExtractSIFTForGrid(board_image, row, col, center_x = 25, center_y = 25, radius = 45):\n kps = [cv2.KeyPoint(x = center_x + 50 * col, y = center_y + 50 * row, _size = 45)]\n \n # USE THE CORRECT VERSION OF CV2\n if cv2.__version__ == \"4.4.0\":\n keypoints, descriptors = cv2.SIFT_create(edgeThreshold = 0).compute(image = board_image, keypoints = kps)\n else:\n keypoints, descriptors = cv2.xfeatures2d.SIFT_create(edgeThreshold = 0).compute(image = board_image, keypoints = kps)\n\n return keypoints[0], descriptors[0, :]\n\n\n\ndef PreprocessImage_sift(image):\n # 1st and 2nd dim is 8\n desc=[]\n for i in range(8):\n for j in range(8): \n kp, d= ExtractSIFTForGrid(image,i,j)\n desc.append(np.array(d))\n return desc\n\n# atomic func:\ndef func_sift(file_name):\n img = ReadImage(file_name)\n x = PreprocessImage_sift(img)\n y = np.array(FENtoL(GetCleanNameByPath(file_name)))\n return x, y\n \n \n# split into 64 small square from 1 - output of x: number of image x64 x128 , y:number of image x 64\ndef func_generator_sift(image_file_names):\n xs = []\n ys = []\n for image_file_name in image_file_names:\n x, y = func_sift(image_file_name)\n xs.append(np.array(x))\n ys.append(np.array(y))\n \n return xs, ys\n", "_____no_output_____" ], [ "# Example output for a file - SIFT\n\nnum_train = 1\ntrain_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n\nx, y = func_generator_sift(train_file_names)\n\nprint(\"x type :\", type(x))\nprint(\"x[0] type :\", type(x[0]))\nprint(\"x[0] shape:\", x[0].shape)\nprint(\"y type :\", type(y))\nprint(\"y[0] type :\", type(y[0]))\nprint(\"y[0] shape:\", y[0].shape)\n\nplt.bar(x = range(128), height = x[0][1])\nplt.title(y[0][1])\nplt.xticks(x = range(128))\nplt.show()", "x type : <class 'list'>\nx[0] type : <class 'numpy.ndarray'>\nx[0] shape: (64, 128)\ny type : <class 'list'>\ny[0] type : <class 'numpy.ndarray'>\ny[0] shape: (64,)\n" ] ], [ [ "### Example of image input, canny, SIFT", "_____no_output_____" ] ], [ [ "import cv2\nfrom skimage.filters import sobel\n\n#print(\"Sift: decriptor size:\", cv2.SIFT_create().descriptorSize())\nimg = ReadImage(a_random_file)\nimg1 = cv2.Canny(img,100,200)\nimg2= sobel(img[:,:,0])\nprint(img.shape)\nprint(img1.shape)\nprint(img2.shape)\n\nkp, desc = ExtractSIFTForGrid(img, 0, 1)\nkp2, desc2 = ExtractSIFTForGrid(img, 0, 3)\nkp3, desc3 = ExtractSIFTForGrid(img, 0, 5)\nkp4, desc4 = ExtractSIFTForGrid(img, 1, 3)\nimg_kp = cv2.drawKeypoints(img, [kp, kp2,kp3,kp4], img, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\nimg_kp1 = cv2.drawKeypoints(img1, [kp, kp2,kp3,kp4], img1, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n\nprint('file name:',a_random_file)\n\n\nplt.figure(figsize=(18,6))\nplt.suptitle('Image processing output', fontsize=16)\nplt.subplot(1, 3, 1)\nplt.imshow(img_kp, aspect='auto')\nplt.title('original image with keypoint')\n\nplt.subplot(1, 3, 2)\nplt.imshow(img2, aspect='auto')\nplt.title('Sobel image')\n\nplt.subplot(1, 3, 3)\nplt.imshow(img1, aspect='auto')\nplt.title('Canny image')\nplt.show()\n\nplt.figure(figsize=(15,6))\nplt.suptitle('Sift output for original image at squares', fontsize=16)\n#plt.tight_layout()\nplt.subplot(2, 2, 1)\nplt.title('square 0,1(b)')\nplt.bar(x = range(128), height = desc)\nplt.xticks(x = range(128))\n\nplt.subplot(2,2, 2)\nplt.title('square 0,3(B)')\nplt.bar(x = range(128), height = desc2)\nplt.xticks(x = 
range(128))\n\nplt.subplot(2,2,3)\nplt.title('square 0,5(b)')\nplt.bar(x = range(128), height = desc3)\nplt.xticks(x = range(128))\n\n\nplt.subplot(2,2,4)\nplt.title('square 1,3(K)')\nplt.bar(x = range(128), height = desc4)\nplt.xticks(x = range(128))\nplt.show()\n\n", "(400, 400, 3)\n(400, 400)\n(400, 400)\nfile name: ./dataset/train/1b1B1b2-2pK2q1-4p1rB-7k-8-8-3B4-3rb3.jpeg\n" ] ], [ [ "### Read image - generic, canny, sift - run time", "_____no_output_____" ] ], [ [ "if run_preprocessing_benchmark:\n start_time = time.time()\n num_train = 100\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n\n X_org,Y_org = func_generator(train_file_names)\n print('running time for generic 100 images')\n print('--- {} seconds ---'.format(time.time() - start_time))\n\n # note: the timer is not reset here, so the sift figure below also includes the generic pass\n X_sift,Y_sift = func_generator_sift(train_file_names)\n print('running time for sift 100 images')\n print('--- {} seconds ---'.format(time.time() - start_time))\n\n start_time = time.time()\n X_canny,Y_canny = func_generator_canny(train_file_names)\n print('running time for canny 100 images')\n print('--- {} seconds ---'.format(time.time() - start_time))", "running time for generic 100 images\n--- 1.9594979286193848 seconds ---\nrunning time for sift 100 images\n--- 28.49899911880493 seconds ---\nrunning time for canny 100 images\n--- 0.7249987125396729 seconds ---\n" ] ], [ [ "### Subset train data - high quality (image level)", "_____no_output_____" ] ], [ [ "#https://www.researchgate.net/post/How_to_use_clustering_to_reduce_data_set_samples\n\n#subset the data to high-quality images (at image level)\n\n#Output is the subset of file names whose boards contain at least min_piece pieces\ndef file_names_highquality(image_file_names, min_piece =15):\n names = np.array(image_file_names)\n idx_sub = []\n for idx in range(len(image_file_names)):\n y = FENtoL(GetCleanNameByPath(image_file_names[idx]))\n piece_count = 64 - y.count('0')\n if piece_count >= min_piece:\n idx_sub.append(idx) \n \n return np.array(image_file_names)[idx_sub]", "_____no_output_____" ], [ "# test \nif run_preprocessing_benchmark:\n file_name_reduced = file_names_highquality(train_file_names, min_piece =15)\n print('reduced file number from',len(train_file_names),'to',len(file_name_reduced))", "reduced file number from 100 to 21\n" ] ], [ [ "### Undersampling - square (grid level)", "_____no_output_____" ] ], [ [ "# install the package if needed.\nif run_preprocessing_benchmark:\n !pip install imblearn", "Looking in indexes: https://pypi.douban.com/simple\nRequirement already satisfied: imblearn in g:\anaconda3\lib\site-packages (0.0)\nRequirement already satisfied: imbalanced-learn in g:\anaconda3\lib\site-packages (from imblearn) (0.7.0)\nRequirement already satisfied: scikit-learn>=0.23 in g:\anaconda3\lib\site-packages (from imblearn) (0.23.2)\nRequirement already satisfied: scipy>=0.19.1 in g:\anaconda3\lib\site-packages (from imbalanced-learn->imblearn) (1.5.0)\nRequirement already satisfied: joblib>=0.11 in g:\anaconda3\lib\site-packages (from imbalanced-learn->imblearn) (0.16.0)\nRequirement already satisfied: numpy>=1.13.3 in g:\anaconda3\lib\site-packages (from imbalanced-learn->imblearn) (1.18.5)\nRequirement already satisfied: threadpoolctl>=2.0.0 in g:\anaconda3\lib\site-packages (from scikit-learn>=0.23->imbalanced-learn->imblearn) (2.1.0)\n" ], [ "#subset data (at square level)\n#ref: #https://www.researchgate.net/post/How_to_use_clustering_to_reduce_data_set_samples\n# implement using https://imbalanced-learn.readthedocs.io/en/stable/generated/imblearn.under_sampling.ClusterCentroids.html\n\nif run_preprocessing_benchmark:\n from imblearn.under_sampling import ClusterCentroids\n\n # output x: number of grids x 625, y: number of grids\n def undersampling_ClusterCentroids_canny(X,Y):\n trans = ClusterCentroids(random_state=0)\n length=len(np.array(Y))\n X= np.array(X).reshape(length*64,25*25)\n Y = np.array(Y).reshape(length*64)\n X_resampled, y_resampled = trans.fit_sample(X, Y)\n\n return X_resampled, y_resampled\n\n # output x: number of grids x 128, y: number of grids\n def undersampling_ClusterCentroids_sift(X,Y):\n trans = ClusterCentroids(random_state=0)\n length=len(np.array(Y))\n X= np.array(X).reshape(length*64,128)\n Y = np.array(Y).reshape(length*64)\n X_resampled, y_resampled = trans.fit_sample(X, Y)\n return X_resampled, y_resampled", "_____no_output_____" ], [ "if run_preprocessing_benchmark:\n # test canny -resampled\n\n start_time = time.time()\n X_resampled_canny, Y_resampled_canny = undersampling_ClusterCentroids_canny(X_canny,Y_canny)\n print('--- {} seconds ---'.format(time.time() - start_time))\n print('reduce grid number from',len(np.array(Y_canny))*64,'to',len(np.array(Y_resampled_canny)))\n\n # test sift -resampled\n\n start_time = time.time()\n X_resampled_sift, Y_resampled_sift = undersampling_ClusterCentroids_sift(X_sift,Y_sift)\n print('--- {} seconds ---'.format(time.time() - start_time))\n print('reduce grid number from',len(np.array(Y_sift))*64,'to',len(np.array(Y_resampled_sift)))", "--- 5.019449710845947 seconds ---\nreduce grid number from 6400 to 455\n--- 4.656001806259155 seconds ---\nreduce grid number from 6400 to 455\n" ] ], [ [ "### Read Image - parallel version", "_____no_output_____" ] ], [ [ "from joblib import Parallel, delayed\n\n# note: functions are first-class objects in Python. we pass one directly as a parameter.\ndef Preprocess_parallel(func, file_names, job_count = 6):\n result = Parallel(n_jobs=job_count)(delayed(func)(file_name) for file_name in file_names)\n return zip(*result)", "_____no_output_____" ], [ "if run_preprocessing_benchmark:\n start_time = time.time()\n num_train = 100\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n\n xs, ys = Preprocess_parallel(func_canny, train_file_names)\n print('--- {} seconds ---'.format(time.time() - start_time))", "--- 1.9814999103546143 seconds ---\n" ] ], [ [ "### Read file names", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # Using the FEN to identify grids with chess pieces\n\n num_train = 500 # small batch for training\n num_test= 500 # small batch for testing\n\n # Reading labels\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\",num_return = num_train)\n test_file_names = GetFileNamesInDir(g_test_dir, extension = \"jpeg\",num_return = num_test)", "_____no_output_____" ] ], [ [ "## Section 3. Implement algorithms", "_____no_output_____" ], [ "### Section 3.0 AdaBoostClassifier (ABC) Prototype", "_____no_output_____" ], [ "#### Import data - SIFT output ( n x 128) ", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # import data - train\n\n start_time = time.time()\n #xs_train, ys_train = Preprocess_parallel(train_file_names) \n xs_train_sift, ys_train_sift = Preprocess_parallel(func_sift, train_file_names) #[JL - I broke Preprocess_parallel. 
use this instead ]\n print('xs_train_sift, ys_train_sift generated:',len(xs_train_sift))\n print(np.array(xs_train_sift).shape, np.array(ys_train_sift).shape)\n print('--- {} seconds ---'.format(time.time() - start_time))\n\n # import data - test\n\n start_time = time.time()\n #xs_train, ys_train = Preprocess_parallel(train_file_names) \n xs_test_sift, ys_test_sift = Preprocess_parallel(func_sift, test_file_names) #[JL - I broke Preprocess_parallel. use this instead ]\n print('xs_test_sift, ys_test_sift generated:',len(xs_test_sift))\n print(np.array(xs_test_sift).shape, np.array(ys_test_sift).shape)\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### Initial prediction accuracy on sift data", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n xs_train_sift2= np.array(xs_train_sift).reshape(500*64,128)\n ys_train_sift2 = np.array(ys_train_sift).reshape(500*64)\n print('shape of train data:',np.array(xs_train_sift2).shape, np.array(ys_train_sift2).shape)\n\n # ABC classifier\n\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import classification_report\n start_time = time.time()\n\n ada = AdaBoostClassifier(n_estimators=50, learning_rate=0.5, random_state=42)\n ada.fit(xs_train_sift2,ys_train_sift2)\n y_pred= ada.predict(xs_train_sift2)\n\n #Evaluate its performance on the training set\n print(\"AdaBoost- accuracy on training set:\", accuracy_score(ys_train_sift2 ,y_pred))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### Import data - canny output ( n x 25 x 25 ) ", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # import data - train 500\n\n start_time = time.time()\n #xs_train, ys_train = Preprocess_parallel(train_file_names) \n xs_train_canny, ys_train_canny = func_generator_canny(train_file_names) #[JL - I broke Preprocess_parallel. use this instead ]\n print('xs_train_canny, ys_train_canny generated:',len(xs_train_canny))\n print(np.array(xs_train_canny).shape, np.array(ys_train_canny).shape)\n print('--- {} seconds ---'.format(time.time() - start_time))\n\n # import data - test 500\n\n start_time = time.time()\n #xs_train, ys_train = Preprocess_parallel(train_file_names) \n xs_test_canny, ys_test_canny = func_generator_canny(test_file_names) #[JL - I broke Preprocess_parallel. use this instead ]\n print('xs_test_canny, ys_test_canny generated:',len(xs_test_canny))\n print(np.array(xs_test_canny).shape, np.array(ys_test_canny).shape)\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### Initial prediction accuracy on canny data", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n xs_train_canny2= np.array(xs_train_canny).reshape(500*64,25*25)\n ys_train_canny2 = np.array(ys_train_canny).reshape(500*64)\n print('shape of train data:',np.array(xs_train_canny2).shape, np.array(ys_train_canny2).shape)\n\n # ABC classifier\n\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import classification_report\n start_time = time.time()\n\n ada = AdaBoostClassifier(n_estimators=50, learning_rate=0.5, random_state=42)\n ada.fit(xs_train_canny2,ys_train_canny2)\n y_pred= ada.predict(xs_train_canny2)\n\n #Evaluate its performance on the training set\n print(\"AdaBoost- accuracy on training set:\", accuracy_score(ys_train_canny2 ,y_pred))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "### Proceed with canny data", "_____no_output_____" ], [ "#### Split train into train and validation", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n #split 500 training data\n start_time = time.time()\n\n test_size=0.33\n\n from sklearn.model_selection import train_test_split\n X_train, X_val, Y_train, Y_val = train_test_split(\n xs_train_canny,ys_train_canny, test_size=0.33, random_state=0)\n print(np.array(X_train).shape, np.array(X_val).shape, np.array(Y_train).shape, np.array(Y_val).shape)\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### Undersampling training set ", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # undersample the canny training set\n\n start_time = time.time()\n\n X_resampled, Y_resampled = undersampling_ClusterCentroids_canny(X_train,Y_train)\n print('--- {} seconds ---'.format(time.time() - start_time))\n print('reduce grid number from',len(np.array(Y_train))*64,'to',len(np.array(Y_resampled)))\n print('shape of resampled data:',np.array(X_resampled).shape, np.array(Y_resampled).shape)", "_____no_output_____" ], [ "if do_hyper_parameter_tuning:\n # reshape train data, validation data, and testing data to match the resampled format\n\n X_train= np.array(X_train).reshape(335*64,25*25)\n Y_train = np.array(Y_train).reshape(335*64)\n print('shape of train data:',np.array(X_train).shape, np.array(Y_train).shape)\n\n X_val= np.array(X_val).reshape(165*64,25*25)\n Y_val = np.array(Y_val).reshape(165*64)\n print('shape of validation data:',np.array(X_val).shape, np.array(Y_val).shape)\n\n xs_test_canny= np.array(xs_test_canny).reshape(num_test*64,25*25)\n ys_test_canny = np.array(ys_test_canny).reshape(num_test*64)\n print('shape of test data:',np.array(xs_test_canny).shape, np.array(ys_test_canny).shape)\n", "_____no_output_____" ] ], [ [ "#### Base classifier- Tree (with canny & canny resampling)", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # Tree classifier - Canny data \n from sklearn.tree import DecisionTreeClassifier\n\n start_time = time.time()\n #Apply pre-pruning by limiting the depth of the tree - max_depth=5\n tree = DecisionTreeClassifier(criterion='gini', max_depth=5)\n tree.fit(X_train, Y_train)\n #Evaluate its performance on the training and test set\n print(\"Accuracy on training set: 
{:.3f}\".format(tree.score(X_train, Y_train)))\n print(\"Accuracy on validation set: {:.3f}\".format(tree.score(X_val, Y_val)))\n print(\"Accuracy on testing set: {:.3f}\".format(tree.score(xs_test_canny, ys_test_canny)))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ], [ "if do_hyper_parameter_tuning:\n # Tree classifier - Canny & resampling data\n from sklearn.tree import DecisionTreeClassifier\n\n start_time = time.time()\n #Apply pre-pruning by limiting the depth of the tree \n tree = DecisionTreeClassifier(criterion='gini', max_depth=5)\n tree.fit(X_resampled, Y_resampled)\n #Evaluate its performance on the training and test set\n print(\"Accuracy on training set: {:.3f}\".format(tree.score(X_train, Y_train)))\n print(\"Accuracy on validation set: {:.3f}\".format(tree.score(X_val, Y_val)))\n print(\"Accuracy on testing set: {:.3f}\".format(tree.score(xs_test_canny, ys_test_canny)))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### AdaBoostClassifier- Tree (with canny & canny resampling)", "_____no_output_____" ] ], [ [ "if do_hyper_parameter_tuning:\n # ABC classifier- Canny data\n\n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import classification_report\n start_time = time.time()\n\n ada = AdaBoostClassifier(n_estimators=50,\n base_estimator = DecisionTreeClassifier(criterion='gini', max_depth=5),\n learning_rate=0.5,\n random_state=42)\n ada.fit(X_train, Y_train)\n y_pred_val = ada.predict(X_val)\n y_pred_test = ada.predict(xs_test_canny)\n\n #Evaluate its performance on the training and test set\n print(\"AdaBoost- accuracy on validation set:\", accuracy_score(Y_val, y_pred_val))\n print(\"AdaBoost- accuracy on testing set:\", accuracy_score(ys_test_canny, y_pred_test))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ], [ "if do_hyper_parameter_tuning:\n # ABC classifier- Canny & resampling data\n\n start_time = time.time()\n\n ada = AdaBoostClassifier(n_estimators=50,\n base_estimator = DecisionTreeClassifier(criterion='gini', max_depth=5),\n learning_rate=0.5,\n random_state=42)\n ada.fit(X_resampled, Y_resampled)\n y_pred_val = ada.predict(X_val)\n y_pred_test = ada.predict(xs_test_canny)\n\n #Evaluate its performance on the training and test set\n print(\"AdaBoost- accuracy on validation set:\", accuracy_score(Y_val, y_pred_val))\n print(\"AdaBoost- accuracy on testing set:\", accuracy_score(ys_test_canny, y_pred_test))\n print('--- {} seconds ---'.format(time.time() - start_time))", "_____no_output_____" ] ], [ [ "#### Hyper parameter tunning", "_____no_output_____" ] ], [ [ "###https://machinelearningmastery.com/adaboost-ensemble-in-python/\n\nif do_hyper_parameter_tuning:\n \n from sklearn.ensemble import AdaBoostClassifier\n from sklearn.model_selection import GridSearchCV\n\n # AdaBoost\n param_grid = [{'n_estimators': np.arange(10,50,5),\n 'learning_rate': [0.01, 0.05, 0.1, 1,5,10]\n }]\n\n start_time = time.time()\n abc = GridSearchCV(AdaBoostClassifier(random_state=42), param_grid)\n abc.fit(X_resampled, Y_resampled) \n\n print('--- {} seconds ---'.format(time.time() - start_time))\n\n # SVC\n param_grid = [{'n_estimators': np.arange(10,50,5),\n 'learning_rate': [0.01, 0.05, 0.1, 1,5,10],\n \"kernel\" : [\"linear\", \"poly\", \"rbf\", \"sigmoid\"],\n \"C\" : [0.01, 1, 10, 100]\n }]\n\n start_time = time.time()\n svc = GridSearchCV(svm.SVC(random_state=42), 
 [ [ "#### GridSearchCV Result", "_____no_output_____" ] ], [ [ "\nif do_hyper_parameter_tuning:\n\n    # Print grid search results\n    from sklearn.metrics import classification_report\n\n    means = abc.cv_results_['mean_test_score']\n    stds = abc.cv_results_['std_test_score']\n    params = abc.cv_results_['params']\n\n    print('Grid search mean and stdev:\\n')\n\n    for mean, std, p in zip(means, stds, params):\n        print(\"%0.3f (+/-%0.03f) for %r\" % (mean, std * 2, p))\n\n    # Print best params\n    print('\\nBest parameters:', abc.best_params_)\n    print(\"Detailed classification report:\")\n    print()\n    print(classification_report(Y_val, abc.predict(X_val)))\n    print()\nelse:\n    print(\"Adaboost: Hyper parameter report skipped.\")", "Adaboost: Hyper parameter report skipped.\n" ] ], [ [ "### Section 3.1 SVM Classifier (SVC)", "_____no_output_____" ], [ "#### Base class for all classifiers", "_____no_output_____" ] ], [ [ "import abc\n\n# interface of the classifiers\nclass IClassifier:\n\n    # this method should accept a list of file names of the training data\n    @abc.abstractmethod\n    def Train(self, train_file_names):\n        raise NotImplementedError()\n\n    # this should accept a 400 * 400 * 3 numpy array as query data, and returns the fen notation of the board.\n    @abc.abstractmethod\n    def Predict(self, query_data):\n        raise NotImplementedError()\n    \n    # this should accept a list of file names, and returns the predicted labels as a 1d numpy array.\n    @abc.abstractmethod\n    def PredictMultiple(self, file_names):\n        raise NotImplementedError() ", "_____no_output_____" ] ], [ [ "#### Class definition for SVC", "_____no_output_____" ] ], [ [ "from sklearn import svm\nimport numpy as np\n\n# image io and plotting\nfrom skimage import io, transform\nimport skimage.util\nfrom skimage.util.shape import view_as_blocks\nfrom matplotlib import pyplot as plt\n\n# parallel processing\nfrom joblib import Parallel, delayed\n# model save and load\nimport pickle\nimport os\n# profiling\nimport time\n\n# joblib needs the kernel to be a top-level function, so we defined it here.\ndef PreprocessKernel(name):\n    img = ReadImage(name, gray = True)\n    grids = SVCClassifier.SVCPreprocess(img)\n    labels = np.array(FENtoOneHot(GetCleanNameByPath(name))).argmax(axis=1)\n    return grids, labels\n\n# SVM Classifier\nclass SVCClassifier(IClassifier):\n\n    def __init__(self):\n        self.__svc__ = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,\n            decision_function_shape='ovr', degree=3, gamma=0.001, kernel='rbf',\n            max_iter=-1, probability=False, random_state=None, shrinking=True,\n            tol=0.001, verbose=False)\n\n    # this method should accept a list of file names of the training data\n    def Train(self, train_file_names):\n        print(\"svc: reading image.\")\n        start_time = time.time()\n        xs, ys = SVCClassifier.PreprocessParallelWrapperFunc(train_file_names)\n        print(\"svc: finished reading image, {} sec.\".format(time.time() - start_time))\n        # train\n        print(\"svc: start training.\")\n        start_time = time.time()\n        self.__svc__.fit(xs, ys)\n        print(\"svc: finished. 
{} sec.\".format(time.time() - start_time))\n\n # this should accept a 400 * 400 * 3 numpy array as query data, and returns the fen notation of the board.\n def Predict(self, query_data):\n grids = SVCClassifier.SVCPreprocess(query_data)\n y_pred = self.__svc__.predict(grids)\n \n return LabelArrayToL(y_pred)\n\n # predict by file name:\n def PredictMultiple(self, file_names):\n preds = []\n truth = []\n for f in file_names:\n img = ReadImage(f, gray = True)\n y_pred = self.Predict(img)\n y_true = FENtoL(GetCleanNameByPath(f))\n preds.append(y_pred)\n truth.append(y_true)\n \n all_pred = np.vstack(preds)\n all_truth = np.vstack(truth)\n return all_pred, all_truth\n \n # parallel pre-process wrapper:\n @staticmethod\n def PreprocessParallelWrapperFunc(file_names, num_thread = g_thread_num):\n result = Parallel(n_jobs = num_thread)(delayed(PreprocessKernel)(file_name) for file_name in file_names)\n xs, ys = zip(*result)\n xs = np.concatenate(xs, axis=0)\n ys = np.concatenate(ys)\n return xs, ys\n\n @staticmethod\n def SVCPreprocess(img):\n img = transform.resize(img, (g_down_sampled_size, g_down_sampled_size), mode='constant')\n grids = skimage.util.shape.view_as_blocks(img, block_shape = (g_down_sampled_grid_size, g_down_sampled_grid_size))\n grids = grids.reshape((-1, grids.shape[3], grids.shape[3]))\n grids = grids.reshape((grids.shape[0], grids.shape[1] * grids.shape[1]))\n return grids\n\n def SaveModel(self, save_file_name):\n os.makedirs(os.path.dirname(save_file_name), exist_ok = True)\n with open(save_file_name, 'wb') as file:\n pickle.dump(self.__svc__, file)\n\n def LoadModel(self, load_file_name):\n with open(load_file_name, 'rb') as file:\n self.__svc__ = pickle.load(file)", "_____no_output_____" ] ], [ [ "#### Test code for SVC", "_____no_output_____" ] ], [ [ "if run_test_code_for_classifiers:\n svc = SVCClassifier()\n train_names = GetFileNamesInDir(g_train_dir)\n\n if load_saved_model:\n print(\"svc: loading model from \" + svc_model_file)\n svc.LoadModel(svc_model_file)\n else:\n svc.Train(train_names[:500])\n\n y_truth = FENtoL(GetCleanNameByPath(a_random_file))\n img = ReadImage(a_random_file, gray = True)\n pred = svc.Predict(img)\n print(\"truth: \", ''.join(y_truth))\n print(\"pred : \", ''.join(pred))\n\n # save model\n if not load_saved_model:\n print(\"svc: saving model to \" + svc_model_file)\n svc.SaveModel(svc_model_file)", "svc: loading model from ./saved_model/svc_dump.pkl\ntruth: 0b0B0b0000pK00q00000p0rB0000000k0000000000000000000B0000000rb000\npred : 000B0000000K00000000p0rB0000000k0000000000000000000B0000000r0000\n" ] ], [ [ "### Section 3.2 Convolutional Neural Network Classifier (CNN)", "_____no_output_____" ], [ "#### Class definition for CNN", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport cv2\nfrom skimage import io, transform\nimport numpy as np\nimport os\n\n#import tensorflow as tf\n#from tensorflow import keras\n#from tf.keras.models import Sequential\n#from tf.keras.layers.core import Flatten, Dense, Dropout, Activation\n#from tf.keras.layers.convolutional import Convolution2D\n\nclass CNNClassifier(IClassifier):\n\n # the file name format does not accept batch as parameter. 
link:\n # https://github.com/tensorflow/tensorflow/issues/38668\n s_check_point_file_name = \"./CNN_training_checkpoint/cp_{epoch:02d}-{accuracy:.2f}.ckpt\"\n s_check_point_path = os.path.dirname(s_check_point_file_name)\n s_save_frequence = 10000 # save a checkpoint every s_save_frequence batches\n\n def __init__(self):\n \n #tf.config.threading.set_inter_op_parallelism_threads(3)\n #tf.config.threading.set_intra_op_parallelism_threads(3)\n\n # define our model\n self.__model__ = keras.Sequential(\n [\n layers.Convolution2D(32, (3, 3), input_shape = (g_down_sampled_grid_size, g_down_sampled_grid_size, 3)),\n layers.Activation('relu'),\n layers.Dropout(0.1),\n layers.Convolution2D(32, (3, 3)),\n layers.Activation('relu'),\n\n layers.Convolution2D(32, (3, 3)),\n layers.Activation('relu'),\n\n layers.Flatten(),\n \n layers.Dense(128),\n layers.Activation('relu'),\n layers.Dropout(0.3),\n\n layers.Dense(13),\n layers.Activation(\"softmax\")\n ]\n )\n\n self.__model__.compile(loss = \"categorical_crossentropy\", optimizer = 'adam', metrics = [\"accuracy\"])\n \n self.__save_check_point_callback__ = tf.keras.callbacks.ModelCheckpoint(\n filepath = CNNClassifier.s_check_point_file_name,\n monitor='val_accuracy',\n save_weights_only = True,\n save_freq = CNNClassifier.s_save_frequence,\n verbose = 1\n )\n\n\n # generator\n @staticmethod\n def func_generator(train_file_names):\n for image_file_name in train_file_names:\n img = ReadImage(image_file_name)\n x = CNNClassifier.PreprocessImage(img)\n y = np.array(FENtoOneHot(GetCleanNameByPath(image_file_name)))\n yield x, y\n\n # this method should accept N * 64 * m * n numpy array as train data, and N lists of 64 chars as label.\n def Train(self, train_data_names):\n train_size = len(train_data_names)\n\n ## try load last checkpoint\n #if not self.LoadMostRecentModel():\n # os.makedirs(CNNClassifier.s_check_point_path, exist_ok = True)\n\n # train\n self.__model__.fit(CNNClassifier.func_generator(train_data_names),\n use_multiprocessing = False,\n #batch_size = 1000,\n steps_per_epoch = train_size / 20,\n epochs = 2,\n #callbacks = [self.__save_check_point_callback__],\n verbose = 1)\n\n\n # this should accept a 64 * m * n numpy array as query data, and returns the fen notation of the board.\n def Predict(self, query_data):\n grids = CNNClassifier.PreprocessImage(query_data)\n y_pred = self.__model__.predict(grids).argmax(axis=1)\n\n return y_pred\n\n \n # predict by file name:\n def PredictMultiple(self, file_names):\n preds = []\n truth = []\n for f in file_names:\n img = ReadImage(f, gray = False)\n y_pred = LabelArrayToL(self.Predict(img))\n y_true = FENtoL(GetCleanNameByPath(f))\n preds.append(y_pred)\n truth.append(y_true)\n \n all_pred = np.vstack(preds)\n all_truth = np.vstack(truth)\n return all_pred, all_truth\n \n \n def LoadModel(self, name):\n self.__model__.load_weights(name)\n \n def SaveModel(self, name):\n os.makedirs(os.path.dirname(name), exist_ok = True)\n self.__model__.save_weights(name)\n \n def PrintModel(self):\n self.__model__.summary()\n \n def LoadMostRecentModel(self):\n return self.LoadMostRecentModelFromDirectory(CNNClassifier.s_check_point_path)\n \n def LoadMostRecentModelFromDirectory(self, path):\n try:\n last_cp = tf.train.latest_checkpoint(path)\n self.__model__.load_weights(last_cp)\n print(\"Loaded checkpoint from \" + last_cp)\n return True\n except:\n print(\"No checkpoint is loaded.\")\n return False\n\n def TestAccuracy(self, test_file_names):\n num_files = len(test_file_names)\n\n predict_result = 
self.__model__.predict(CNNClassifier.func_generator(test_file_names)).argmax(axis=1)\n predict_result = predict_result.reshape(num_files, -1)\n predicted_fen_arr = np.array([LtoFEN(LabelArrayToL(labels)) for labels in predict_result])\n test_fens = np.array([GetCleanNameByPath(file_name) for file_name in test_file_names])\n\n final_accuracy = (predicted_fen_arr == test_fens).astype(np.float).mean()\n return final_accuracy\n\n @staticmethod\n def PreprocessImage(image):\n image = transform.resize(image, (g_down_sampled_size, g_down_sampled_size), mode='constant')\n \n # 1st and 2nd dim is 8\n grids = ImageToGrids(image, g_down_sampled_grid_size, g_down_sampled_grid_size)\n\n # debug\n #plt.imshow(grids[0][3])\n #plt.show()\n\n return grids.reshape(g_grid_row * g_grid_col, g_down_sampled_grid_size, g_down_sampled_grid_size, 3)", "_____no_output_____" ] ], [ [ "#### test code for CNN", "_____no_output_____" ] ], [ [ "if run_test_code_for_classifiers:\n if not load_saved_model:\n cnn = CNNClassifier()\n train_names = GetFileNamesInDir(g_train_dir)\n cnn.Train(train_names)\n cnn.SaveModel(cnn_model_file)\n else:\n cnn = CNNClassifier()\n cnn.PrintModel()\n print(\"cnn: loading model from \" + cnn_model_file)\n cnn.LoadModel(cnn_model_file)\n predicted_label = cnn.Predict(ReadImage(a_random_file))\n L = predicted_label\n FEN = LtoFEN(LabelArrayToL(L))\n print(\"predicted: \" + FEN)\n print(\"Original: \" + GetCleanNameByPath(a_random_file))\n\n #test_file_names = GetFileNamesInDir(g_test_dir)[:1000]\n #print(\"CNN: Testing accuracy for {} board images.\".format(len(test_file_names)))\n #accuracy = cnn.TestAccuracy(test_file_names)\n #print(\"CNN: Final accuracy: {}\".format(accuracy))\n\n labels = cnn.PredictMultiple(GetFileNamesInDir(g_test_dir)[:100])", "Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 23, 23, 32) 896 \n_________________________________________________________________\nactivation (Activation) (None, 23, 23, 32) 0 \n_________________________________________________________________\ndropout (Dropout) (None, 23, 23, 32) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 21, 21, 32) 9248 \n_________________________________________________________________\nactivation_1 (Activation) (None, 21, 21, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 19, 19, 32) 9248 \n_________________________________________________________________\nactivation_2 (Activation) (None, 19, 19, 32) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 11552) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 1478784 \n_________________________________________________________________\nactivation_3 (Activation) (None, 128) 0 \n_________________________________________________________________\ndropout_1 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 13) 1677 \n_________________________________________________________________\nactivation_4 (Activation) (None, 13) 0 \n=================================================================\nTotal params: 1,499,853\nTrainable params: 1,499,853\nNon-trainable params: 0\n_________________________________________________________________\ncnn: loading model from 
./saved_model/cnn_weights\npredicted: 1b1B1b2-2pK2q1-4p1rB-7k-8-8-3B4-3rb3\nOriginal: 1b1B1b2-2pK2q1-4p1rB-7k-8-8-3B4-3rb3\n" ] ], [ [ "### Section 3.3 AdaBoost Classifier", "_____no_output_____" ], [ "#### class definition", "_____no_output_____" ] ], [ [ "from sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\n\n# image io and plotting\nfrom skimage import io, transform\nimport skimage.util\nfrom skimage.util.shape import view_as_blocks\nfrom matplotlib import pyplot as plt\n# parallel processing\nfrom joblib import Parallel, delayed\n# model save and load\nimport pickle\nimport os\n# profiling\nimport time\n\n# joblib needs the kernel to be a top-level function, so we defined it here.\ndef PreprocessKernel(name):\n img = ReadImage(name, gray = True)\n grids = ABClassifier.ABCPreprocess(img)\n labels = np.array(FENtoOneHot(GetCleanNameByPath(name))).argmax(axis=1)\n return grids, labels\n\n# Adaboost Classifier\nclass ABClassifier(IClassifier):\n\n def __init__(self):\n self.__abc__ = AdaBoostClassifier(n_estimators=30,\n base_estimator = DecisionTreeClassifier(criterion='gini', max_depth=5),\n learning_rate=0.5)\n\n # this method should accept a list of file names of the training data\n def Train(self, train_file_names):\n print(\"abc: reading image.\")\n start_time = time.time()\n xs, ys = ABClassifier.PreprocessParallelWrapperFunc(train_file_names)\n print(\"abc: finished reading image, {} sec.\".format(time.time() - start_time))\n # train\n print(\"abc: start training.\")\n start_time = time.time()\n self.__abc__.fit(xs, ys)\n print(\"abc: finished. {} sec.\".format(time.time() - start_time))\n\n\n # this should accept a 400 * 400 * 3 numpy array as query data, and returns the fen notation of the board.\n def Predict(self, query_data):\n grids = ABClassifier.ABCPreprocess(query_data)\n y_pred = self.__abc__.predict(grids)\n \n return LabelArrayToL(y_pred)\n\n\n # parallel pre-process wrapper:\n @staticmethod\n def PreprocessParallelWrapperFunc(file_names, num_thread = g_thread_num):\n result = Parallel(n_jobs = num_thread)(delayed(PreprocessKernel)(file_name) for file_name in file_names)\n xs, ys = zip(*result)\n xs = np.concatenate(xs, axis=0)\n ys = np.concatenate(ys)\n return xs, ys\n\n\n @staticmethod\n def ABCPreprocess(img):\n img = transform.resize(img, (g_down_sampled_size, g_down_sampled_size), mode='constant')\n grids = skimage.util.shape.view_as_blocks(img, block_shape = (g_down_sampled_grid_size, g_down_sampled_grid_size))\n grids = grids.reshape((-1, grids.shape[3], grids.shape[3]))\n grids = grids.reshape((grids.shape[0], grids.shape[1] * grids.shape[1]))\n return grids\n\n def SaveModel(self, save_file_name):\n os.makedirs(os.path.dirname(save_file_name), exist_ok = True)\n with open(save_file_name, 'wb') as file:\n pickle.dump(self.__abc__, file)\n\n def LoadModel(self, load_file_name):\n with open(load_file_name, 'rb') as file:\n self.__abc__ = pickle.load(file)\n \n # predict by file name:\n def PredictMultiple(self, file_names):\n preds = []\n truth = []\n for f in file_names:\n img = ReadImage(f, gray = True)\n y_pred = self.Predict(img)\n y_true = FENtoL(GetCleanNameByPath(f))\n preds.append(y_pred)\n truth.append(y_true)\n \n all_pred = np.vstack(preds)\n all_truth = np.vstack(truth)\n return all_pred, all_truth", "_____no_output_____" ] ], [ [ "#### Test code for ABC", "_____no_output_____" ] ], [ [ "if 
run_test_code_for_classifiers:\n abc = ABClassifier()\n train_names = GetFileNamesInDir(g_train_dir)\n\n if load_saved_model:\n print(\"abc: loading model from \" + abc_model_file)\n abc.LoadModel(abc_model_file)\n else:\n abc.Train(train_names)\n\n y_truth = FENtoL(GetCleanNameByPath(a_random_file))\n img = ReadImage(a_random_file, gray = True)\n pred = abc.Predict(img)\n print(\"truth: \", ''.join(y_truth))\n print(\"pred : \", ''.join(pred))\n\n # save model\n if not load_saved_model:\n print(\"abc: saving model to \" + abc_model_file)\n abc.SaveModel(abc_model_file)", "abc: loading model from ./saved_model/abc_dump.pkl\ntruth: 0b0B0b0000pK00q00000p0rB0000000k0000000000000000000B0000000rb000\npred : 0k0B0k0000pP00q00000p0rB0000000k0000000000000000000B0000000rb000\n" ] ], [ [ "## 10-fold cross validation for 3 classifiers", "_____no_output_____" ], [ "Cv reference\nhttps://scikit-learn.org/stable/modules/cross_validation.html\n\noptions for 10 fold\n 1. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.ShuffleSplit.html\n 2. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html (Preferred)\n \" StratifiedKFold is a variation of k-fold which returns stratified folds: each set contains approximately the same percentage of samples of each target class as the complete set.\" \n ", "_____no_output_____" ], [ "### helper functions", "_____no_output_____" ] ], [ [ "# filters accepts a list of file names, and return the data matrix and labels\nimport random\nfrom sklearn.metrics import confusion_matrix\n\n# get balanced accuracy from confusion matrix\ndef BalancedAccuracyFromConfusionMatrix(cm):\n ret = np.empty((cm.shape[0]))\n \n for idx, row in enumerate(cm):\n ret[idx] = row[idx] / row.sum()\n \n return ret.mean()\n\n# dummy filter to return all files\ndef DefaultFilter(file_names, rate = 1):\n return file_names\n\n# filter using random_sampling:\ndef RandomFilter(file_names, rate = 1):\n # we fix the random part to assure the results are consistent\n random_seed = 4242\n random.seed(random_seed)\n return random.sample(file_names, k = int(len(file_names) * rate))\n \ndef ConfusionMatrix(classifier, test_file_names, filter = RandomFilter, sampling_rate = 0.001):\n \n confusion_matrices = []\n accuracies = []\n accuracies_balanced = []\n train_time_cost = []\n validation_time_cost = []\n \n # split name list into 10 equal parts\n division = len(test_file_names) / float(10) \n complete_name_folds = [ test_file_names[int(round(division * i)): int(round(division * (i + 1)))] for i in range(10) ]\n filtered_name_folds = complete_name_folds.copy()\n for i in range(10):\n filtered_name_folds[i] = filter(complete_name_folds[i], rate = sampling_rate)\n\n # we use filtered name folds to train, and validation.\n for iv in range(10):\n \n # merge the 9 folds:\n train_names = []\n validation_names = []\n for i in range(10):\n if i != iv:\n train_names.extend(filtered_name_folds[i])\n else:\n # validation_names = complete_name_folds[i].copy()\n validation_names = filtered_name_folds[i].copy()\n\n \n # train the classifier:\n print(\"training started: \", type(classifier).__name__, \"for fold #\", iv, \"# train files:\", len(train_names))\n t = time.time()\n classifier.Train(train_names)\n train_time_cost.append(time.time() - t)\n print(\"training finished: \", type(classifier).__name__, \"for fold #\", iv,\n \"time: {}s\".format(time.time() - t))\n \n print(\"predicting started: \", type(classifier).__name__, \"for fold #\", iv)\n t = 
time.time()\n ypreds, y_true = classifier.PredictMultiple(validation_names)\n validation_time_cost.append(time.time() - t)\n \n ypreds = ypreds.reshape((-1, 1))\n y_true = y_true.reshape((-1, 1))\n\n conf_mat = confusion_matrix(y_true, ypreds, labels = g_labels)\n confusion_matrices.append(conf_mat)\n accuracy = np.trace(conf_mat) / float(np.sum(conf_mat))\n accuracies.append(accuracy)\n accuracy_balanced = BalancedAccuracyFromConfusionMatrix(conf_mat)\n accuracies_balanced.append(accuracy_balanced)\n \n \n \n print(\"predicting finished: \", type(classifier).__name__, \"for fold #\", iv,\n \"time: {}s\".format(time.time() - t), \" accuracy: \", accuracy, \" balanced_accuracy:\", accuracy_balanced)\n \n return confusion_matrices, accuracies, accuracies_balanced, train_time_cost, validation_time_cost\n", "_____no_output_____" ] ], [ [ "### 10-fold routine", "_____no_output_____" ] ], [ [ "if run_ten_fold:\n # 10-fold for ABC\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\")\n # random sampling rate of the each fold in 10-fold\n abc_random_sampling_rate = 0.005\n\n abc_tf = ABClassifier()\n\n confusion_matrices_abc, accuracies_abc, accuracies_balanced_abc, train_time_cost_abc, validation_time_cost_abc = \\\n ConfusionMatrix(abc_tf, train_file_names, RandomFilter, sampling_rate = abc_random_sampling_rate)\n\n", "training started: ABClassifier for fold # 0 # train files: 360\nabc: reading image.\nabc: finished reading image, 5.398272275924683 sec.\nabc: start training.\nabc: finished. 105.41199851036072 sec.\ntraining finished: ABClassifier for fold # 0 time: 110.818776845932s\npredicting started: ABClassifier for fold # 0\npredicting finished: ABClassifier for fold # 0 time: 1.8081109523773193s accuracy: 0.915234375 balanced_accuracy: 0.9887159507361434\ntraining started: ABClassifier for fold # 1 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.353529453277588 sec.\nabc: start training.\nabc: finished. 106.2824718952179 sec.\ntraining finished: ABClassifier for fold # 1 time: 107.64400005340576s\npredicting started: ABClassifier for fold # 1\npredicting finished: ABClassifier for fold # 1 time: 0.588024377822876s accuracy: 0.9953125 balanced_accuracy: 0.9773043222050123\ntraining started: ABClassifier for fold # 2 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.4164981842041016 sec.\nabc: start training.\nabc: finished. 108.95200252532959 sec.\ntraining finished: ABClassifier for fold # 2 time: 110.37699913978577s\npredicting started: ABClassifier for fold # 2\npredicting finished: ABClassifier for fold # 2 time: 0.5855247974395752s accuracy: 0.994921875 balanced_accuracy: 0.9726729328379256\ntraining started: ABClassifier for fold # 3 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.3769776821136475 sec.\nabc: start training.\nabc: finished. 105.25149726867676 sec.\ntraining finished: ABClassifier for fold # 3 time: 106.63697409629822s\npredicting started: ABClassifier for fold # 3\npredicting finished: ABClassifier for fold # 3 time: 0.5955328941345215s accuracy: 0.997265625 balanced_accuracy: 0.9829801695186311\ntraining started: ABClassifier for fold # 4 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.3645029067993164 sec.\nabc: start training.\nabc: finished. 
105.66096711158752 sec.\ntraining finished: ABClassifier for fold # 4 time: 107.03396940231323s\npredicting started: ABClassifier for fold # 4\npredicting finished: ABClassifier for fold # 4 time: 0.6035027503967285s accuracy: 0.9984375 balanced_accuracy: 0.9871866850188351\ntraining started: ABClassifier for fold # 5 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.3729960918426514 sec.\nabc: start training.\nabc: finished. 106.87903761863708 sec.\ntraining finished: ABClassifier for fold # 5 time: 108.26099824905396s\npredicting started: ABClassifier for fold # 5\npredicting finished: ABClassifier for fold # 5 time: 0.5815010070800781s accuracy: 0.996484375 balanced_accuracy: 0.9763087452358707\ntraining started: ABClassifier for fold # 6 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.3584940433502197 sec.\nabc: start training.\nabc: finished. 104.68600416183472 sec.\ntraining finished: ABClassifier for fold # 6 time: 106.0530149936676s\npredicting started: ABClassifier for fold # 6\npredicting finished: ABClassifier for fold # 6 time: 0.6030011177062988s accuracy: 0.9984375 balanced_accuracy: 0.9848901098901099\ntraining started: ABClassifier for fold # 7 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.4029998779296875 sec.\nabc: start training.\nabc: finished. 106.33550238609314 sec.\ntraining finished: ABClassifier for fold # 7 time: 107.74799847602844s\npredicting started: ABClassifier for fold # 7\npredicting finished: ABClassifier for fold # 7 time: 0.5720021724700928s accuracy: 0.994140625 balanced_accuracy: 0.9539770270297656\ntraining started: ABClassifier for fold # 8 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.4405317306518555 sec.\nabc: start training.\nabc: finished. 103.98052430152893 sec.\ntraining finished: ABClassifier for fold # 8 time: 105.43050003051758s\npredicting started: ABClassifier for fold # 8\npredicting finished: ABClassifier for fold # 8 time: 0.6030175685882568s accuracy: 0.989453125 balanced_accuracy: 0.9828409905842151\ntraining started: ABClassifier for fold # 9 # train files: 360\nabc: reading image.\nabc: finished reading image, 1.3639986515045166 sec.\nabc: start training.\nabc: finished. 
104.17997884750366 sec.\ntraining finished: ABClassifier for fold # 9 time: 105.55300045013428s\npredicting started: ABClassifier for fold # 9\npredicting finished: ABClassifier for fold # 9 time: 0.5954928398132324s accuracy: 0.97421875 balanced_accuracy: 0.9726182149323749\n" ], [ "if run_ten_fold:\n # 10-fold for CNN\n # random sampling rate of the each fold in 10-fold\n cnn_random_sampling_rate = 0.5\n\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\")\n\n cnn_tf = CNNClassifier()\n\n confusion_matrices_cnn, accuracies_cnn, accuracies_balanced_cnn, train_time_cost_cnn, validation_time_cost_cnn = \\\n ConfusionMatrix(cnn_tf, train_file_names, RandomFilter, sampling_rate = cnn_random_sampling_rate)\n", "training started: CNNClassifier for fold # 0 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 92s 51ms/step - loss: 0.0742 - accuracy: 0.9811\nEpoch 2/2\n1800/1800 [==============================] - 92s 51ms/step - loss: 0.0129 - accuracy: 0.9967\ntraining finished: CNNClassifier for fold # 0 time: 184.50335025787354s\npredicting started: CNNClassifier for fold # 0\npredicting finished: CNNClassifier for fold # 0 time: 255.72573709487915s accuracy: 0.9953046875 balanced_accuracy: 0.972284597528154\ntraining started: CNNClassifier for fold # 1 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 88s 49ms/step - loss: 0.0044 - accuracy: 0.9987\nEpoch 2/2\n1800/1800 [==============================] - 88s 49ms/step - loss: 0.0026 - accuracy: 0.9992\ntraining finished: CNNClassifier for fold # 1 time: 176.32050108909607s\npredicting started: CNNClassifier for fold # 1\npredicting finished: CNNClassifier for fold # 1 time: 203.46679401397705s accuracy: 0.99998828125 balanced_accuracy: 0.9999432022567637\ntraining started: CNNClassifier for fold # 2 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 86s 48ms/step - loss: 0.0042 - accuracy: 0.9990\nEpoch 2/2\n1800/1800 [==============================] - 87s 49ms/step - loss: 0.0023 - accuracy: 0.9994\ntraining finished: CNNClassifier for fold # 2 time: 173.61347317695618s\npredicting started: CNNClassifier for fold # 2\npredicting finished: CNNClassifier for fold # 2 time: 258.44522190093994s accuracy: 0.999921875 balanced_accuracy: 0.9993312211503912\ntraining started: CNNClassifier for fold # 3 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 81s 45ms/step - loss: 0.0023 - accuracy: 0.9994\nEpoch 2/2\n1800/1800 [==============================] - 83s 46ms/step - loss: 0.0053 - accuracy: 0.9993\ntraining finished: CNNClassifier for fold # 3 time: 164.83446884155273s\npredicting started: CNNClassifier for fold # 3\npredicting finished: CNNClassifier for fold # 3 time: 253.1358721256256s accuracy: 1.0 balanced_accuracy: 1.0\ntraining started: CNNClassifier for fold # 4 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 81s 45ms/step - loss: 0.0039 - accuracy: 0.9993\nEpoch 2/2\n1800/1800 [==============================] - 86s 48ms/step - loss: 0.0014 - accuracy: 0.9996\ntraining finished: CNNClassifier for fold # 4 time: 167.50047063827515s\npredicting started: CNNClassifier for fold # 4\npredicting finished: CNNClassifier for fold # 4 time: 257.837361574173s accuracy: 1.0 balanced_accuracy: 1.0\ntraining started: CNNClassifier for fold # 5 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 89s 50ms/step - loss: 0.0028 - accuracy: 0.9995\nEpoch 2/2\n1800/1800 
[==============================] - 88s 49ms/step - loss: 0.0025 - accuracy: 0.9995\ntraining finished: CNNClassifier for fold # 5 time: 176.9005012512207s\npredicting started: CNNClassifier for fold # 5\npredicting finished: CNNClassifier for fold # 5 time: 255.32304525375366s accuracy: 1.0 balanced_accuracy: 1.0\ntraining started: CNNClassifier for fold # 6 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 87s 48ms/step - loss: 2.2082e-04 - accuracy: 0.9999\nEpoch 2/2\n1800/1800 [==============================] - 87s 48ms/step - loss: 0.0058 - accuracy: 0.9994\ntraining finished: CNNClassifier for fold # 6 time: 174.0054988861084s\npredicting started: CNNClassifier for fold # 6\npredicting finished: CNNClassifier for fold # 6 time: 254.33951449394226s accuracy: 0.99997265625 balanced_accuracy: 0.9996995388807377\ntraining started: CNNClassifier for fold # 7 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 86s 48ms/step - loss: 6.0853e-04 - accuracy: 0.9999\nEpoch 2/2\n1800/1800 [==============================] - 91s 51ms/step - loss: 0.0046 - accuracy: 0.9994\ntraining finished: CNNClassifier for fold # 7 time: 177.45500087738037s\npredicting started: CNNClassifier for fold # 7\npredicting finished: CNNClassifier for fold # 7 time: 260.65016198158264s accuracy: 1.0 balanced_accuracy: 1.0\ntraining started: CNNClassifier for fold # 8 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 87s 48ms/step - loss: 0.0012 - accuracy: 0.9997\nEpoch 2/2\n1800/1800 [==============================] - 87s 48ms/step - loss: 0.0066 - accuracy: 0.9993\ntraining finished: CNNClassifier for fold # 8 time: 173.66347432136536s\npredicting started: CNNClassifier for fold # 8\npredicting finished: CNNClassifier for fold # 8 time: 258.1064829826355s accuracy: 0.999984375 balanced_accuracy: 0.9999181962289339\ntraining started: CNNClassifier for fold # 9 # train files: 36000\nEpoch 1/2\n1800/1800 [==============================] - 87s 48ms/step - loss: 0.0017 - accuracy: 0.9997\nEpoch 2/2\n1800/1800 [==============================] - 91s 51ms/step - loss: 6.9238e-04 - accuracy: 0.9999\ntraining finished: CNNClassifier for fold # 9 time: 178.00898122787476s\npredicting started: CNNClassifier for fold # 9\npredicting finished: CNNClassifier for fold # 9 time: 261.0290925502777s accuracy: 1.0 balanced_accuracy: 1.0\n" ], [ "if run_ten_fold:\n # 10-fold for SVM\n train_file_names = GetFileNamesInDir(g_train_dir, extension = \"jpeg\")\n # random sampling rate of the each fold in 10-fold\n svc_random_sampling_rate = 0.01\n\n svc_tf = SVCClassifier()\n\n confusion_matrices_svc, accuracies_svc, accuracies_balanced_svc, train_time_cost_svc, validation_time_cost_svc = \\\n ConfusionMatrix(svc_tf, train_file_names, RandomFilter, sampling_rate = svc_random_sampling_rate)", "training started: SVCClassifier for fold # 0 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 4.1754982471466064 sec.\nsvc: start training.\nsvc: finished. 275.877498626709 sec.\ntraining finished: SVCClassifier for fold # 0 time: 280.06999683380127s\npredicting started: SVCClassifier for fold # 0\npredicting finished: SVCClassifier for fold # 0 time: 25.094476461410522s accuracy: 0.9755859375 balanced_accuracy: 0.8390801614481257\ntraining started: SVCClassifier for fold # 1 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 4.161530256271362 sec.\nsvc: start training.\nsvc: finished. 
276.26797008514404 sec.\ntraining finished: SVCClassifier for fold # 1 time: 280.44550037384033s\npredicting started: SVCClassifier for fold # 1\npredicting finished: SVCClassifier for fold # 1 time: 25.063472747802734s accuracy: 0.97578125 balanced_accuracy: 0.851003508486225\ntraining started: SVCClassifier for fold # 2 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 4.096996545791626 sec.\nsvc: start training.\nsvc: finished. 273.4409935474396 sec.\ntraining finished: SVCClassifier for fold # 2 time: 277.56246972084045s\npredicting started: SVCClassifier for fold # 2\npredicting finished: SVCClassifier for fold # 2 time: 25.160999536514282s accuracy: 0.9748046875 balanced_accuracy: 0.8442856239581942\ntraining started: SVCClassifier for fold # 3 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.193498373031616 sec.\nsvc: start training.\nsvc: finished. 271.1005029678345 sec.\ntraining finished: SVCClassifier for fold # 3 time: 273.30952429771423s\npredicting started: SVCClassifier for fold # 3\npredicting finished: SVCClassifier for fold # 3 time: 24.9065043926239s accuracy: 0.9771484375 balanced_accuracy: 0.8418881733972017\ntraining started: SVCClassifier for fold # 4 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.113495111465454 sec.\nsvc: start training.\nsvc: finished. 271.2329773902893 sec.\ntraining finished: SVCClassifier for fold # 4 time: 273.3629765510559s\npredicting started: SVCClassifier for fold # 4\npredicting finished: SVCClassifier for fold # 4 time: 24.966519832611084s accuracy: 0.9759765625 balanced_accuracy: 0.8401999249500759\ntraining started: SVCClassifier for fold # 5 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.094999074935913 sec.\nsvc: start training.\nsvc: finished. 270.80147099494934 sec.\ntraining finished: SVCClassifier for fold # 5 time: 272.9129693508148s\npredicting started: SVCClassifier for fold # 5\npredicting finished: SVCClassifier for fold # 5 time: 24.80652356147766s accuracy: 0.9701171875 balanced_accuracy: 0.8018837193943575\ntraining started: SVCClassifier for fold # 6 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.1854727268218994 sec.\nsvc: start training.\nsvc: finished. 276.1960005760193 sec.\ntraining finished: SVCClassifier for fold # 6 time: 278.39747190475464s\npredicting started: SVCClassifier for fold # 6\npredicting finished: SVCClassifier for fold # 6 time: 25.331998825073242s accuracy: 0.980078125 balanced_accuracy: 0.8347615121472585\ntraining started: SVCClassifier for fold # 7 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 3.927501916885376 sec.\nsvc: start training.\nsvc: finished. 274.04150009155273 sec.\ntraining finished: SVCClassifier for fold # 7 time: 277.98550176620483s\npredicting started: SVCClassifier for fold # 7\npredicting finished: SVCClassifier for fold # 7 time: 25.360967874526978s accuracy: 0.9787109375 balanced_accuracy: 0.8173997864853184\ntraining started: SVCClassifier for fold # 8 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.1504993438720703 sec.\nsvc: start training.\nsvc: finished. 
273.974002122879 sec.\ntraining finished: SVCClassifier for fold # 8 time: 276.14250111579895s\npredicting started: SVCClassifier for fold # 8\npredicting finished: SVCClassifier for fold # 8 time: 25.19000005722046s accuracy: 0.97578125 balanced_accuracy: 0.8372536721897584\ntraining started: SVCClassifier for fold # 9 # train files: 720\nsvc: reading image.\nsvc: finished reading image, 2.29949951171875 sec.\nsvc: start training.\nsvc: finished. 273.2610285282135 sec.\ntraining finished: SVCClassifier for fold # 9 time: 275.57649850845337s\npredicting started: SVCClassifier for fold # 9\npredicting finished: SVCClassifier for fold # 9 time: 25.151024341583252s accuracy: 0.9740234375 balanced_accuracy: 0.8392838619821293\n" ] ], [ [ "### Serialize the results (export to hard drive)", "_____no_output_____" ] ], [ [ "if run_ten_fold:\n # dump the matrices for report.\n os.makedirs(os.path.dirname(ten_fold_result_path), exist_ok = True)\n\n np.save(ten_fold_result_path + \"confusion_matrices_abc.npy\", confusion_matrices_abc)\n np.save(ten_fold_result_path + \"accuracies_abc.npy\", accuracies_abc)\n np.save(ten_fold_result_path + \"accuracies_balanced_abc.npy\", accuracies_balanced_abc)\n np.save(ten_fold_result_path + \"train_time_cost_abc.npy\", train_time_cost_abc)\n np.save(ten_fold_result_path + \"validation_time_cost_abc.npy\", validation_time_cost_abc)\n\n np.save(ten_fold_result_path + \"confusion_matrices_cnn.npy\", confusion_matrices_cnn)\n np.save(ten_fold_result_path + \"accuracies_cnn.npy\", accuracies_cnn)\n np.save(ten_fold_result_path + \"accuracies_balanced_cnn.npy\", accuracies_balanced_cnn)\n np.save(ten_fold_result_path + \"train_time_cost_cnn.npy\", train_time_cost_cnn)\n np.save(ten_fold_result_path + \"validation_time_cost_cnn.npy\", validation_time_cost_cnn)\n\n np.save(ten_fold_result_path + \"confusion_matrices_svc.npy\", confusion_matrices_svc)\n np.save(ten_fold_result_path + \"accuracies_svc.npy\", accuracies_svc)\n np.save(ten_fold_result_path + \"accuracies_balanced_svc.npy\", accuracies_balanced_svc)\n np.save(ten_fold_result_path + \"train_time_cost_svc.npy\", train_time_cost_svc)\n np.save(ten_fold_result_path + \"validation_time_cost_svc.npy\", validation_time_cost_svc)\n\n\n svc_tf.SaveModel(svc_model_file)\n abc_tf.SaveModel(abc_model_file)\n cnn_tf.SaveModel(cnn_model_file)\n", "_____no_output_____" ] ], [ [ "### Read the results from hard drive", "_____no_output_____" ] ], [ [ "if not run_ten_fold:\n \n import numpy as np\n confusion_matrices_abc = np.load(ten_fold_result_path + \"confusion_matrices_abc.npy\")\n accuracies_abc = np.load(ten_fold_result_path + \"accuracies_abc.npy\")\n accuracies_balanced_abc = np.load(ten_fold_result_path + \"accuracies_balanced_abc.npy\")\n train_time_cost_abc = np.load(ten_fold_result_path + \"train_time_cost_abc.npy\")\n validation_time_cost_abc = np.load(ten_fold_result_path + \"validation_time_cost_abc.npy\")\n\n confusion_matrices_cnn = np.load(ten_fold_result_path + \"confusion_matrices_cnn.npy\")\n accuracies_cnn = np.load(ten_fold_result_path + \"accuracies_cnn.npy\")\n accuracies_balanced_cnn = np.load(ten_fold_result_path + \"accuracies_balanced_cnn.npy\")\n train_time_cost_cnn = np.load(ten_fold_result_path + \"train_time_cost_cnn.npy\")\n validation_time_cost_cnn = np.load(ten_fold_result_path + \"validation_time_cost_cnn.npy\")\n\n confusion_matrices_svc = np.load(ten_fold_result_path + \"confusion_matrices_svc.npy\")\n accuracies_svc = np.load(ten_fold_result_path + \"accuracies_svc.npy\")\n 
accuracies_balanced_svc = np.load(ten_fold_result_path + \"accuracies_balanced_svc.npy\")\n    train_time_cost_svc = np.load(ten_fold_result_path + \"train_time_cost_svc.npy\")\n    validation_time_cost_svc = np.load(ten_fold_result_path + \"validation_time_cost_svc.npy\")\n\n\n", "_____no_output_____" ] ], [ [ "### Plot the results", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\ndef plot_accuracy(mat_abc, mat_cnn, mat_svc, title):\n    line, = plt.plot(range(len(mat_abc)), mat_abc)\n    line.set_label('AdaBoost')\n    line, = plt.plot(range(len(mat_cnn)), mat_cnn)\n    line.set_label('CNN')\n    line, = plt.plot(range(len(mat_svc)), mat_svc)\n    line.set_label('SVM')\n    plt.title(title)\n    plt.xlabel('fold')\n    plt.ylabel('accuracy')\n    plt.ylim(0.5, 1.1)\n    plt.legend()\n    plt.show()\n\n \nplot_accuracy(accuracies_abc, accuracies_cnn, accuracies_svc, \"Accuracy of 3 Classifiers\")\nplot_accuracy(accuracies_balanced_abc, accuracies_balanced_cnn, accuracies_balanced_svc, \"Balanced Accuracy of 3 Classifiers\")\n\n", "_____no_output_____" ], [ "import seaborn as sns\ndef plot_confusion_mat(conf_mat, title = \"\"):\n    plt.figure(figsize=(10,7))\n    ax = sns.heatmap(conf_mat, annot=True, fmt=\"d\")\n    plt.ylabel('True')\n    plt.xlabel('Predicted')\n    plt.title(title)\n    plt.show()\n    \nplot_confusion_mat(sum(confusion_matrices_abc), title = \"AdaBoost Confusion Matrix\")\nplot_confusion_mat(sum(confusion_matrices_cnn), title = \"CNN Confusion Matrix\")\nplot_confusion_mat(sum(confusion_matrices_svc), title = \"SVM Confusion Matrix\")\n", "_____no_output_____" ] ], [ [ "### Bonus: GUI: see GUI_with_Classifiers.ipynb", "_____no_output_____" ] ]
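, [ [ "One more note on the confusion matrices above: because the empty-square class dominates the boards, raw counts can hide per-class errors. The sketch below (an addition, not part of the original analysis; it assumes the `confusion_matrices_cnn` list from the 10-fold run is in scope and that every class occurs at least once) row-normalizes the summed matrix so that per-class recall appears on the diagonal.", "_____no_output_____" ] ], [ [ "# Sketch: row-normalize the summed confusion matrix so each row shows the\n# fraction of that true class; the diagonal is then per-class recall.\nimport numpy as np\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\ncm = sum(confusion_matrices_cnn).astype(float)\ncm_norm = cm / cm.sum(axis=1, keepdims=True)  # each row sums to 1\n\nplt.figure(figsize=(10,7))\nsns.heatmap(cm_norm, annot=True, fmt='.2f', vmin=0, vmax=1)\nplt.ylabel('True')\nplt.xlabel('Predicted')\nplt.title('CNN Confusion Matrix (row-normalized)')\nplt.show()", "_____no_output_____" ] ] ]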
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0ddd73a99ef0ae0fc81c021acdf2942d5189d00
30,082
ipynb
Jupyter Notebook
we-visualizing-eeg-hamilton/EEG_viz_mne.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
94
2020-06-27T19:04:11.000Z
2022-03-28T00:44:44.000Z
we-visualizing-eeg-hamilton/EEG_viz_mne.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
13
2020-07-23T02:11:40.000Z
2020-09-09T21:28:36.000Z
we-visualizing-eeg-hamilton/EEG_viz_mne.ipynb
bastivkl/nh2020-curriculum
245a72af3f325495448cbf6c0c6baa2499d43d94
[ "CC-BY-4.0" ]
50
2020-07-15T03:37:49.000Z
2022-02-27T23:07:14.000Z
38.865633
711
0.626621
[ [ [ "# Visualizing invasive and non-invasive EEG data\n\n[Liberty Hamilton, PhD](https://csd.utexas.edu/research/hamilton-lab)\nAssistant Professor, University of Texas at Austin\nDepartment of Speech, Language, and Hearing Sciences\nand Department of Neurology, Dell Medical School \n\nWelcome! In this notebook we will be discussing how to look at time series electrophysiological 🧠 data that is recorded noninvasively at the scalp (scalp electroencephalography or EEG), or invasively in patients who are undergoing surgical treatment for epilepsy (sometimes called intracranial EEG or iEEG, also called stereo EEG/sEEG, or electrocorticography/ECoG). \n\n### Python libraries you will be using in this tutorial:\n* MNE-python\n* matplotlib\n* numpy\n\n![MNE-python logo](https://mne.tools/stable/_static/mne_logo.png)\nMNE-python is open source python software for exploring and analyzing human neurophysiological data (EEG/MEG/iEEG).\n\n### What you will learn to do \n* Load some sample EEG data\n* Load some sample intracranial EEG data\n* Plot the raw EEG data/iEEG data\n* Plot the power spectrum of your data\n* Epoch data according to specific task conditions (sentences)\n* Plot all epochs and averaged evoked activity\n* Plot average evoked activity in response to specific task conditions (ERPs)\n * Plot by channel as well as averaging across channels\n* Plot EEG activity at specific time points on the scalp (topomaps)\n* Customize your plots\n\n### Other Resources:\n* [MNE-python tutorials](https://mne.tools/stable/auto_tutorials/index.html) -- This has many additional resources above and beyond that also include how to preprocess your data, remove artifacts, and more!", "_____no_output_____" ], [ "<a id=\"basics1\"></a>\n# 1. The basics: loading in your data", "_____no_output_____" ] ], [ [ "!pip install matplotlib==3.2\n\nimport mne # This is the mne library\nimport numpy as np # This gives us the power of numpy, which is just generally useful for array manipulation\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom matplotlib import cm\n\ndatasets = {'ecog': '/home/jovyan/data/we_eeg_viz_data/ecog/sub-S0006/S0006_ecog_hg.fif',\n 'eeg': '/home/jovyan/data/we_eeg_viz_data/eeg/sub-MT0002/MT0002-eeg.fif'}\nevent_files = {'ecog': '/home/jovyan/data/we_eeg_viz_data/ecog/sub-S0006/S0006_eve.txt',\n 'eeg': '/home/jovyan/data/we_eeg_viz_data/eeg/sub-MT0002/MT0002_eve.txt'}\nstim_file = '/home/jovyan/data/we_eeg_viz_data/stimulus_list.csv'", "_____no_output_____" ], [ "# Get some information about the stimuli (here, the names of the sound files that were played)\nev_names=np.genfromtxt(stim_file, skip_header=1, delimiter=',',dtype=np.str, usecols=[1],encoding='utf-8')\nev_nums=np.genfromtxt(stim_file, skip_header=1, delimiter=',',dtype=np.int, usecols=[0], encoding='utf-8')\nevent_id = dict()\nfor i, ev_name in enumerate(ev_names):\n event_id[ev_name] = ev_nums[i]", "_____no_output_____" ] ], [ [ "## 1.1. Choose which dataset to look at (start with EEG)\n\nFor the purposes of this tutorial, we'll be looking at some scalp EEG and intracranial EEG datasets from my lab. Participants provided written informed consent for participation in our research. These data were collected from two distinct participants listening to sentences from the [TIMIT acoustic-phonetic corpus](https://catalog.ldc.upenn.edu/LDC93S1). 
This is a database of English sentences spoken by multiple talkers from throughout the United States, and has been used in speech recognition research, neuroscience research, and more!\n\nThe list of stimuli is in the `stimulus_list.csv` file. Each stimulus starts with either a \"f\" or a \"m\" to indicate a female or male talker. The rest of the alphanumeric string has to do with other characteristics of the talkers that we won't go into here. The stimulus timings have been provided for you in the event files (ending with the suffix `_eve.txt`. We'll talk about those more later. \n\n### EEG Data\nThe EEG data was recorded with a 64-channel [BrainVision ActiCHamp](https://www.brainproducts.com/productdetails.php?id=74) system. These data are part of an ongoing project in our lab and are unpublished. You can find similar (larger) datasets from [Broderick et al.](https://datadryad.org/stash/dataset/doi:10.5061/dryad.070jc), or Bradley Voytek's lab has a list of [Open Electrophysiology datasets](https://github.com/openlists/ElectrophysiologyData).\n\n### The ECoG Data\nThe ECoG data was recorded from 106 electrodes across multiple regions of the brain while our participant listened to TIMIT sentences. This is a smaller subset of sentences than the EEG dataset and so is a bit faster to load. The areas we recorded from are labeled according to a clinical montage. For iEEG and ECoG datasets, these names are rarely standardized, so it can be hard to know exactly what is what without additional information. Here, each channel is named according to the general location of the electrode probe to which it belongs.\n\n| Device | General location |\n|---|---|\n| RAST | Right anterior superior temporal |\n| RMST | Right middle superior temporal |\n| RPST | Right posterior superior temporal |\n| RPPST | Right posterior parietal/superior temporal |\n| RAIF | Right anterior insula |\n| RPI | Right posterior insula |\n| ROF | Right orbitofrontal |\n| RAC | Right anterior cingulate |", "_____no_output_____" ] ], [ [ "data_type = 'eeg' # Can choose from 'eeg' or 'ecog'", "_____no_output_____" ] ], [ [ "## 1.2. Load the data\n\nThis next command loads the data from our fif file of interest. The `preload=True` flag means that the data will be loaded (necessary for some operations). If `preload=False`, you can still perform some aspects of this tutorial, and this is a great option if you have a large dataset and would like to look at some of the header information and metadata before you start to analyze it.", "_____no_output_____" ] ], [ [ "raw = mne.io.read_raw_fif(datasets[data_type], preload=True) ", "_____no_output_____" ] ], [ [ "There is a lot of useful information in the info structure. 
For example, we can get the sampling frequency (`raw.info['sfreq']`), the channel names (`raw.info['ch_names']`), the channel types and locations (in `raw.info['chs']`), and whether any filtering operations have been performed already (`raw.info['highpass']` and `raw.info['lowpass']` show the cut-offs for the data).", "_____no_output_____" ] ], [ [ "print(raw.info)", "_____no_output_____" ], [ "sampling_freq = raw.info['sfreq'] \nnchans = raw.info['nchan']\n\nprint('The sampling frequency of our data is %d'%(sampling_freq))\nprint('Here is our list of %d channels: '%nchans)\nprint(raw.ch_names)", "_____no_output_____" ], [ "eeg_colors = {'eeg': 'k', 'eog': 'steelblue'}\nfig = raw.plot(show=False, color=eeg_colors, scalings='auto');\nfig.set_figwidth(8)\nfig.set_figheight(4)\n", "_____no_output_____" ] ], [ [ "<a id=\"plots2\"></a>\n# 2. Let's make some plots!\n\nMNE-python makes creating some plots *super easy*, which is great for data quality checking, exploration, and eventually manuscript figure generation. For example, one might wish to plot the power spectral density (PSD), which shows how the power of the signal is distributed across frequencies.\n\n## 2.2. Power spectral density", "_____no_output_____" ] ], [ [ "raw.plot_psd();", "_____no_output_____" ] ], [ [ "## 2.3. Sensor positions (for EEG)\n\nFor EEG, MNE-python also has convenient functions for showing the location of the sensors used. Here, we have a 64-channel montage. You can also use this information to help interpret some of your plots if you're plotting a single channel or a group of channels.\n\nFor ECoG, we will not be plotting sensors in this way. If you would like to read more about that process, please see [this tutorial](https://mne.tools/stable/auto_tutorials/misc/plot_ecog.html). You can also check out [Noah Benson's session](https://neurohackademy.org/course/introduction-to-the-geometry-and-structure-of-the-human-brain/) (happening in parallel with this tutorial!) for plotting 3D brains.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n    raw.plot_sensors(kind='topomap',show_names=True);\n", "_____no_output_____" ] ], [ [ "Ok, awesome! So now we know where the sensors are, how densely they tile the space, and what their names are. *Knowledge = Power!*\n\nSo what if we wanted to look at the power spectral density plot we saw above by channel? We can use `plot_psd_topo` for that! There are also customizable options for playing with the colors.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n    raw.plot_psd_topo(fig_facecolor='w', axis_facecolor='w', color='k');", "_____no_output_____" ] ], [ [ "Finally, this one works for both EEG and ECoG. Here we are looking at the power spectral density plot again, but taking the average across channels and showing +/- 1 standard deviation from the mean. ", "_____no_output_____" ] ], [ [ "raw.plot_psd(area_mode='std', average=True);", "_____no_output_____" ] ], [ [ "Lastly, we can plot these same figures using a narrower frequency range, and looking at a smaller set of channels using `picks`. For `plot_psd` and other functions, `picks` is a list of integer indices corresponding to your channels of interest. You can choose these by their number, or you can use the convenient `mne.pick_channels` function to choose them by name. For example, in EEG, we often see strong responses to auditory stimuli at the top of the head, so here we will restrict our EEG channels to a few at the top of the head at the midline. 
For ECoG, we are more likely to see responses to auditory stimuli in temporal lobe electrodes (potentially RPPST, RPST, RMST, RAST), so we'll try those.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n picks = mne.pick_channels(raw.ch_names, include=['Pz','CPz','Cz','FCz','Fz','C1','C2','FC1','FC2','CP1','CP2'])\nelif data_type == 'ecog':\n picks = mne.pick_channels(raw.ch_names, include=['RPPST9','RPPST10','RPPST11'])\n\nraw.plot_psd(picks = picks, fmin=1, fmax=raw.info['sfreq']/2, xscale='log');", "_____no_output_____" ] ], [ [ "## Plotting responses to events\n\nOk, so this is all well and good. We can plot our raw data, the power spectrum, and the locations of the sensors. But what if we care about responses to the stimuli we described above? What if we want to look at responses to specific sentences, or the average response across all sentences, or something else? How can we determine which EEG sensors or ECoG electrodes respond to the speech stimuli?\n\nEnter.... *Epoching!* MNE-python gives you a very convenient way of rearranging your data according to events of interest. These can actually even be found automatically from a stimulus channel, if you have one (using [`mne.find_events`](https://mne.tools/stable/generated/mne.find_events.html)), which we won't use here because we already have the timings from another procedure. You can also find other types of epochs, like those based on EMG or [eye movements (EOG)](https://mne.tools/stable/generated/mne.preprocessing.find_eog_events.html). \n\nHere, we will load our event files (ending with `_eve.txt`). These contain information about the start sample, stop sample, and event ID for each stimulus. Each row in the file is one stimulus. The timings are in samples rather than in seconds, so if you are creating these on your own, pay attention to your sampling rate (in `raw.info['sfreq']`).", "_____no_output_____" ] ], [ [ "# Load some events. The format of these is start sample, end sample, and event ID.\nevents = mne.read_events(event_files[data_type])\nprint(events)\n\nnum_events = len(events)\nunique_stimuli = np.unique(np.array(events)[:,2])\nnum_unique = len(unique_stimuli)\nprint('There are %d total events, corresponding to %d unique stimuli'%(num_events, num_unique))", "_____no_output_____" ] ], [ [ "## Epochs\n\nGreat. So now that we have the events, we will \"epoch\" our data, which basically uses these timings to split up our data into trials of a given length. We will also set some parameters for data rejection to get rid of noisy trials. ", "_____no_output_____" ] ], [ [ "# Set some rejection criteria. This will be based on the peak-to-peak\n# amplitude of your data.\n\nif data_type=='eeg':\n reject = {'eeg': 60e-6} # Higher than peak to peak amplitude of 60 µV will be rejected\n scalings = None\n units = None\nelif data_type=='ecog':\n reject = {'ecog': 10} # Higher than Z-score of 10 will be rejected\n scalings = {'ecog': 1} # Don't rescale these as if they should be in µV\n units = {'ecog': 'Z-score'}\n ", "_____no_output_____" ], [ "tmin = -0.2\ntmax = 1.0\nepochs = mne.Epochs(raw, events, tmin=tmin, tmax=tmax, baseline=(None, 0), reject=reject, verbose=True)", "_____no_output_____" ] ], [ [ "So what's in this epochs data structure? If we look at it, we can see that we have an entry for each event ID, and we can see how many times that stimulus was played. 
You can also see whether baseline correction was done and for what time period, and whether any data was rejected.", "_____no_output_____" ] ], [ [ "epochs", "_____no_output_____" ] ], [ [ "Now, you could decide at this point that you just want to work with the data directly as a numpy array. Luckily, that's super easy to do! We can just call `get_data()` on our epochs data structure, and this will output a matrix of `[events x channels x time points]`. If you do not limit the channel type, you will get all of them (including any EOG, stimulus channels, or other non-EEG/ECoG channels).", "_____no_output_____" ] ], [ [ "ep_data = epochs.get_data()\nprint(ep_data.shape)", "_____no_output_____" ] ], [ [ "## Plotting Epoched data\n\nOk... so we are getting ahead of ourselves. MNE-python provides a lot of ways to plot our data so that we don't have to deal with writing functions to do this ourselves! For example, if we'd like to plot the EEG/ECoG for all of the single trials we just loaded, along with an average across all of these trials (and channels of interest), we can do that easily with `epochs.plot_image()`.", "_____no_output_____" ] ], [ [ "epochs.plot_image(combine='mean', scalings=scalings, units=units)", "_____no_output_____" ] ], [ [ "As before, we can choose specific channels to look at instead of looking at all of them at once. For which method do you think this would make the most difference? Why? ", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n picks = mne.pick_channels(raw.ch_names, include=['Fz','FCz','Cz','CPz','Pz'])\nelif data_type == 'ecog':\n picks = mne.pick_channels(raw.ch_names, include=['RPPST9','RPPST10','RPPST11'])\n \nepochs.plot_image(picks = picks, combine='mean', scalings=scalings, units=units)", "_____no_output_____" ] ], [ [ "We can also sort the trials, if we would like. This can be very convenient if you have reaction times or some other portion of the trial where reordering would make sense. Here, we'll just pick a channel and order by the mean activity within each trial.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n picks = mne.pick_channels(raw.ch_names, include=['CP6'])\nelif data_type == 'ecog':\n picks = mne.pick_channels(raw.ch_names, include=['RPPST2'])\n\n# Get the data as a numpy array\neps_data = epochs.get_data()\n\n# Sort the data \nnew_order = eps_data[:,picks[0],:].mean(1).argsort(0)\n\nepochs.plot_image(picks=picks, order=new_order, scalings=scalings, units=units)", "_____no_output_____" ] ], [ [ "## Other ways to view epoched data\n\nFor EEG, another way to view these epochs by trial is using the scalp topography information. This allows us to quickly assess differences across the scalp in response to the stimuli. What do you notice about the responses?", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n epochs.plot_topo_image(vmin=-30, vmax=30, fig_facecolor='w',font_color='k');", "_____no_output_____" ] ], [ [ "## Comparing epochs of different trial types\n\nSo far we have just shown averages of activity across many different sentences. However, as mentioned above, the sentences come from multiple male and female talkers. So -- one quick split we could try is just to compare the responses to female vs. male talkers. This is relatively simple with the TIMIT stimuli because their file name starts with \"f\" or \"m\" to indicate this. 
", "_____no_output_____" ] ], [ [ "# Make lists of the event ID numbers corresponding to \"f\" and \"m\" sentences\nf_evs = []\nm_evs = []\nfor k in event_id.keys():\n if k[0] == 'f':\n f_evs.append(event_id[k])\n elif k[0] == 'm':\n m_evs.append(event_id[k])\n\nprint(unique_stimuli)\nf_evs_new = [v for v in f_evs if v in unique_stimuli]\nm_evs_new = [v for v in m_evs if v in unique_stimuli]\n\n# Epoch the data separately for \"f\" and \"m\" epochs\nf_epochs = mne.Epochs(raw, events, event_id=f_evs_new, tmin=tmin, tmax=tmax, reject=reject)\nm_epochs = mne.Epochs(raw, events, event_id=m_evs_new, tmin=tmin, tmax=tmax, reject=reject)", "_____no_output_____" ] ], [ [ "Now we can plot the epochs just as we did above.", "_____no_output_____" ] ], [ [ "f_epochs.plot_image(combine='mean', show=False, scalings=scalings, units=units)\nm_epochs.plot_image(combine='mean', show=False, scalings=scalings, units=units)", "_____no_output_____" ] ], [ [ "Cool! So now we have a separate plot for the \"f\" and \"m\" talkers. However, it's not super convenient to compare the traces this way... we kind of want them on the same axis. MNE easily allows us to do this too! Instead of using the epochs, we can create `evoked` data structures, which are averaged epochs. You can [read more about evoked data structures here](https://mne.tools/dev/auto_tutorials/evoked/plot_10_evoked_overview.html).\n\n## Compare evoked data", "_____no_output_____" ] ], [ [ "evokeds = {'female': f_epochs.average(), 'male': m_epochs.average()}\nmne.viz.plot_compare_evokeds(evokeds, show_sensors='upper right',picks=picks);", "_____no_output_____" ] ], [ [ "If we actually want errorbars on this plot, we need to do this a bit differently. We can use the `iter_evoked()` method on our epochs structures to create a dictionary of conditions for which we will plot our comparisons with `plot_compare_evokeds`.", "_____no_output_____" ] ], [ [ "evokeds = {'f':list(f_epochs.iter_evoked()), 'm':list(m_epochs.iter_evoked())}\nmne.viz.plot_compare_evokeds(evokeds, picks=picks);\n", "_____no_output_____" ] ], [ [ "## Plotting scalp topography\n\nFor EEG, another common plot you may see is a topographic map showing activity (or other data like p-values, or differences between conditions). In this example, we'll show the activity at -0.2, 0, 0.1, 0.2, 0.3, and 1 second. You can also of course choose just one time to look at.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n times=[tmin, 0, 0.1, 0.2, 0.3, tmax]\n epochs.average().plot_topomap(times, ch_type='eeg', cmap='PRGn', res=32,\n outlines='skirt', time_unit='s');", "_____no_output_____" ] ], [ [ "We can also plot arbitrary data using `mne.viz.plot_topomap`, and passing in a vector of data matching the number of EEG channels, and `raw.info` to give specifics on those channel locations.", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n chans = mne.pick_types(raw.info, eeg=True)\n data = np.random.randn(len(chans),)\n plt.figure()\n mne.viz.plot_topomap(data, raw.info, show=True)\n", "_____no_output_____" ] ], [ [ "We can even animate these topo maps! This won't work well in jupyterhub, but feel free to try on your own!", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n fig,anim=epochs.average().animate_topomap(blit=False, times=np.linspace(tmin, tmax, 100))", "_____no_output_____" ] ], [ [ "## A few more fancy EEG plots\n\nIf we want to get especially fancy, we can also use `plot_joint` with our evoked data (or averaged epoched data, as shown below). 
This allows us to combine the ERPs for individual channels with topographic maps at time points that we specify. Pretty awesome!", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n epochs.average().plot_joint(picks='eeg', times=[0.1, 0.2, 0.3])", "_____no_output_____" ] ], [ [ "# What if I need more control? - matplotlib alternatives\n\nIf you feel you need more specific control over your plots, it's easy to get the data into a usable format for plotting with matplotlib. You can export both the raw and epoched data using the `get_data()` function, which will allow you to save your data as a numpy array `[ntrials x nchannels x ntimepoints]`.\n\nThen, you can do whatever you want with the data! Throw it into matplotlib, use seaborn, or whatever your heart desires!", "_____no_output_____" ] ], [ [ "if data_type == 'eeg':\n picks = mne.pick_channels(raw.ch_names, include=['Fz','FCz','Cz','CPz','Pz'])\nelif data_type == 'ecog':\n picks = mne.pick_channels(raw.ch_names, include=['RPPST9','RPPST10','RPPST11'])\n \nf_data = f_epochs.get_data(picks=picks)\nm_data = m_epochs.get_data(picks=picks)\ntimes = f_epochs.times\n\nprint(f_data.shape)", "_____no_output_____" ] ], [ [ "## Plot evoked data with errorbars \n\nWe can recreate some similar plots to those in MNE-python with some of the matplotlib functions. Here we'll create something similar to what was plotted in `plot_compare_evokeds`.", "_____no_output_____" ] ], [ [ "def plot_errorbar(x, ydata, label=None, axlines=True, alpha=0.5, **kwargs):\n '''\n Plot the mean +/- standard error of ydata.\n Inputs:\n x : vector of x values\n ydata : matrix of your data (this will be averaged along the 0th dimension)\n label : A string containing the label for this plot\n axlines : [bool], whether to draw the horizontal and vertical axes\n alpha: opacity of the standard error area\n '''\n ymean = ydata.mean(0)\n ystderr = ydata.std(0)/np.sqrt(ydata.shape[0])\n plt.plot(x, ydata.mean(0), label=label, **kwargs)\n plt.fill_between(x, ymean+ystderr, ymean-ystderr, alpha=alpha, **kwargs)\n if axlines:\n plt.axvline(0, color='k', linestyle='--')\n plt.axhline(0, color='k', linestyle='--')\n plt.gca().set_xlim([x.min(), x.max()])", "_____no_output_____" ], [ "plt.figure()\nplot_errorbar(times, f_data.mean(0), label='female')\nplot_errorbar(times, m_data.mean(0), label='male')\nplt.xlabel('Time (s)')\nplt.ylabel('Z-scored high gamma')\nplt.legend()", "_____no_output_____" ] ], [ [ "## ECoG Exercise:\n\n1. If you wanted to look at each ECoG electrode individually to find which ones have responses to the speech data, how would you do this?\n2. Can you plot the comparison between \"f\" and \"m\" trials for each electrode as a subplot (try using `plt.subplot()` from `matplotlib`)", "_____no_output_____" ] ], [ [ "# Get the data for f trials\n\n# Get the data for m trials\n\n# Loop through each channel, and create a set of subplots for each", "_____no_output_____" ] ], [ [ "# Hooray, the End!\n\nYou did it! Go forth and use MNE-python in your own projects, or even contribute to the code! 🧠", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0dde346917bd09161aab43f0e61fb78dff95c84
9,400
ipynb
Jupyter Notebook
nbs/03_binaryHorsePoo.ipynb
SubmitCode/PooDetector
54148783488c69c5f6429121686fd35bb00d0d1e
[ "Apache-2.0" ]
null
null
null
nbs/03_binaryHorsePoo.ipynb
SubmitCode/PooDetector
54148783488c69c5f6429121686fd35bb00d0d1e
[ "Apache-2.0" ]
3
2021-01-18T06:57:43.000Z
2022-02-26T06:14:47.000Z
nbs/03_binaryHorsePoo.ipynb
SubmitCode/PooDetector
54148783488c69c5f6429121686fd35bb00d0d1e
[ "Apache-2.0" ]
null
null
null
26.038781
208
0.531277
[ [ [ "# all_no_testing\n# default_exp models.binaryClassification\n# default_cls_lvl 2", "_____no_output_____" ] ], [ [ "# Binary Horse Poo Model\n\n> Simple model to detect HorsePoo vs noHorsePoo", "_____no_output_____" ], [ "## export data", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "#!rm -R data/tmp/horse_poo/ && rm -R data/tmp/no_horse_poo/ ", "_____no_output_____" ], [ "#!prodigy db-out binary_horse_poo ./data/tmp", "_____no_output_____" ] ], [ [ "## Description\nWith this model we will start of with a very simple binary classification. We will try to use most of the default settings from fastai. This will also be our benchmark model for further investigations. ", "_____no_output_____" ] ], [ [ "#export\nfrom fastai.vision import * \nfrom fastai.callbacks import EarlyStoppingCallback\nfrom prodigy.util import read_jsonl, write_jsonl\nfrom prodigy.components.db import connect\nfrom PooDetector.dataset_operations import extract_jsonl_to_binary_folders\nimport os\nimport shutil\nfrom fastscript import *", "_____no_output_____" ], [ "#export\ndef prepare_data(fld_input:str='data/tmp', bs=256):\n \"\"\"function to get a fastai databunch which can be used for training\"\"\"\n #tfms = get_transforms(do_flip=False, max_zoom=1, max_warp=None)\n #t_tfms = []\n #t_tfms.append(flip_lr(p=0.5))\n #t_tfms.append(symmetric_warp(magnitude=(-0.2,0.2), p=0.75))\n #t_tfms.append(rotate(degrees=(-10,10), p=0.75))\n #t_tfms.append(rand_zoom(scale=(1.,1.1), p=0.75))\n #t_tfms.append(brightness(change=(0.5*(1-0.2), 0.5*(1+0.2)), p=0.75))\n #t_tfms.append(contrast(scale=(1-0.2, 1/(1-0.2)), p=0.75))\n #tfms = (t_tfms , [])\n tfms = get_transforms()\n return (ImageList.from_folder(fld_input)\n .split_by_rand_pct(0.2) \n .label_from_folder()\n .transform(tfms, size=224)\n .databunch(bs=bs)\n .normalize(imagenet_stats))\n", "_____no_output_____" ], [ "#no_testing\ndata = prepare_data(fld_input='test_data/', bs=16)", "_____no_output_____" ], [ "#no_testing\ndata.show_batch()", "_____no_output_____" ], [ "#export \ndef get_learner(data:ImageDataBunch=None, model:Module=None):\n \"\"\"get a lerner object for training\"\"\"\n if data is None:\n data = prepare_data()\n if model is None:\n model = models.resnet50\n \n early_stopping = partial(EarlyStoppingCallback, min_delta=0.005, patience=8)\n \n return cnn_learner(data, base_arch=model, callback_fns=[early_stopping])", "_____no_output_____" ], [ "#no_testing\nlearn = get_learner(data=data)", "_____no_output_____" ], [ "#no_testing\nlearn.fit_one_cycle(2, 5e-2)\n#learn.fit_one_cycle(2, 5e-2)\nlearn.save('stage1')", "_____no_output_____" ], [ "#no_testing\nlearn.export()", "_____no_output_____" ], [ "#export \n@call_parse\ndef train_model(path_jsonl:Param(\"path to jsonl file\", str)='test_data/binary_horse_poo.jsonl',\n cycles_to_fit:Param(\"number of cycles to fit\", int)=10, \n bs:Param(\"batch size\", int)=128,\n label:Param(\"positive label for binary classification\", str)=\"horse_poo\"\n ):\n \"\"\"start training a new model with early stopping and export it\"\"\"\n path_jsonl = Path(path_jsonl)\n if path_jsonl.exists():\n path_jsonl.unlink()\n \n db = connect() # uses settings from your prodigy.json\n images = db.get_dataset(\"binary_horse_poo\")\n write_jsonl(path_jsonl, images)\n \n remove_subfolders(str(path_jsonl.parent))\n \n extract_jsonl_to_binary_folders(str(path_jsonl), label)\n \n data = prepare_data(path_jsonl.parent, bs=bs)\n \n learn = get_learner(data)\n learn.fit_one_cycle(cycles_to_fit, 
5e-2)\n learn.export()\n return learn\n", "_____no_output_____" ], [ "#export \ndef remove_subfolders(path_parent:[Path, str]):\n \"\"\"remove all subfolders\"\"\"\n path_parent = Path(path_parent)\n for root, dirs, files in os.walk(str(path_parent), topdown=False):\n for directory in dirs:\n print(f\"remove {str(Path(root) / Path(directory))}\")\n shutil.rmtree(str(Path(root) / Path(directory)))\n ", "_____no_output_____" ], [ "#no_testing\npath = Path('test_data/tmp/')\n\nif os.path.exists(str(path)) is False:\n os.mkdir(str(path))\n \nif os.path.exists(str(path / 'horse')) is False:\n os.mkdir(str(path / 'horse'))\n \nif os.path.exists(str(path / 'no_horse')) is False:\n os.mkdir(str(path / 'no_horse'))\n \nassert os.path.exists(str(path))\nassert os.path.exists(str(path / 'horse'))\nassert os.path.exists(str(path / 'no_horse'))\n\nremove_subfolders(str(path))\n\nassert not os.path.exists(str(path / 'horse'))\nassert not os.path.exists(str(path / 'no_horse'))\n\n", "_____no_output_____" ], [ "# prepare test\npath_jsonl = 'test_data/binary_horse_poo.jsonl' \npath_jsonl = Path(path_jsonl)\n\nif os.path.exists('test_data/tmp') is False:\n os.mkdir('test_data/tmp')\n\npath_fld_target = path_jsonl.parent / 'tmp'\n\nshutil.copy(str(path_jsonl), str(path_fld_target) )\npath_jsonl = path_fld_target / path_jsonl.name\nassert os.path.exists(path_jsonl)\n\n#test\n#learn = train_model(path_jsonl=path_jsonl, cycles_to_fit=2, bs=4)\n \n \n#assert os.path.exists(str(path_jsonl.parent / 'export.pkl'))\n", "_____no_output_____" ], [ "#no_testing\n#!prodigy db-out binary_horse_poo > data/tmp/binary_horse_poo.jsonl\npath_jsonl = 'data/tmp/binary_horse_poo.jsonl'\nlearn = train_model(path_jsonl=path_jsonl, cycles_to_fit=15, bs=128)", "_____no_output_____" ], [ "#no_testing\nlearn.unfreeze()", "_____no_output_____" ], [ "#no_testing\nlearn.fit_one_cycle(8)", "_____no_output_____" ], [ "#no_testing\nlearn.fit_one_cycle(8)", "_____no_output_____" ], [ "#no_testing\nlearn.export() ", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dde5e9c4705c3161751c282094a22f9c01726b
745
ipynb
Jupyter Notebook
Untitled.ipynb
giuliacern/Malignant-Mesothelioma
2183aa200824ef04147638bf069f2c088a0bc4ca
[ "MIT" ]
1
2020-04-22T14:18:04.000Z
2020-04-22T14:18:04.000Z
Untitled.ipynb
giuliacassara/Malignant-Mesothelioma
2183aa200824ef04147638bf069f2c088a0bc4ca
[ "MIT" ]
null
null
null
Untitled.ipynb
giuliacassara/Malignant-Mesothelioma
2183aa200824ef04147638bf069f2c088a0bc4ca
[ "MIT" ]
null
null
null
16.931818
34
0.515436
[ [ [ "import pandas as pd\nimport numpy as np\nimport csv ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0ddf22685d46a120baebc1e48822c7fa21bc138
20,921
ipynb
Jupyter Notebook
transformer-mdn-kanji.ipynb
cpmpercussion/keras-transformer-mdn
ae30186fe7bb9b142f85757df519970858151400
[ "MIT" ]
1
2022-02-01T16:46:29.000Z
2022-02-01T16:46:29.000Z
transformer-mdn-kanji.ipynb
cpmpercussion/keras-transformer-mdn
ae30186fe7bb9b142f85757df519970858151400
[ "MIT" ]
null
null
null
transformer-mdn-kanji.ipynb
cpmpercussion/keras-transformer-mdn
ae30186fe7bb9b142f85757df519970858151400
[ "MIT" ]
1
2022-01-28T04:49:04.000Z
2022-01-28T04:49:04.000Z
34.694859
149
0.547536
[ [ [ "# MDN-transformer with examples\n\n- What kind of data can be predicted by a mixture density network Transformer?\n - Continuous sequential data\n- Drawing data and RoboJam Touch Screem would be good examples for this, continuous values yield high resolution in 2d space.", "_____no_output_____" ], [ "# 1. Kanji Generation\n\n- Firstly, let's try modelling some drawing data for Kanji writing using an MDN-Transformer.\n\n- This work is inspired by previous work \"MDN-RNN for Kanji Generation\", hardmaru's Kanji tutorial and the original Sketch-RNN repository:\n\n - http://blog.otoro.net/2015/12/28/recurrent-net-dreams-up-fake-chinese-characters-in-vector-format-with-tensorflow/\n - https://github.com/hardmaru/sketch-rnn\n\n - The idea is to learn how to draw kanji characters from a dataset of vector representations. \n - This means learning how to move a pen in 2D space.\n - The data consists of a sequence of pen movements (loations in 2D) and whether the pen is up or down.\n - In this example, we will use one 3D MDN to model everything!\n\nWe will end up with a system that will continue writing Kanji given a short sequence, like this:\n\n", "_____no_output_____" ] ], [ [ "# Setup and modules\nimport sys\n!{sys.executable} -m pip install keras-mdn-layer \n!{sys.executable} -m pip install tensorflow\n!{sys.executable} -m pip install tensorflow-probability\n!{sys.executable} -m pip install matplotlib\n!{sys.executable} -m pip install pandas\n!{sys.executable} -m pip install svgwrite\n\nimport mdn\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D \n%matplotlib inline\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n%matplotlib notebook\n\n# Only for GPU use:\ngpus = tf.config.list_physical_devices('GPU')\nif gpus:\n try:\n # Currently, memory growth needs to be the same across GPUs\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n logical_gpus = tf.config.list_logical_devices('GPU')\n print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n except RuntimeError as e:\n # Memory growth must be set before GPUs have been initialized\n print(e)", "_____no_output_____" ] ], [ [ "### Download and process the data set", "_____no_output_____" ] ], [ [ "# Train from David Ha's Kanji dataset from Sketch-RNN: https://github.com/hardmaru/sketch-rnn-datasets\n# Other datasets in \"Sketch 3\" format should also work.\nimport urllib.request\nurl = 'https://github.com/hardmaru/sketch-rnn-datasets/raw/master/kanji/kanji.rdp25.npz' \nurllib.request.urlretrieve(url, './kanji.rdp25.npz') ", "_____no_output_____" ] ], [ [ "### Dataset\n\nIncludes about 11000 handwritten kanji characters divied into training, validation, and testing sets.", "_____no_output_____" ] ], [ [ "with np.load('./kanji.rdp25.npz', allow_pickle=True) as data:\n train_set = data['train']\n valid_set = data['valid']\n test_set = data['test']\n \nprint(\"Training kanji:\", len(train_set))\nprint(\"Validation kanji:\", len(valid_set))\nprint(\"Testing kanji:\", len(test_set))", "_____no_output_____" ], [ "# Functions for slicing up data\ndef slice_sequence_examples(sequence, num_steps):\n xs = []\n for i in range(len(sequence) - num_steps - 1):\n example = sequence[i: i + num_steps]\n xs.append(example)\n return xs\n\ndef seq_to_singleton_format(examples):\n xs = []\n ys = []\n for ex in examples:\n xs.append(ex[:SEQ_LEN])\n ys.append(ex)\n return xs, ys\n\n# Functions for 
making the data set\ndef format_dataset(x, y):\n return ({\n \"input\": x,\n \"target\": y[:, :-1, :],\n }, y[:, 1:, :])\n\ndef make_dataset(X, y):\n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n dataset = dataset.batch(batch_size)\n dataset = dataset.map(format_dataset)\n return dataset.shuffle(2048).prefetch(16).cache()", "_____no_output_____" ], [ "# Data shapes\nNUM_FEATS = 3\nSEQ_LEN = 20\ngap_len = 1\nbatch_size = 128\n\n# Prepare training data as X and Y.\nslices = []\nfor seq in train_set:\n slices += slice_sequence_examples(seq, SEQ_LEN+gap_len)\nX, y = seq_to_singleton_format(slices)\n\nX = np.array(X)\ny = np.array(y)\ntrain_ds = make_dataset(X, y)\nprint(\"Number of training examples:\")\nprint(\"X:\", X.shape)\nprint(\"y:\", y.shape)\nprint(train_ds)", "_____no_output_____" ] ], [ [ "### Constructing the MDN Transformer\n\nOur MDN Transformer has the following settings:\n- an embedding layer with positional embedding\n- a transformer encoder\n- a transformer decoder\n- a three-dimensional mixture layer with 10 mixtures\n- train for sequence length ___\n- training for ___ epochs with a batch size of ___\n\nHere's a diagram:\n", "_____no_output_____" ] ], [ [ "class PositionalEmbedding(layers.Layer):\n def __init__(self, sequence_length, input_dim, output_dim, **kwargs):\n super().__init__(**kwargs)\n self.token_embeddings = layers.Dense(output_dim)\n self.position_embeddings = layers.Embedding(\n input_dim=sequence_length, output_dim=output_dim)\n self.sequence_length = sequence_length\n self.input_dim = input_dim\n self.output_dim = output_dim\n def call(self, inputs, padding_mask=None):\n length = inputs.shape[1]\n positions = tf.range(start=0, limit=length, delta=1)\n embedded_tokens = self.token_embeddings(inputs)\n embedded_positions = self.position_embeddings(positions)\n return embedded_tokens + embedded_positions\n def get_config(self):\n config = super().get_config()\n config.update({\n \"output_dim\": self.output_dim,\n \"sequence_length\": self.sequence_length,\n \"input_dim\": self.input_dim,\n })\n return config\n\n \nclass TransformerEncoder(layers.Layer):\n def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n self.dense_dim = dense_dim\n self.num_heads = num_heads\n self.attention = layers.MultiHeadAttention(\n num_heads=num_heads, key_dim=embed_dim)\n self.dense_proj = keras.Sequential(\n [layers.Dense(dense_dim, activation=\"relu\"),\n layers.Dense(embed_dim),]\n )\n self.layernorm_1 = layers.LayerNormalization()\n self.layernorm_2 = layers.LayerNormalization()\n def call(self, inputs, mask=None):\n attention_output = self.attention(inputs, inputs, attention_mask=mask)\n proj_input = self.layernorm_1(inputs + attention_output)\n proj_output = self.dense_proj(proj_input)\n return self.layernorm_2(proj_input + proj_output)\n def get_config(self):\n config = super().get_config()\n config.update({\n \"embed_dim\": self.embed_dim,\n \"num_heads\": self.num_heads,\n \"dense_dim\": self.dense_dim,\n })\n return config\n\n\nclass TransformerDecoder(layers.Layer):\n def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):\n super().__init__(**kwargs)\n self.embed_dim = embed_dim\n self.dense_dim = dense_dim\n self.num_heads = num_heads\n self.attention_1 = layers.MultiHeadAttention(\n num_heads=num_heads, key_dim=embed_dim)\n self.attention_2 = layers.MultiHeadAttention(\n num_heads=num_heads, key_dim=embed_dim)\n self.dense_proj = keras.Sequential(\n [layers.Dense(dense_dim, 
activation=\"relu\"),\n layers.Dense(embed_dim),]\n )\n self.layernorm_1 = layers.LayerNormalization()\n self.layernorm_2 = layers.LayerNormalization()\n self.layernorm_3 = layers.LayerNormalization()\n self.supports_masking = True\n def get_causal_attention_mask(self, inputs):\n input_shape = tf.shape(inputs)\n batch_size, sequence_length = input_shape[0], input_shape[1]\n i = tf.range(sequence_length)[:, tf.newaxis]\n j = tf.range(sequence_length)\n mask = tf.cast(i >= j, dtype=\"int32\")\n mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))\n mult = tf.concat(\n [tf.expand_dims(batch_size, -1),\n tf.constant([1, 1], dtype=tf.int32)], axis=0)\n return tf.tile(mask, mult)\n def call(self, inputs, encoder_outputs, padding_mask=None):\n causal_mask = self.get_causal_attention_mask(inputs)\n attention_output_1 = self.attention_1(\n query=inputs,\n value=inputs,\n key=inputs,\n attention_mask=causal_mask)\n attention_output_1 = self.layernorm_1(inputs + attention_output_1)\n if padding_mask==None:\n attention_output_2 = self.attention_2(\n query=attention_output_1,\n value=encoder_outputs,\n key=encoder_outputs)\n else:\n attention_output_2 = self.attention_2(\n query=attention_output_1,\n value=encoder_outputs,\n key=encoder_outputs,\n attention_mask=padding_mask)\n attention_output_2 = self.layernorm_2(\n attention_output_1 + attention_output_2)\n proj_output = self.dense_proj(attention_output_2)\n return self.layernorm_3(attention_output_2 + proj_output)\n def get_config(self):\n config = super().get_config()\n config.update({\n \"embed_dim\": self.embed_dim,\n \"num_heads\": self.num_heads,\n \"dense_dim\": self.dense_dim,\n })\n return config", "_____no_output_____" ], [ "# Training Hyperparameters:\ninput_dim = 3\nsequence_length = 20\ntarget_length = 20\nembed_dim = 256\ndense_dim = 128\nnum_heads = 2\noutput_dim = 3\nnumber_mixtures = 10\n\nEPOCHS = 20\nSEED = 2345 # set random seed for reproducibility\nrandom.seed(SEED)\nnp.random.seed(SEED)\n\nencoder_inputs = keras.Input(shape=(sequence_length, input_dim), dtype=\"float64\", name=\"input\")\nx = PositionalEmbedding(sequence_length, input_dim, embed_dim)(encoder_inputs)\nencoder_outputs = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)\nprint(encoder_outputs.shape)\n# encoder_outputs2 = TransformerEncoder(embed_dim, dense_dim, num_heads)(encoder_outputs)\n\ndecoder_inputs = keras.Input(shape=(target_length, input_dim), dtype=\"float64\", name=\"target\")\nx = PositionalEmbedding(target_length, input_dim, embed_dim)(decoder_inputs)\nx = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs)\n# x = TransformerDecoder(embed_dim, dense_dim, num_heads)(x, encoder_outputs2)\nx = layers.Dropout(0.2)(x)\ndecoder_outputs = layers.Dense(input_dim, activation=\"softmax\")(x)\noutputs = mdn.MDN(output_dim, number_mixtures) (decoder_outputs)\nmodel = keras.Model([encoder_inputs, decoder_inputs], outputs)\nmodel.compile(loss=mdn.get_mixture_loss_func(output_dim,number_mixtures), \n optimizer=keras.optimizers.Adam())\nmodel.summary()", "_____no_output_____" ], [ "callbacks = [\n keras.callbacks.ModelCheckpoint(\"full_transformer.keras\",\n save_best_only=True)\n]\n\nhistory=model.fit(train_ds, batch_size=batch_size, epochs=EPOCHS, callbacks=callbacks)\n\n!mkdir -p saved_model\nmodel.save('my_model_100_128_256_128_2_02')\n\n# print(f\"Test acc: {model.evaluate(int_test_ds)[1]:.3f}\")", "_____no_output_____" ], [ "plt.figure()\nplt.plot(history.history['loss'])\nplt.show()", "_____no_output_____" ], [ "ls 
saved_model/my_model6", "_____no_output_____" ], [ "model = keras.models.load_model('saved_model/my_model6')", "_____no_output_____" ] ], [ [ "## Generating drawings\n\nFirst we need some helper functions to view the output.", "_____no_output_____" ] ], [ [ "def zero_start_position():\n \"\"\"A zeroed out start position with pen down\"\"\"\n out = np.zeros((1, 1, 3), dtype=np.float32)\n out[0, 0, 2] = 1 # set pen down.\n return out\n\ndef generate_sketch(model, start_pos, num_points=100):\n return None\n\ndef cutoff_stroke(x):\n return np.greater(x,0.5) * 1.0\n\ndef plot_sketch(sketch_array):\n \"\"\"Plot a sketch quickly to see what it looks like.\"\"\"\n sketch_df = pd.DataFrame({'x':sketch_array.T[0],'y':sketch_array.T[1],'z':sketch_array.T[2]})\n sketch_df.x = sketch_df.x.cumsum()\n sketch_df.y = -1 * sketch_df.y.cumsum()\n # Do the plot\n fig = plt.figure(figsize=(8, 8))\n ax1 = fig.add_subplot(111)\n #ax1.scatter(sketch_df.x,sketch_df.y,marker='o', c='r', alpha=1.0)\n # Need to do something with sketch_df.z\n ax1.plot(sketch_df.x,sketch_df.y,'r-')\n plt.show()", "_____no_output_____" ] ], [ [ "## SVG Drawing Function\n\nHere are Hardmaru's drawing functions from _write-rnn-tensorflow_. Big hat tip to Hardmaru for this!\n\nHere's the source: https://github.com/hardmaru/write-rnn-tensorflow/blob/master/utils.py", "_____no_output_____" ] ], [ [ "import svgwrite\nfrom IPython.display import SVG, display\n\ndef get_bounds(data, factor):\n min_x = 0\n max_x = 0\n min_y = 0\n max_y = 0\n\n abs_x = 0\n abs_y = 0\n for i in range(len(data)):\n x = float(data[i, 0]) / factor\n y = float(data[i, 1]) / factor\n abs_x += x\n abs_y += y\n min_x = min(min_x, abs_x)\n min_y = min(min_y, abs_y)\n max_x = max(max_x, abs_x)\n max_y = max(max_y, abs_y)\n\n return (min_x, max_x, min_y, max_y)\n\ndef draw_strokes(data, factor=1, svg_filename='sample.svg'):\n min_x, max_x, min_y, max_y = get_bounds(data, factor)\n dims = (50 + max_x - min_x, 50 + max_y - min_y)\n\n dwg = svgwrite.Drawing(svg_filename, size=dims)\n dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))\n\n lift_pen = 1\n\n abs_x = 25 - min_x\n abs_y = 25 - min_y\n p = \"M%s,%s \" % (abs_x, abs_y)\n\n command = \"m\"\n\n for i in range(len(data)):\n if (lift_pen == 1):\n command = \"m\"\n elif (command != \"l\"):\n command = \"l\"\n else:\n command = \"\"\n x = float(data[i, 0]) / factor\n y = float(data[i, 1]) / factor\n lift_pen = data[i, 2]\n p += command + str(x) + \",\" + str(y) + \" \"\n\n the_color = \"black\"\n stroke_width = 1\n\n dwg.add(dwg.path(p).stroke(the_color, stroke_width).fill(\"none\"))\n\n dwg.save()\n display(SVG(dwg.tostring()))", "_____no_output_____" ], [ "original = valid_set[0]\nx0 = np.array([valid_set[0][:SEQ_LEN]])\ny0 = x0\n# y0 = np.array([valid_set[0][:(SEQ_LEN+9)]])", "_____no_output_____" ], [ "# Predict a character and plot the result.\npi_temperature = 3 # seems to work well with rather high temperature (2.5)\nsigma_temp = 0.1 # seems to work well with low temp\n\n### Generation using one example from the validation set as the seed\np = x0\nsketch = p\n\nfor i in range(100):\n params = model.predict([p, p])\n # sample from the last decoder timestep (the output has target_length steps)\n out = mdn.sample_from_output(params[0][-1], output_dim, number_mixtures, temp=pi_temperature, sigma_temp=sigma_temp)\n p = np.concatenate((p[:,1:],np.array([out])), axis=1)\n sketch = np.concatenate((sketch, np.array([out])), axis=1)\n\nsketch.T[2] = cutoff_stroke(sketch.T[2])\ndraw_strokes(sketch[0], factor=0.5)\ndraw_strokes(x0[0], factor=0.5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0ddf5213aba6d9bfcac6b5adb1400d05dc99524
21,391
ipynb
Jupyter Notebook
6_WeatherPy_with_Python_APIs/Challenge_P3_Vacation_Itinerary.ipynb
razariah/UC_Berkeley_Data_Analytics
885814e059efb0510e7f88ec60691b5a2fc8897b
[ "MIT" ]
null
null
null
6_WeatherPy_with_Python_APIs/Challenge_P3_Vacation_Itinerary.ipynb
razariah/UC_Berkeley_Data_Analytics
885814e059efb0510e7f88ec60691b5a2fc8897b
[ "MIT" ]
null
null
null
6_WeatherPy_with_Python_APIs/Challenge_P3_Vacation_Itinerary.ipynb
razariah/UC_Berkeley_Data_Analytics
885814e059efb0510e7f88ec60691b5a2fc8897b
[ "MIT" ]
null
null
null
34.170927
124
0.335234
[ [ [ "import pandas as pd\nimport random\nfile = pd.read_csv('WeatherPy_vacation.csv')", "_____no_output_____" ], [ "vacation_search = file.copy().iloc[:,1:]\nvacation_search", "_____no_output_____" ], [ "cities = vacation_search.iloc[[0,1,2,4],:].reset_index().iloc[:,1:]\ncities", "_____no_output_____" ], [ "#### HOW TO ADD THIS LAYER (MARKER LAYER) INTO THE MAP BELOW WITH THE DIRECTIONS\n\n# Set up marker layer for #10 in part 3\nc_s = cities.copy()\nc_s['Location'] = [(Lat, Lng) for Lat, Lng in zip(c_s.Lat, c_s.Lng)]\nc_s = c_s.to_dict('r')\n\ncity_locations = [c['Location'] for c in c_s]\ninfo_box_template = \"\"\"\n<dl>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n<dt>Hotel</dt><dd>{Hotel}</dd>\n<dt>Max Temp</dt><dd>{Max_Temp}</dd>\n<dt>Humidity</dt><dd>{Humidity}</dd>\n<dt>Cloudiness</dt><dd>{Cloudiness}</dd>\n<dt>Rain</dt><dd>{Rain_Inches}</dd>\n<dt>Snow</dt><dd>{Snow_Inches}</dd>\n</dl>\n\"\"\"\n\ncity_info = [info_box_template.format(**c) for c in c_s]", "_____no_output_____" ], [ "## From gmaps documentation\nimport gmaps\nimport gmaps.datasets\ngmaps.configure(api_key='AIzaSyCuOo58QNcG7KZUczdwzq8p0HZWCaFg-_M')\n\n# Latitude-longitude pairs\nbalaipungut = (cities.loc[0, 'Lat'], cities.loc[0, 'Lng'])\nca_mau = (cities.loc[1, 'Lat'], cities.loc[1, 'Lng'])\ncao_bang = (cities.loc[2, 'Lat'], cities.loc[2, 'Lng'])\ndwarka = (cities.loc[3, 'Lat'], cities.loc[3, 'Lng'])\n\nfig = gmaps.figure()\ntravel_itinerary = gmaps.directions_layer(balaipungut, dwarka, waypoints=[ca_mau, cao_bang], travel_mode='DRIVING')\n\n### MARKER LAYER IS NOT ADDING TO THE GMAPS\nmarker_layer = gmaps.marker_layer(city_locations, info_box_content=city_info)\n\nfig.add_layer(marker_layer)\nfig.add_layer(travel_itinerary)\n\nfig", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0ddf942b242aa98a9231e9cf54c5c6cbd076fc8
529,136
ipynb
Jupyter Notebook
notebooks/inefficiency-analysis-pandas.ipynb
tarcisobraz/trips-optimality-exp
78ed0204eaf2eff87983447d26d619250cc6069d
[ "CNRI-Python", "Naumen", "Condor-1.1", "MS-PL" ]
1
2019-05-11T13:58:15.000Z
2019-05-11T13:58:15.000Z
notebooks/inefficiency-analysis-pandas.ipynb
tarcisocomp/trips-optimality-exp
78ed0204eaf2eff87983447d26d619250cc6069d
[ "CNRI-Python", "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
notebooks/inefficiency-analysis-pandas.ipynb
tarcisocomp/trips-optimality-exp
78ed0204eaf2eff87983447d26d619250cc6069d
[ "CNRI-Python", "Naumen", "Condor-1.1", "MS-PL" ]
null
null
null
99.724086
20,902
0.758988
[ [ [ "### Imports", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\n\n#Python Standard Libs Imports\nimport json\nimport urllib2\nimport sys\nfrom datetime import datetime\nfrom os.path import isfile, join, splitext\nfrom glob import glob\n\n#Imports to enable visualizations\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Functions", "_____no_output_____" ], [ "#### Basic Functions", "_____no_output_____" ], [ "#### OTP Functions", "_____no_output_____" ], [ "#### Analysis Functions", "_____no_output_____" ], [ "### Main Code", "_____no_output_____" ], [ "#### Reading itinerary alternatives data", "_____no_output_____" ] ], [ [ "all_itineraries = pd.read_csv('/local/tarciso/data/its/itineraries/all_itineraries.csv', parse_dates=['planned_start_time','actual_start_time','exec_start_time'])", "_____no_output_____" ], [ "all_itineraries.head()", "_____no_output_____" ], [ "all_itineraries.dtypes", "_____no_output_____" ], [ "len(all_itineraries)", "_____no_output_____" ], [ "len(all_itineraries.user_trip_id.unique())", "_____no_output_____" ] ], [ [ "#### Adding metadata for further analysis", "_____no_output_____" ] ], [ [ "def get_trip_len_bucket(trip_duration):\n if (trip_duration < 10):\n return '<10'\n elif (trip_duration < 20):\n return '10-20'\n elif (trip_duration < 30):\n return '20-30'\n elif (trip_duration < 40):\n return '30-40'\n elif (trip_duration < 50):\n return '40-50'\n elif (trip_duration >= 50):\n return '50+'\n else:\n return 'NA'\n \ndef get_day_type(trip_start_time):\n trip_weekday = trip_start_time.weekday()\n if ((trip_weekday == 0) | (trip_weekday == 4)):\n return 'MON/FRI'\n elif ((trip_weekday > 0) & (trip_weekday < 4)):\n return 'TUE/WED/THU'\n elif (trip_weekday > 4):\n return 'SAT/SUN'\n else:\n return 'NA'\n\n\nall_itineraries['trip_length_bucket'] = all_itineraries['exec_duration_mins'].apply(get_trip_len_bucket)\nall_itineraries['hour_of_day'] = all_itineraries['exec_start_time'].dt.hour\n\nperiod_of_day_list = [('hour_of_day', [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]),\n ('period_of_day', ['very_late_night','very_late_night','very_late_night','very_late_night','early_morning','early_morning','early_morning','morning','morning','morning','morning','midday','midday','midday','afternoon','afternoon','afternoon','evening','evening','evening','night','night','late_night','late_night'])]\nperiod_of_day_df = pd.DataFrame.from_items(period_of_day_list)\nperiod_of_day_df.period_of_day = period_of_day_df.period_of_day.astype('category', ordered=True)\n\nall_itineraries = all_itineraries.merge(period_of_day_df, how='inner', on='hour_of_day')\nall_itineraries['weekday'] = all_itineraries['exec_start_time'].apply(lambda x: x.weekday() < 5)\nall_itineraries['day_type'] = all_itineraries['exec_start_time'].apply(get_day_type)", "_____no_output_____" ], [ "all_itineraries", "_____no_output_____" ] ], [ [ "#### Filtering trips for whose executed itineraries there is no schedule information", "_____no_output_____" ] ], [ [ "def filter_trips_alternatives(trips_alternatives):\n min_trip_dur = 10\n max_trip_dur = 50\n max_trip_start_diff = 20\n \n return trips_alternatives[(trips_alternatives['actual_duration_mins'] >= min_trip_dur) & (trips_alternatives['actual_duration_mins'] <= max_trip_dur)] \\\n .assign(start_diff = lambda x: np.absolute(x['exec_start_time'] - x['actual_start_time'])/pd.Timedelta(minutes=1)) \\\n [lambda x: x['start_diff'] <= 20]", 
"_____no_output_____" ], [ "def filter_trips_with_insufficient_alternatives(trips_alternatives):\n num_trips_alternatives = trips_alternatives.groupby(['date','user_trip_id']).size().reset_index(name='num_alternatives')\n trips_with_executed_alternative = trips_alternatives[trips_alternatives['itinerary_id'] == 0][['date','user_trip_id']]\n \n return trips_alternatives.merge(trips_with_executed_alternative, on=['date','user_trip_id'], how='inner') \\\n .merge(num_trips_alternatives, on=['date','user_trip_id'], how='inner') \\\n [lambda x: x['num_alternatives'] > 1] \\\n .sort_values(['user_trip_id','itinerary_id']) ", "_____no_output_____" ], [ "clean_itineraries = filter_trips_with_insufficient_alternatives(filter_trips_alternatives(all_itineraries))", "_____no_output_____" ], [ "clean_itineraries.head()", "_____no_output_____" ], [ "sns.distplot(clean_itineraries['start_diff'])", "_____no_output_____" ], [ "len(clean_itineraries)", "_____no_output_____" ], [ "len(clean_itineraries.user_trip_id.unique())", "_____no_output_____" ], [ "exec_itineraries_with_scheduled_info = all_itineraries[(all_itineraries['itinerary_id'] == 0) & (pd.notnull(all_itineraries['planned_duration_mins']))][['date','user_trip_id']]", "_____no_output_____" ], [ "clean_itineraries2 = filter_trips_with_insufficient_alternatives(filter_trips_alternatives(all_itineraries.merge(exec_itineraries_with_scheduled_info, on=['date','user_trip_id'], how='inner')))", "_____no_output_____" ], [ "clean_itineraries2.head()", "_____no_output_____" ], [ "len(clean_itineraries2)", "_____no_output_____" ], [ "len(clean_itineraries2.user_trip_id.unique())", "_____no_output_____" ] ], [ [ "## Compute Inefficiency Metrics", "_____no_output_____" ], [ "![title](img/math_model.png)", "_____no_output_____" ] ], [ [ "def select_best_itineraries(trips_itineraries,metric_name):\n return trips_itineraries.sort_values([metric_name]) \\\n .groupby(['date','user_trip_id']) \\\n .nth(0) \\\n .reset_index()", "_____no_output_____" ] ], [ [ "### Observed Inefficiency", "_____no_output_____" ] ], [ [ "#Choose best itinerary for each trip by selecting the ones with lower actual duration\nbest_trips_itineraries = select_best_itineraries(clean_itineraries,'actual_duration_mins')", "_____no_output_____" ], [ "best_trips_itineraries.head()", "_____no_output_____" ], [ "trips_inefficiency = best_trips_itineraries \\\n .assign(dur_diff = lambda x: x['exec_duration_mins'] - x['actual_duration_mins']) \\\n .assign(observed_inef = lambda x: x['dur_diff']/x['exec_duration_mins'])", "_____no_output_____" ], [ "trips_inefficiency.head(10)", "_____no_output_____" ], [ "sns.distplot(trips_inefficiency.observed_inef)", "_____no_output_____" ], [ "sns.violinplot(trips_inefficiency.observed_inef)", "_____no_output_____" ], [ "pos_trips_inefficiency = trips_inefficiency[trips_inefficiency['dur_diff'] > 1]", "_____no_output_____" ], [ "pos_trips_inefficiency.head()", "_____no_output_____" ] ], [ [ "#### Number of Trips with/without improvent per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trips_per_length = trips_inefficiency.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length", "_____no_output_____" ], [ "trips_per_length_improved = pos_trips_inefficiency.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length_improved", "_____no_output_____" ], [ "sns.set_style(\"whitegrid\")\n\n#Plot 1 - background - \"total\" (top) series\nax = sns.barplot(x = 
trips_per_length.trip_length_bucket, y = trips_per_length.total, color = \"#66c2a5\")\n\n#Plot 2 - overlay - \"bottom\" series\nbottom_plot = sns.barplot(x = trips_per_length_improved.trip_length_bucket, y = trips_per_length_improved.total, color = \"#fc8d62\")\n\nbottom_plot.set(xlabel='Trip Length (minutes)',ylabel='Number of Trips')\n\ntopbar = plt.Rectangle((0,0),1,1,fc=\"#66c2a5\", edgecolor = 'none')\nbottombar = plt.Rectangle((0,0),1,1,fc='#fc8d62', edgecolor = 'none')\nl = plt.legend([bottombar, topbar], ['Not Optimal', 'Optimal'], bbox_to_anchor=(0, 1.2), loc=2, ncol = 2, prop={'size':10})\nl.draw_frame(False)\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/trip_length_by_optimality.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trip_len_order=['10-20','20-30','30-40','40-50']\nax = sns.boxplot(x='observed_inef',y='trip_length_bucket', orient='h', data=pos_trips_inefficiency, order=trip_len_order, color='#fc8d62')\nax.set(xlabel='Inefficiency (%)',ylabel='Trip Length')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_trip_length.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Period of Day", "_____no_output_____" ] ], [ [ "period_of_day_order = ['very_late_night','early_morning','morning','midday','afternoon','evening','night','late_night']\nax = sns.boxplot(x='observed_inef',y='period_of_day', data=pos_trips_inefficiency, order=period_of_day_order, color='#fc8d62')\nax.set(xlabel='Inefficiency (%)',ylabel='Period of Day')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Weekday/Weekend", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='observed_inef', hue='weekday', data=pos_trips_inefficiency, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Day Type", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='observed_inef', hue='day_type', data=pos_trips_inefficiency, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "### Schedule Inefficiency", "_____no_output_____" ] ], [ [ "shortest_planned_itineraries = select_best_itineraries(clean_itineraries[pd.notnull(clean_itineraries['planned_duration_mins'])],'planned_duration_mins') \\\n [['date','user_trip_id','planned_duration_mins','actual_duration_mins']] \\\n .rename(index=str,columns={'planned_duration_mins':'shortest_scheduled_planned_duration',\n 'actual_duration_mins':'shortest_scheduled_observed_duration'})\n\nshortest_planned_itineraries.head()", "_____no_output_____" ], [ "sched_inef = best_trips_itineraries \\\n .rename(index=str,columns={'actual_duration_mins':'shortest_observed_duration'}) \\\n .merge(shortest_planned_itineraries, on=['date','user_trip_id'], how='inner') \\\n .assign(sched_dur_diff = lambda x: x['shortest_scheduled_observed_duration'] - 
x['shortest_observed_duration']) \\\n .assign(sched_inef = lambda x: x['sched_dur_diff']/x['shortest_scheduled_observed_duration'])\n\nsched_inef.head()", "_____no_output_____" ], [ "sns.distplot(sched_inef.sched_inef)", "_____no_output_____" ], [ "sns.violinplot(sched_inef.sched_inef)", "_____no_output_____" ], [ "pos_sched_inef = sched_inef[sched_inef['sched_dur_diff'] > 1]", "_____no_output_____" ], [ "sns.distplot(pos_sched_inef.sched_inef)", "_____no_output_____" ] ], [ [ "#### Number of Trips with/without improvent per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trips_per_length = sched_inef.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length", "_____no_output_____" ], [ "trips_per_length_improved = pos_sched_inef.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length_improved", "_____no_output_____" ], [ "sns.set_style(\"whitegrid\")\n\n#Plot 1 - background - \"total\" (top) series\nax = sns.barplot(x = trips_per_length.trip_length_bucket, y = trips_per_length.total, color = \"#66c2a5\")\n\n#Plot 2 - overlay - \"bottom\" series\nbottom_plot = sns.barplot(x = trips_per_length_improved.trip_length_bucket, y = trips_per_length_improved.total, color = \"#fc8d62\")\n\nbottom_plot.set(xlabel='Trip Length (minutes)',ylabel='Number of Trips')\n\ntopbar = plt.Rectangle((0,0),1,1,fc=\"#66c2a5\", edgecolor = 'none')\nbottombar = plt.Rectangle((0,0),1,1,fc='#fc8d62', edgecolor = 'none')\nl = plt.legend([bottombar, topbar], ['Not Optimal', 'Optimal'], bbox_to_anchor=(0, 1.2), loc=2, ncol = 2, prop={'size':10})\nl.draw_frame(False)\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/trip_length_by_optimality.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trip_len_order=['10-20','20-30','30-40','40-50']\nax = sns.boxplot(x='sched_inef',y='trip_length_bucket', orient='h', data=pos_sched_inef, order=trip_len_order, color='#fc8d62')\nax.set(xlabel='Schedule Inefficiency (%)',ylabel='Trip Length')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_trip_length.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Period of Day", "_____no_output_____" ] ], [ [ "period_of_day_order = ['very_late_night','early_morning','morning','midday','afternoon','evening','night','late_night']\nax = sns.boxplot(x='sched_inef',y='period_of_day', data=pos_sched_inef, order=period_of_day_order, color='#fc8d62')\nax.set(xlabel='Schedule Inefficiency (%)',ylabel='Period of Day')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Weekday/Weekend", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='sched_inef', hue='weekday', data=pos_sched_inef, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Schedule Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Day Type", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='sched_inef', hue='day_type', data=pos_sched_inef, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Schedule 
Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "### User choice plan inefficiency", "_____no_output_____" ] ], [ [ "best_scheduled_itineraries = select_best_itineraries(clean_itineraries2,'planned_duration_mins') \\\n [['date','user_trip_id','planned_duration_mins']] \\\n .rename(index=str,columns={'planned_duration_mins':'best_planned_duration_mins'})", "_____no_output_____" ], [ "best_scheduled_itineraries.head()", "_____no_output_____" ], [ "plan_inef = clean_itineraries2.merge(best_scheduled_itineraries, on=['date','user_trip_id'], how='inner') \\\n [lambda x: x['itinerary_id'] == 0] \\\n .assign(plan_dur_diff = lambda x: x['planned_duration_mins'] - x['best_planned_duration_mins']) \\\n .assign(plan_inef = lambda x: x['plan_dur_diff']/x['planned_duration_mins'])", "_____no_output_____" ], [ "sns.distplot(plan_inef.plan_inef)", "_____no_output_____" ], [ "pos_plan_inef = plan_inef[plan_inef['plan_dur_diff'] > 1]", "_____no_output_____" ], [ "sns.distplot(pos_plan_inef.plan_inef)", "_____no_output_____" ] ], [ [ "#### Number of Trips with/without improvent per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trips_per_length = plan_inef.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length", "_____no_output_____" ], [ "trips_per_length_improved = pos_plan_inef.groupby('trip_length_bucket').size().reset_index(name='total')", "_____no_output_____" ], [ "trips_per_length_improved", "_____no_output_____" ], [ "sns.set_style(\"whitegrid\")\n\n#Plot 1 - background - \"total\" (top) series\nax = sns.barplot(x = trips_per_length.trip_length_bucket, y = trips_per_length.total, color = \"#66c2a5\")\n\n#Plot 2 - overlay - \"bottom\" series\nbottom_plot = sns.barplot(x = trips_per_length_improved.trip_length_bucket, y = trips_per_length_improved.total, color = \"#fc8d62\")\n\nbottom_plot.set(xlabel='Trip Length (minutes)',ylabel='Number of Trips')\n\ntopbar = plt.Rectangle((0,0),1,1,fc=\"#66c2a5\", edgecolor = 'none')\nbottombar = plt.Rectangle((0,0),1,1,fc='#fc8d62', edgecolor = 'none')\nl = plt.legend([bottombar, topbar], ['Not Optimal', 'Optimal'], bbox_to_anchor=(0, 1.2), loc=2, ncol = 2, prop={'size':10})\nl.draw_frame(False)\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/trip_length_by_optimality.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Trip Length Bucket", "_____no_output_____" ] ], [ [ "trip_len_order=['10-20','20-30','30-40','40-50']\nax = sns.boxplot(x='plan_inef',y='trip_length_bucket', orient='h', data=pos_plan_inef, order=trip_len_order, color='#fc8d62')\nax.set(xlabel='Plan Inefficiency (%)',ylabel='Trip Length')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_trip_length.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Period of Day", "_____no_output_____" ] ], [ [ "period_of_day_order = ['very_late_night','early_morning','morning','midday','afternoon','evening','night','late_night']\nax = sns.boxplot(x='plan_inef',y='period_of_day', data=pos_plan_inef, order=period_of_day_order, color='#fc8d62')\nax.set(xlabel='Plan Inefficiency (%)',ylabel='Period of Day')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 
3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Weekday/Weekend", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='plan_inef', hue='weekday', data=pos_plan_inef, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Plan Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### Per Day Type", "_____no_output_____" ] ], [ [ "ax = sns.barplot(x='trip_length_bucket',y='plan_inef', hue='day_type', data=pos_plan_inef, color='#fc8d62')\nax.set(xlabel='Trip Length',ylabel='Plan Inefficiency (%)')\n\nfig = ax.get_figure()\nfig.set_size_inches(4.5, 3)\n#fig.savefig('/local/tarciso/masters/data/results/imp_capacity_per_day_period.pdf', bbox_inches='tight')", "_____no_output_____" ] ], [ [ "#### System Schedule Deviation\n$$\n\\begin{equation*}\n {Oe - Op}\n\\end{equation*}\n$$", "_____no_output_____" ] ], [ [ "sched_deviation = clean_itineraries[clean_itineraries['itinerary_id'] > 0] \\\n .assign(sched_dev = lambda x: x['actual_duration_mins'] - x['planned_duration_mins'])\n \nsched_deviation.head()", "_____no_output_____" ], [ "sns.distplot(sched_deviation.sched_dev)", "_____no_output_____" ] ], [ [ "#### User stop waiting time offset\n$$\n\\begin{equation*}\n {start(Oe) - start(Op)}\n\\end{equation*}\n$$", "_____no_output_____" ] ], [ [ "user_boarding_timediff = clean_itineraries[clean_itineraries['itinerary_id'] > 0] \\\n .assign(boarding_timediff = lambda x: (x['actual_start_time'] - x['planned_start_time'])/pd.Timedelta(minutes=1))\n \nuser_boarding_timediff.head()", "_____no_output_____" ], [ "sns.distplot(user_boarding_timediff.boarding_timediff)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0de0c4616f16ecc72f425ef7cfbfd11069b0c8b
480,374
ipynb
Jupyter Notebook
notebooks/1_data_loading_colab.ipynb
valentina-s/Oceans19-data-science-tutorial
7c7ed1cc1c477598cacf112a46de224533b50d2f
[ "CC-BY-4.0" ]
null
null
null
notebooks/1_data_loading_colab.ipynb
valentina-s/Oceans19-data-science-tutorial
7c7ed1cc1c477598cacf112a46de224533b50d2f
[ "CC-BY-4.0" ]
null
null
null
notebooks/1_data_loading_colab.ipynb
valentina-s/Oceans19-data-science-tutorial
7c7ed1cc1c477598cacf112a46de224533b50d2f
[ "CC-BY-4.0" ]
null
null
null
42.405897
25,222
0.539981
[ [ [ "<a href=\"https://colab.research.google.com/github/valentina-s/Oceans19-data-science-tutorial/blob/master/notebooks/1_data_loading_colab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## Whale Sound Exploration\n\nIn this tutorial we will explore some data which contain right whale up-calls. The dataset was shared as part of a [2013 Kaggle competition](https://www.kaggle.com/c/whale-detection-challenge). Our goal is not to show the best winning algorithm to detect a call, but share a simple pipeline for processing oscillatory data, which possibly can be used on wide range of time series.\n\nObjectives:\n* read and extract features form audio data\n* apply dimensionality reduction techiques\n* perform supervised classification\n* learn how to evaluate machine learning models\n* train a neural network to detect whale calls", "_____no_output_____" ], [ "### Data Loading and Exploration\n---", "_____no_output_____" ] ], [ [ "# ignore warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\")", "_____no_output_____" ], [ "# importing multiple visualization libraries\n%matplotlib inline\nimport matplotlib.pyplot as plt\nfrom matplotlib import mlab\nimport pylab as pl\nimport seaborn", "_____no_output_____" ], [ "# importing libraries to manipulate the data files\nimport os\nfrom glob import glob", "_____no_output_____" ], [ "# importing scientific python packages\nimport numpy as np", "_____no_output_____" ], [ "# import a library to read the .aiff format\nimport aifc", "_____no_output_____" ] ], [ [ "The `train` folder contains many `.aiff` files (2 second snippets) and we have `.csv` document which contains the corresponding labels. ", "_____no_output_____" ] ], [ [ "!wget http://oceanhackweek2018.s3.amazonaws.com/oceans_data/train.tar.gz\n!wget http://oceanhackweek2018.s3.amazonaws.com/oceans_data/labels.csv", "--2019-10-31 10:19:20-- http://oceanhackweek2018.s3.amazonaws.com/oceans_data/train.tar.gz\nResolving oceanhackweek2018.s3.amazonaws.com (oceanhackweek2018.s3.amazonaws.com)... 52.218.241.34\nConnecting to oceanhackweek2018.s3.amazonaws.com (oceanhackweek2018.s3.amazonaws.com)|52.218.241.34|:80... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 64212938 (61M) [application/x-gzip]\nSaving to: ‘train.tar.gz’\n\ntrain.tar.gz 100%[===================>] 61.24M 17.3MB/s in 3.5s \n\n2019-10-31 10:19:24 (17.3 MB/s) - ‘train.tar.gz’ saved [64212938/64212938]\n\n--2019-10-31 10:19:24-- http://oceanhackweek2018.s3.amazonaws.com/oceans_data/labels.csv\nResolving oceanhackweek2018.s3.amazonaws.com (oceanhackweek2018.s3.amazonaws.com)... 52.218.234.43\nConnecting to oceanhackweek2018.s3.amazonaws.com (oceanhackweek2018.s3.amazonaws.com)|52.218.234.43|:80... connected.\nHTTP request sent, awaiting response... 
200 OK\nLength: 540016 (527K) [text/csv]\nSaving to: ‘labels.csv’\n\nlabels.csv 100%[===================>] 527.36K 786KB/s in 0.7s \n\n2019-10-31 10:19:25 (786 KB/s) - ‘labels.csv’ saved [540016/540016]\n\n" ], [ "!tar xvzf train.tar.gz", "train/\ntrain/train00001.aiff\ntrain/train00002.aiff\ntrain/train00003.aiff\ntrain/train00004.aiff\ntrain/train00005.aiff\ntrain/train00006.aiff\ntrain/train00007.aiff\ntrain/train00008.aiff\ntrain/train00009.aiff\ntrain/train00010.aiff\ntrain/train00011.aiff\ntrain/train00012.aiff\ntrain/train00013.aiff\ntrain/train00014.aiff\ntrain/train00015.aiff\ntrain/train00016.aiff\ntrain/train00017.aiff\ntrain/train00018.aiff\ntrain/train00019.aiff\ntrain/train00020.aiff\ntrain/train00021.aiff\ntrain/train00022.aiff\ntrain/train00023.aiff\ntrain/train00024.aiff\ntrain/train00025.aiff\ntrain/train00026.aiff\ntrain/train00027.aiff\ntrain/train00028.aiff\ntrain/train00029.aiff\ntrain/train00030.aiff\ntrain/train00031.aiff\ntrain/train00032.aiff\ntrain/train00033.aiff\ntrain/train00034.aiff\ntrain/train00035.aiff\ntrain/train00036.aiff\ntrain/train00037.aiff\ntrain/train00038.aiff\ntrain/train00039.aiff\ntrain/train00040.aiff\ntrain/train00041.aiff\ntrain/train00042.aiff\ntrain/train00043.aiff\ntrain/train00044.aiff\ntrain/train00045.aiff\ntrain/train00046.aiff\ntrain/train00047.aiff\ntrain/train00048.aiff\ntrain/train00049.aiff\ntrain/train00050.aiff\ntrain/train00051.aiff\ntrain/train00052.aiff\ntrain/train00053.aiff\ntrain/train00054.aiff\ntrain/train00055.aiff\ntrain/train00056.aiff\ntrain/train00057.aiff\ntrain/train00058.aiff\ntrain/train00059.aiff\ntrain/train00060.aiff\ntrain/train00061.aiff\ntrain/train00062.aiff\ntrain/train00063.aiff\ntrain/train00064.aiff\ntrain/train00065.aiff\ntrain/train00066.aiff\ntrain/train00067.aiff\ntrain/train00068.aiff\ntrain/train00069.aiff\ntrain/train00070.aiff\ntrain/train00071.aiff\ntrain/train00072.aiff\ntrain/train00073.aiff\ntrain/train00074.aiff\ntrain/train00075.aiff\ntrain/train00076.aiff\ntrain/train00077.aiff\ntrain/train00078.aiff\ntrain/train00079.aiff\ntrain/train00080.aiff\ntrain/train00081.aiff\ntrain/train00082.aiff\ntrain/train00083.aiff\ntrain/train00084.aiff\ntrain/train00085.aiff\ntrain/train00086.aiff\ntrain/train00087.aiff\ntrain/train00088.aiff\ntrain/train00089.aiff\ntrain/train00090.aiff\ntrain/train00091.aiff\ntrain/train00092.aiff\ntrain/train00093.aiff\ntrain/train00094.aiff\ntrain/train00095.aiff\ntrain/train00096.aiff\ntrain/train00097.aiff\ntrain/train00098.aiff\ntrain/train00099.aiff\ntrain/train00100.aiff\ntrain/train00101.aiff\ntrain/train00102.aiff\ntrain/train00103.aiff\ntrain/train00104.aiff\ntrain/train00105.aiff\ntrain/train00106.aiff\ntrain/train00107.aiff\ntrain/train00108.aiff\ntrain/train00109.aiff\ntrain/train00110.aiff\ntrain/train00111.aiff\ntrain/train00112.aiff\ntrain/train00113.aiff\ntrain/train00114.aiff\ntrain/train00115.aiff\ntrain/train00116.aiff\ntrain/train00117.aiff\ntrain/train00118.aiff\ntrain/train00119.aiff\ntrain/train00120.aiff\ntrain/train00121.aiff\ntrain/train00122.aiff\ntrain/train00123.aiff\ntrain/train00124.aiff\ntrain/train00125.aiff\ntrain/train00126.aiff\ntrain/train00127.aiff\ntrain/train00128.aiff\ntrain/train00129.aiff\ntrain/train00130.aiff\ntrain/train00131.aiff\ntrain/train00132.aiff\ntrain/train00133.aiff\ntrain/train00134.aiff\ntrain/train00135.aiff\ntrain/train00136.aiff\ntrain/train00137.aiff\ntrain/train00138.aiff\ntrain/train00139.aiff\ntrain/train00140.aiff\ntrain/train00141.aiff\ntrain/train00142.aiff\ntrain/train00143.aiff\ntrain/train00144.a
iff\ntrain/train00145.aiff\n[... tar extraction listing truncated: train/train00146.aiff through train/train02616.aiff omitted ...]\ntrain/train02617.ai
ff\ntrain/train02618.aiff\ntrain/train02619.aiff\ntrain/train02620.aiff\ntrain/train02621.aiff\ntrain/train02622.aiff\ntrain/train02623.aiff\ntrain/train02624.aiff\ntrain/train02625.aiff\ntrain/train02626.aiff\ntrain/train02627.aiff\ntrain/train02628.aiff\ntrain/train02629.aiff\ntrain/train02630.aiff\ntrain/train02631.aiff\ntrain/train02632.aiff\ntrain/train02633.aiff\ntrain/train02634.aiff\ntrain/train02635.aiff\ntrain/train02636.aiff\ntrain/train02637.aiff\ntrain/train02638.aiff\ntrain/train02639.aiff\ntrain/train02640.aiff\ntrain/train02641.aiff\ntrain/train02642.aiff\ntrain/train02643.aiff\ntrain/train02644.aiff\ntrain/train02645.aiff\ntrain/train02646.aiff\ntrain/train02647.aiff\ntrain/train02648.aiff\ntrain/train02649.aiff\ntrain/train02650.aiff\ntrain/train02651.aiff\ntrain/train02652.aiff\ntrain/train02653.aiff\ntrain/train02654.aiff\ntrain/train02655.aiff\ntrain/train02656.aiff\ntrain/train02657.aiff\ntrain/train02658.aiff\ntrain/train02659.aiff\ntrain/train02660.aiff\ntrain/train02661.aiff\ntrain/train02662.aiff\ntrain/train02663.aiff\ntrain/train02664.aiff\ntrain/train02665.aiff\ntrain/train02666.aiff\ntrain/train02667.aiff\ntrain/train02668.aiff\ntrain/train02669.aiff\ntrain/train02670.aiff\ntrain/train02671.aiff\ntrain/train02672.aiff\ntrain/train02673.aiff\ntrain/train02674.aiff\ntrain/train02675.aiff\ntrain/train02676.aiff\ntrain/train02677.aiff\ntrain/train02678.aiff\ntrain/train02679.aiff\ntrain/train02680.aiff\ntrain/train02681.aiff\ntrain/train02682.aiff\ntrain/train02683.aiff\ntrain/train02684.aiff\ntrain/train02685.aiff\ntrain/train02686.aiff\ntrain/train02687.aiff\ntrain/train02688.aiff\ntrain/train02689.aiff\ntrain/train02690.aiff\ntrain/train02691.aiff\ntrain/train02692.aiff\ntrain/train02693.aiff\ntrain/train02694.aiff\ntrain/train02695.aiff\ntrain/train02696.aiff\ntrain/train02697.aiff\ntrain/train02698.aiff\ntrain/train02699.aiff\ntrain/train02700.aiff\ntrain/train02701.aiff\ntrain/train02702.aiff\ntrain/train02703.aiff\ntrain/train02704.aiff\ntrain/train02705.aiff\ntrain/train02706.aiff\ntrain/train02707.aiff\ntrain/train02708.aiff\ntrain/train02709.aiff\ntrain/train02710.aiff\ntrain/train02711.aiff\ntrain/train02712.aiff\ntrain/train02713.aiff\ntrain/train02714.aiff\ntrain/train02715.aiff\ntrain/train02716.aiff\ntrain/train02717.aiff\ntrain/train02718.aiff\ntrain/train02719.aiff\ntrain/train02720.aiff\ntrain/train02721.aiff\ntrain/train02722.aiff\ntrain/train02723.aiff\ntrain/train02724.aiff\ntrain/train02725.aiff\ntrain/train02726.aiff\ntrain/train02727.aiff\ntrain/train02728.aiff\ntrain/train02729.aiff\ntrain/train02730.aiff\ntrain/train02731.aiff\ntrain/train02732.aiff\ntrain/train02733.aiff\ntrain/train02734.aiff\ntrain/train02735.aiff\ntrain/train02736.aiff\ntrain/train02737.aiff\ntrain/train02738.aiff\ntrain/train02739.aiff\ntrain/train02740.aiff\ntrain/train02741.aiff\ntrain/train02742.aiff\ntrain/train02743.aiff\ntrain/train02744.aiff\ntrain/train02745.aiff\ntrain/train02746.aiff\ntrain/train02747.aiff\ntrain/train02748.aiff\ntrain/train02749.aiff\ntrain/train02750.aiff\ntrain/train02751.aiff\ntrain/train02752.aiff\ntrain/train02753.aiff\ntrain/train02754.aiff\ntrain/train02755.aiff\ntrain/train02756.aiff\ntrain/train02757.aiff\ntrain/train02758.aiff\ntrain/train02759.aiff\ntrain/train02760.aiff\ntrain/train02761.aiff\ntrain/train02762.aiff\ntrain/train02763.aiff\ntrain/train02764.aiff\ntrain/train02765.aiff\ntrain/train02766.aiff\ntrain/train02767.aiff\ntrain/train02768.aiff\ntrain/train02769.aiff\ntrain/train02770.aiff\ntrain/train02771.aiff\ntrain/tra
in02772.aiff\ntrain/train02773.aiff\ntrain/train02774.aiff\ntrain/train02775.aiff\ntrain/train02776.aiff\ntrain/train02777.aiff\ntrain/train02778.aiff\ntrain/train02779.aiff\ntrain/train02780.aiff\ntrain/train02781.aiff\ntrain/train02782.aiff\ntrain/train02783.aiff\ntrain/train02784.aiff\ntrain/train02785.aiff\ntrain/train02786.aiff\ntrain/train02787.aiff\ntrain/train02788.aiff\ntrain/train02789.aiff\ntrain/train02790.aiff\ntrain/train02791.aiff\ntrain/train02792.aiff\ntrain/train02793.aiff\ntrain/train02794.aiff\ntrain/train02795.aiff\ntrain/train02796.aiff\ntrain/train02797.aiff\ntrain/train02798.aiff\ntrain/train02799.aiff\ntrain/train02800.aiff\ntrain/train02801.aiff\ntrain/train02802.aiff\ntrain/train02803.aiff\ntrain/train02804.aiff\ntrain/train02805.aiff\ntrain/train02806.aiff\ntrain/train02807.aiff\ntrain/train02808.aiff\ntrain/train02809.aiff\ntrain/train02810.aiff\ntrain/train02811.aiff\ntrain/train02812.aiff\ntrain/train02813.aiff\ntrain/train02814.aiff\ntrain/train02815.aiff\ntrain/train02816.aiff\ntrain/train02817.aiff\ntrain/train02818.aiff\ntrain/train02819.aiff\ntrain/train02820.aiff\ntrain/train02821.aiff\ntrain/train02822.aiff\ntrain/train02823.aiff\ntrain/train02824.aiff\ntrain/train02825.aiff\ntrain/train02826.aiff\ntrain/train02827.aiff\ntrain/train02828.aiff\ntrain/train02829.aiff\ntrain/train02830.aiff\ntrain/train02831.aiff\ntrain/train02832.aiff\ntrain/train02833.aiff\ntrain/train02834.aiff\ntrain/train02835.aiff\ntrain/train02836.aiff\ntrain/train02837.aiff\ntrain/train02838.aiff\ntrain/train02839.aiff\ntrain/train02840.aiff\ntrain/train02841.aiff\ntrain/train02842.aiff\ntrain/train02843.aiff\ntrain/train02844.aiff\ntrain/train02845.aiff\ntrain/train02846.aiff\ntrain/train02847.aiff\ntrain/train02848.aiff\ntrain/train02849.aiff\ntrain/train02850.aiff\ntrain/train02851.aiff\ntrain/train02852.aiff\ntrain/train02853.aiff\ntrain/train02854.aiff\ntrain/train02855.aiff\ntrain/train02856.aiff\ntrain/train02857.aiff\ntrain/train02858.aiff\ntrain/train02859.aiff\ntrain/train02860.aiff\ntrain/train02861.aiff\ntrain/train02862.aiff\ntrain/train02863.aiff\ntrain/train02864.aiff\ntrain/train02865.aiff\ntrain/train02866.aiff\ntrain/train02867.aiff\ntrain/train02868.aiff\ntrain/train02869.aiff\ntrain/train02870.aiff\ntrain/train02871.aiff\ntrain/train02872.aiff\ntrain/train02873.aiff\ntrain/train02874.aiff\ntrain/train02875.aiff\ntrain/train02876.aiff\ntrain/train02877.aiff\ntrain/train02878.aiff\ntrain/train02879.aiff\ntrain/train02880.aiff\ntrain/train02881.aiff\ntrain/train02882.aiff\ntrain/train02883.aiff\ntrain/train02884.aiff\ntrain/train02885.aiff\ntrain/train02886.aiff\ntrain/train02887.aiff\ntrain/train02888.aiff\ntrain/train02889.aiff\ntrain/train02890.aiff\ntrain/train02891.aiff\ntrain/train02892.aiff\ntrain/train02893.aiff\ntrain/train02894.aiff\ntrain/train02895.aiff\ntrain/train02896.aiff\ntrain/train02897.aiff\ntrain/train02898.aiff\ntrain/train02899.aiff\ntrain/train02900.aiff\ntrain/train02901.aiff\ntrain/train02902.aiff\ntrain/train02903.aiff\ntrain/train02904.aiff\ntrain/train02905.aiff\ntrain/train02906.aiff\ntrain/train02907.aiff\ntrain/train02908.aiff\ntrain/train02909.aiff\ntrain/train02910.aiff\ntrain/train02911.aiff\ntrain/train02912.aiff\ntrain/train02913.aiff\ntrain/train02914.aiff\ntrain/train02915.aiff\ntrain/train02916.aiff\ntrain/train02917.aiff\ntrain/train02918.aiff\ntrain/train02919.aiff\ntrain/train02920.aiff\ntrain/train02921.aiff\ntrain/train02922.aiff\ntrain/train02923.aiff\ntrain/train02924.aiff\ntrain/train02925.aiff\ntrain/train02926.aiff\
ntrain/train02927.aiff\ntrain/train02928.aiff\ntrain/train02929.aiff\ntrain/train02930.aiff\ntrain/train02931.aiff\ntrain/train02932.aiff\ntrain/train02933.aiff\ntrain/train02934.aiff\ntrain/train02935.aiff\ntrain/train02936.aiff\ntrain/train02937.aiff\ntrain/train02938.aiff\ntrain/train02939.aiff\ntrain/train02940.aiff\ntrain/train02941.aiff\ntrain/train02942.aiff\ntrain/train02943.aiff\ntrain/train02944.aiff\ntrain/train02945.aiff\ntrain/train02946.aiff\ntrain/train02947.aiff\ntrain/train02948.aiff\ntrain/train02949.aiff\ntrain/train02950.aiff\ntrain/train02951.aiff\ntrain/train02952.aiff\ntrain/train02953.aiff\ntrain/train02954.aiff\ntrain/train02955.aiff\ntrain/train02956.aiff\ntrain/train02957.aiff\ntrain/train02958.aiff\ntrain/train02959.aiff\ntrain/train02960.aiff\ntrain/train02961.aiff\ntrain/train02962.aiff\ntrain/train02963.aiff\ntrain/train02964.aiff\ntrain/train02965.aiff\ntrain/train02966.aiff\ntrain/train02967.aiff\ntrain/train02968.aiff\ntrain/train02969.aiff\ntrain/train02970.aiff\ntrain/train02971.aiff\ntrain/train02972.aiff\ntrain/train02973.aiff\ntrain/train02974.aiff\ntrain/train02975.aiff\ntrain/train02976.aiff\ntrain/train02977.aiff\ntrain/train02978.aiff\ntrain/train02979.aiff\ntrain/train02980.aiff\ntrain/train02981.aiff\ntrain/train02982.aiff\ntrain/train02983.aiff\ntrain/train02984.aiff\ntrain/train02985.aiff\ntrain/train02986.aiff\ntrain/train02987.aiff\ntrain/train02988.aiff\ntrain/train02989.aiff\ntrain/train02990.aiff\ntrain/train02991.aiff\ntrain/train02992.aiff\ntrain/train02993.aiff\ntrain/train02994.aiff\ntrain/train02995.aiff\ntrain/train02996.aiff\ntrain/train02997.aiff\ntrain/train02998.aiff\ntrain/train02999.aiff\ntrain/train03000.aiff\ntrain/train03001.aiff\ntrain/train03002.aiff\ntrain/train03003.aiff\ntrain/train03004.aiff\ntrain/train03005.aiff\ntrain/train03006.aiff\ntrain/train03007.aiff\ntrain/train03008.aiff\ntrain/train03009.aiff\ntrain/train03010.aiff\ntrain/train03011.aiff\ntrain/train03012.aiff\ntrain/train03013.aiff\ntrain/train03014.aiff\ntrain/train03015.aiff\ntrain/train03016.aiff\ntrain/train03017.aiff\ntrain/train03018.aiff\ntrain/train03019.aiff\ntrain/train03020.aiff\ntrain/train03021.aiff\ntrain/train03022.aiff\ntrain/train03023.aiff\ntrain/train03024.aiff\ntrain/train03025.aiff\ntrain/train03026.aiff\ntrain/train03027.aiff\ntrain/train03028.aiff\ntrain/train03029.aiff\ntrain/train03030.aiff\ntrain/train03031.aiff\ntrain/train03032.aiff\ntrain/train03033.aiff\ntrain/train03034.aiff\ntrain/train03035.aiff\ntrain/train03036.aiff\ntrain/train03037.aiff\ntrain/train03038.aiff\ntrain/train03039.aiff\ntrain/train03040.aiff\ntrain/train03041.aiff\ntrain/train03042.aiff\ntrain/train03043.aiff\ntrain/train03044.aiff\ntrain/train03045.aiff\ntrain/train03046.aiff\ntrain/train03047.aiff\ntrain/train03048.aiff\ntrain/train03049.aiff\ntrain/train03050.aiff\ntrain/train03051.aiff\ntrain/train03052.aiff\ntrain/train03053.aiff\ntrain/train03054.aiff\ntrain/train03055.aiff\ntrain/train03056.aiff\ntrain/train03057.aiff\ntrain/train03058.aiff\ntrain/train03059.aiff\ntrain/train03060.aiff\ntrain/train03061.aiff\ntrain/train03062.aiff\ntrain/train03063.aiff\ntrain/train03064.aiff\ntrain/train03065.aiff\ntrain/train03066.aiff\ntrain/train03067.aiff\ntrain/train03068.aiff\ntrain/train03069.aiff\ntrain/train03070.aiff\ntrain/train03071.aiff\ntrain/train03072.aiff\ntrain/train03073.aiff\ntrain/train03074.aiff\ntrain/train03075.aiff\ntrain/train03076.aiff\ntrain/train03077.aiff\ntrain/train03078.aiff\ntrain/train03079.aiff\ntrain/train03080.aiff\ntrain/train0
3081.aiff\ntrain/train03082.aiff\ntrain/train03083.aiff\ntrain/train03084.aiff\ntrain/train03085.aiff\ntrain/train03086.aiff\ntrain/train03087.aiff\ntrain/train03088.aiff\ntrain/train03089.aiff\ntrain/train03090.aiff\ntrain/train03091.aiff\ntrain/train03092.aiff\ntrain/train03093.aiff\ntrain/train03094.aiff\ntrain/train03095.aiff\ntrain/train03096.aiff\ntrain/train03097.aiff\ntrain/train03098.aiff\ntrain/train03099.aiff\ntrain/train03100.aiff\ntrain/train03101.aiff\ntrain/train03102.aiff\ntrain/train03103.aiff\ntrain/train03104.aiff\ntrain/train03105.aiff\ntrain/train03106.aiff\ntrain/train03107.aiff\ntrain/train03108.aiff\ntrain/train03109.aiff\ntrain/train03110.aiff\ntrain/train03111.aiff\ntrain/train03112.aiff\ntrain/train03113.aiff\ntrain/train03114.aiff\ntrain/train03115.aiff\ntrain/train03116.aiff\ntrain/train03117.aiff\ntrain/train03118.aiff\ntrain/train03119.aiff\ntrain/train03120.aiff\ntrain/train03121.aiff\ntrain/train03122.aiff\ntrain/train03123.aiff\ntrain/train03124.aiff\ntrain/train03125.aiff\ntrain/train03126.aiff\ntrain/train03127.aiff\ntrain/train03128.aiff\ntrain/train03129.aiff\ntrain/train03130.aiff\ntrain/train03131.aiff\ntrain/train03132.aiff\ntrain/train03133.aiff\ntrain/train03134.aiff\ntrain/train03135.aiff\ntrain/train03136.aiff\ntrain/train03137.aiff\ntrain/train03138.aiff\ntrain/train03139.aiff\ntrain/train03140.aiff\ntrain/train03141.aiff\ntrain/train03142.aiff\ntrain/train03143.aiff\ntrain/train03144.aiff\ntrain/train03145.aiff\ntrain/train03146.aiff\ntrain/train03147.aiff\ntrain/train03148.aiff\ntrain/train03149.aiff\ntrain/train03150.aiff\ntrain/train03151.aiff\ntrain/train03152.aiff\ntrain/train03153.aiff\ntrain/train03154.aiff\ntrain/train03155.aiff\ntrain/train03156.aiff\ntrain/train03157.aiff\ntrain/train03158.aiff\ntrain/train03159.aiff\ntrain/train03160.aiff\ntrain/train03161.aiff\ntrain/train03162.aiff\ntrain/train03163.aiff\ntrain/train03164.aiff\ntrain/train03165.aiff\ntrain/train03166.aiff\ntrain/train03167.aiff\ntrain/train03168.aiff\ntrain/train03169.aiff\ntrain/train03170.aiff\ntrain/train03171.aiff\ntrain/train03172.aiff\ntrain/train03173.aiff\ntrain/train03174.aiff\ntrain/train03175.aiff\ntrain/train03176.aiff\ntrain/train03177.aiff\ntrain/train03178.aiff\ntrain/train03179.aiff\ntrain/train03180.aiff\ntrain/train03181.aiff\ntrain/train03182.aiff\ntrain/train03183.aiff\ntrain/train03184.aiff\ntrain/train03185.aiff\ntrain/train03186.aiff\ntrain/train03187.aiff\ntrain/train03188.aiff\ntrain/train03189.aiff\ntrain/train03190.aiff\ntrain/train03191.aiff\ntrain/train03192.aiff\ntrain/train03193.aiff\ntrain/train03194.aiff\ntrain/train03195.aiff\ntrain/train03196.aiff\ntrain/train03197.aiff\ntrain/train03198.aiff\ntrain/train03199.aiff\ntrain/train03200.aiff\ntrain/train03201.aiff\ntrain/train03202.aiff\ntrain/train03203.aiff\ntrain/train03204.aiff\ntrain/train03205.aiff\ntrain/train03206.aiff\ntrain/train03207.aiff\ntrain/train03208.aiff\ntrain/train03209.aiff\ntrain/train03210.aiff\ntrain/train03211.aiff\ntrain/train03212.aiff\ntrain/train03213.aiff\ntrain/train03214.aiff\ntrain/train03215.aiff\ntrain/train03216.aiff\ntrain/train03217.aiff\ntrain/train03218.aiff\ntrain/train03219.aiff\ntrain/train03220.aiff\ntrain/train03221.aiff\ntrain/train03222.aiff\ntrain/train03223.aiff\ntrain/train03224.aiff\ntrain/train03225.aiff\ntrain/train03226.aiff\ntrain/train03227.aiff\ntrain/train03228.aiff\ntrain/train03229.aiff\ntrain/train03230.aiff\ntrain/train03231.aiff\ntrain/train03232.aiff\ntrain/train03233.aiff\ntrain/train03234.aiff\ntrain/train03235.aiff\ntr
ain/train03236.aiff\ntrain/train03237.aiff\ntrain/train03238.aiff\ntrain/train03239.aiff\ntrain/train03240.aiff\ntrain/train03241.aiff\ntrain/train03242.aiff\ntrain/train03243.aiff\ntrain/train03244.aiff\ntrain/train03245.aiff\ntrain/train03246.aiff\ntrain/train03247.aiff\ntrain/train03248.aiff\ntrain/train03249.aiff\ntrain/train03250.aiff\ntrain/train03251.aiff\ntrain/train03252.aiff\ntrain/train03253.aiff\ntrain/train03254.aiff\ntrain/train03255.aiff\ntrain/train03256.aiff\ntrain/train03257.aiff\ntrain/train03258.aiff\ntrain/train03259.aiff\ntrain/train03260.aiff\ntrain/train03261.aiff\ntrain/train03262.aiff\ntrain/train03263.aiff\ntrain/train03264.aiff\ntrain/train03265.aiff\ntrain/train03266.aiff\ntrain/train03267.aiff\ntrain/train03268.aiff\ntrain/train03269.aiff\ntrain/train03270.aiff\ntrain/train03271.aiff\ntrain/train03272.aiff\ntrain/train03273.aiff\ntrain/train03274.aiff\ntrain/train03275.aiff\ntrain/train03276.aiff\ntrain/train03277.aiff\ntrain/train03278.aiff\ntrain/train03279.aiff\ntrain/train03280.aiff\ntrain/train03281.aiff\ntrain/train03282.aiff\ntrain/train03283.aiff\ntrain/train03284.aiff\ntrain/train03285.aiff\ntrain/train03286.aiff\ntrain/train03287.aiff\ntrain/train03288.aiff\ntrain/train03289.aiff\ntrain/train03290.aiff\ntrain/train03291.aiff\ntrain/train03292.aiff\ntrain/train03293.aiff\ntrain/train03294.aiff\ntrain/train03295.aiff\ntrain/train03296.aiff\ntrain/train03297.aiff\ntrain/train03298.aiff\ntrain/train03299.aiff\ntrain/train03300.aiff\ntrain/train03301.aiff\ntrain/train03302.aiff\ntrain/train03303.aiff\ntrain/train03304.aiff\ntrain/train03305.aiff\ntrain/train03306.aiff\ntrain/train03307.aiff\ntrain/train03308.aiff\ntrain/train03309.aiff\ntrain/train03310.aiff\ntrain/train03311.aiff\ntrain/train03312.aiff\ntrain/train03313.aiff\ntrain/train03314.aiff\ntrain/train03315.aiff\ntrain/train03316.aiff\ntrain/train03317.aiff\ntrain/train03318.aiff\ntrain/train03319.aiff\ntrain/train03320.aiff\ntrain/train03321.aiff\ntrain/train03322.aiff\ntrain/train03323.aiff\ntrain/train03324.aiff\ntrain/train03325.aiff\ntrain/train03326.aiff\ntrain/train03327.aiff\ntrain/train03328.aiff\ntrain/train03329.aiff\ntrain/train03330.aiff\ntrain/train03331.aiff\ntrain/train03332.aiff\ntrain/train03333.aiff\ntrain/train03334.aiff\ntrain/train03335.aiff\ntrain/train03336.aiff\ntrain/train03337.aiff\ntrain/train03338.aiff\ntrain/train03339.aiff\ntrain/train03340.aiff\ntrain/train03341.aiff\ntrain/train03342.aiff\ntrain/train03343.aiff\ntrain/train03344.aiff\ntrain/train03345.aiff\ntrain/train03346.aiff\ntrain/train03347.aiff\ntrain/train03348.aiff\ntrain/train03349.aiff\ntrain/train03350.aiff\ntrain/train03351.aiff\ntrain/train03352.aiff\ntrain/train03353.aiff\ntrain/train03354.aiff\ntrain/train03355.aiff\ntrain/train03356.aiff\ntrain/train03357.aiff\ntrain/train03358.aiff\ntrain/train03359.aiff\ntrain/train03360.aiff\ntrain/train03361.aiff\ntrain/train03362.aiff\ntrain/train03363.aiff\ntrain/train03364.aiff\ntrain/train03365.aiff\ntrain/train03366.aiff\ntrain/train03367.aiff\ntrain/train03368.aiff\ntrain/train03369.aiff\ntrain/train03370.aiff\ntrain/train03371.aiff\ntrain/train03372.aiff\ntrain/train03373.aiff\ntrain/train03374.aiff\ntrain/train03375.aiff\ntrain/train03376.aiff\ntrain/train03377.aiff\ntrain/train03378.aiff\ntrain/train03379.aiff\ntrain/train03380.aiff\ntrain/train03381.aiff\ntrain/train03382.aiff\ntrain/train03383.aiff\ntrain/train03384.aiff\ntrain/train03385.aiff\ntrain/train03386.aiff\ntrain/train03387.aiff\ntrain/train03388.aiff\ntrain/train03389.aiff\ntrain/train0339
0.aiff\ntrain/train03391.aiff\ntrain/train03392.aiff\ntrain/train03393.aiff\ntrain/train03394.aiff\ntrain/train03395.aiff\ntrain/train03396.aiff\ntrain/train03397.aiff\ntrain/train03398.aiff\ntrain/train03399.aiff\ntrain/train03400.aiff\ntrain/train03401.aiff\ntrain/train03402.aiff\ntrain/train03403.aiff\ntrain/train03404.aiff\ntrain/train03405.aiff\ntrain/train03406.aiff\ntrain/train03407.aiff\ntrain/train03408.aiff\ntrain/train03409.aiff\ntrain/train03410.aiff\ntrain/train03411.aiff\ntrain/train03412.aiff\ntrain/train03413.aiff\ntrain/train03414.aiff\ntrain/train03415.aiff\ntrain/train03416.aiff\ntrain/train03417.aiff\ntrain/train03418.aiff\ntrain/train03419.aiff\ntrain/train03420.aiff\ntrain/train03421.aiff\ntrain/train03422.aiff\ntrain/train03423.aiff\ntrain/train03424.aiff\ntrain/train03425.aiff\ntrain/train03426.aiff\ntrain/train03427.aiff\ntrain/train03428.aiff\ntrain/train03429.aiff\ntrain/train03430.aiff\ntrain/train03431.aiff\ntrain/train03432.aiff\ntrain/train03433.aiff\ntrain/train03434.aiff\ntrain/train03435.aiff\ntrain/train03436.aiff\ntrain/train03437.aiff\ntrain/train03438.aiff\ntrain/train03439.aiff\ntrain/train03440.aiff\ntrain/train03441.aiff\ntrain/train03442.aiff\ntrain/train03443.aiff\ntrain/train03444.aiff\ntrain/train03445.aiff\ntrain/train03446.aiff\ntrain/train03447.aiff\ntrain/train03448.aiff\ntrain/train03449.aiff\ntrain/train03450.aiff\ntrain/train03451.aiff\ntrain/train03452.aiff\ntrain/train03453.aiff\ntrain/train03454.aiff\ntrain/train03455.aiff\ntrain/train03456.aiff\ntrain/train03457.aiff\ntrain/train03458.aiff\ntrain/train03459.aiff\ntrain/train03460.aiff\ntrain/train03461.aiff\ntrain/train03462.aiff\ntrain/train03463.aiff\ntrain/train03464.aiff\ntrain/train03465.aiff\ntrain/train03466.aiff\ntrain/train03467.aiff\ntrain/train03468.aiff\ntrain/train03469.aiff\ntrain/train03470.aiff\ntrain/train03471.aiff\ntrain/train03472.aiff\ntrain/train03473.aiff\ntrain/train03474.aiff\ntrain/train03475.aiff\ntrain/train03476.aiff\ntrain/train03477.aiff\ntrain/train03478.aiff\ntrain/train03479.aiff\ntrain/train03480.aiff\ntrain/train03481.aiff\ntrain/train03482.aiff\ntrain/train03483.aiff\ntrain/train03484.aiff\ntrain/train03485.aiff\ntrain/train03486.aiff\ntrain/train03487.aiff\ntrain/train03488.aiff\ntrain/train03489.aiff\ntrain/train03490.aiff\ntrain/train03491.aiff\ntrain/train03492.aiff\ntrain/train03493.aiff\ntrain/train03494.aiff\ntrain/train03495.aiff\ntrain/train03496.aiff\ntrain/train03497.aiff\ntrain/train03498.aiff\ntrain/train03499.aiff\ntrain/train03500.aiff\ntrain/train03501.aiff\ntrain/train03502.aiff\ntrain/train03503.aiff\ntrain/train03504.aiff\ntrain/train03505.aiff\ntrain/train03506.aiff\ntrain/train03507.aiff\ntrain/train03508.aiff\ntrain/train03509.aiff\ntrain/train03510.aiff\ntrain/train03511.aiff\ntrain/train03512.aiff\ntrain/train03513.aiff\ntrain/train03514.aiff\ntrain/train03515.aiff\ntrain/train03516.aiff\ntrain/train03517.aiff\ntrain/train03518.aiff\ntrain/train03519.aiff\ntrain/train03520.aiff\ntrain/train03521.aiff\ntrain/train03522.aiff\ntrain/train03523.aiff\ntrain/train03524.aiff\ntrain/train03525.aiff\ntrain/train03526.aiff\ntrain/train03527.aiff\ntrain/train03528.aiff\ntrain/train03529.aiff\ntrain/train03530.aiff\ntrain/train03531.aiff\ntrain/train03532.aiff\ntrain/train03533.aiff\ntrain/train03534.aiff\ntrain/train03535.aiff\ntrain/train03536.aiff\ntrain/train03537.aiff\ntrain/train03538.aiff\ntrain/train03539.aiff\ntrain/train03540.aiff\ntrain/train03541.aiff\ntrain/train03542.aiff\ntrain/train03543.aiff\ntrain/train03544.aiff\ntrain
/train03545.aiff\ntrain/train03546.aiff\ntrain/train03547.aiff\ntrain/train03548.aiff\ntrain/train03549.aiff\ntrain/train03550.aiff\ntrain/train03551.aiff\ntrain/train03552.aiff\ntrain/train03553.aiff\ntrain/train03554.aiff\ntrain/train03555.aiff\ntrain/train03556.aiff\ntrain/train03557.aiff\ntrain/train03558.aiff\ntrain/train03559.aiff\ntrain/train03560.aiff\ntrain/train03561.aiff\ntrain/train03562.aiff\ntrain/train03563.aiff\ntrain/train03564.aiff\ntrain/train03565.aiff\ntrain/train03566.aiff\ntrain/train03567.aiff\ntrain/train03568.aiff\ntrain/train03569.aiff\ntrain/train03570.aiff\ntrain/train03571.aiff\ntrain/train03572.aiff\ntrain/train03573.aiff\ntrain/train03574.aiff\ntrain/train03575.aiff\ntrain/train03576.aiff\ntrain/train03577.aiff\ntrain/train03578.aiff\ntrain/train03579.aiff\ntrain/train03580.aiff\ntrain/train03581.aiff\ntrain/train03582.aiff\ntrain/train03583.aiff\ntrain/train03584.aiff\ntrain/train03585.aiff\ntrain/train03586.aiff\ntrain/train03587.aiff\ntrain/train03588.aiff\ntrain/train03589.aiff\ntrain/train03590.aiff\ntrain/train03591.aiff\ntrain/train03592.aiff\ntrain/train03593.aiff\ntrain/train03594.aiff\ntrain/train03595.aiff\ntrain/train03596.aiff\ntrain/train03597.aiff\ntrain/train03598.aiff\ntrain/train03599.aiff\ntrain/train03600.aiff\ntrain/train03601.aiff\ntrain/train03602.aiff\ntrain/train03603.aiff\ntrain/train03604.aiff\ntrain/train03605.aiff\ntrain/train03606.aiff\ntrain/train03607.aiff\ntrain/train03608.aiff\ntrain/train03609.aiff\ntrain/train03610.aiff\ntrain/train03611.aiff\ntrain/train03612.aiff\ntrain/train03613.aiff\ntrain/train03614.aiff\ntrain/train03615.aiff\ntrain/train03616.aiff\ntrain/train03617.aiff\ntrain/train03618.aiff\ntrain/train03619.aiff\ntrain/train03620.aiff\ntrain/train03621.aiff\ntrain/train03622.aiff\ntrain/train03623.aiff\ntrain/train03624.aiff\ntrain/train03625.aiff\ntrain/train03626.aiff\ntrain/train03627.aiff\ntrain/train03628.aiff\ntrain/train03629.aiff\ntrain/train03630.aiff\ntrain/train03631.aiff\ntrain/train03632.aiff\ntrain/train03633.aiff\ntrain/train03634.aiff\ntrain/train03635.aiff\ntrain/train03636.aiff\ntrain/train03637.aiff\ntrain/train03638.aiff\ntrain/train03639.aiff\ntrain/train03640.aiff\ntrain/train03641.aiff\ntrain/train03642.aiff\ntrain/train03643.aiff\ntrain/train03644.aiff\ntrain/train03645.aiff\ntrain/train03646.aiff\ntrain/train03647.aiff\ntrain/train03648.aiff\ntrain/train03649.aiff\ntrain/train03650.aiff\ntrain/train03651.aiff\ntrain/train03652.aiff\ntrain/train03653.aiff\ntrain/train03654.aiff\ntrain/train03655.aiff\ntrain/train03656.aiff\ntrain/train03657.aiff\ntrain/train03658.aiff\ntrain/train03659.aiff\ntrain/train03660.aiff\ntrain/train03661.aiff\ntrain/train03662.aiff\ntrain/train03663.aiff\ntrain/train03664.aiff\ntrain/train03665.aiff\ntrain/train03666.aiff\ntrain/train03667.aiff\ntrain/train03668.aiff\ntrain/train03669.aiff\ntrain/train03670.aiff\ntrain/train03671.aiff\ntrain/train03672.aiff\ntrain/train03673.aiff\ntrain/train03674.aiff\ntrain/train03675.aiff\ntrain/train03676.aiff\ntrain/train03677.aiff\ntrain/train03678.aiff\ntrain/train03679.aiff\ntrain/train03680.aiff\ntrain/train03681.aiff\ntrain/train03682.aiff\ntrain/train03683.aiff\ntrain/train03684.aiff\ntrain/train03685.aiff\ntrain/train03686.aiff\ntrain/train03687.aiff\ntrain/train03688.aiff\ntrain/train03689.aiff\ntrain/train03690.aiff\ntrain/train03691.aiff\ntrain/train03692.aiff\ntrain/train03693.aiff\ntrain/train03694.aiff\ntrain/train03695.aiff\ntrain/train03696.aiff\ntrain/train03697.aiff\ntrain/train03698.aiff\ntrain/train03699.a
iff\ntrain/train03700.aiff\ntrain/train03701.aiff\ntrain/train03702.aiff\ntrain/train03703.aiff\ntrain/train03704.aiff\ntrain/train03705.aiff\ntrain/train03706.aiff\ntrain/train03707.aiff\ntrain/train03708.aiff\ntrain/train03709.aiff\ntrain/train03710.aiff\ntrain/train03711.aiff\ntrain/train03712.aiff\ntrain/train03713.aiff\ntrain/train03714.aiff\ntrain/train03715.aiff\ntrain/train03716.aiff\ntrain/train03717.aiff\ntrain/train03718.aiff\ntrain/train03719.aiff\ntrain/train03720.aiff\ntrain/train03721.aiff\ntrain/train03722.aiff\ntrain/train03723.aiff\ntrain/train03724.aiff\ntrain/train03725.aiff\ntrain/train03726.aiff\ntrain/train03727.aiff\ntrain/train03728.aiff\ntrain/train03729.aiff\ntrain/train03730.aiff\ntrain/train03731.aiff\ntrain/train03732.aiff\ntrain/train03733.aiff\ntrain/train03734.aiff\ntrain/train03735.aiff\ntrain/train03736.aiff\ntrain/train03737.aiff\ntrain/train03738.aiff\ntrain/train03739.aiff\ntrain/train03740.aiff\ntrain/train03741.aiff\ntrain/train03742.aiff\ntrain/train03743.aiff\ntrain/train03744.aiff\ntrain/train03745.aiff\ntrain/train03746.aiff\ntrain/train03747.aiff\ntrain/train03748.aiff\ntrain/train03749.aiff\ntrain/train03750.aiff\ntrain/train03751.aiff\ntrain/train03752.aiff\ntrain/train03753.aiff\ntrain/train03754.aiff\ntrain/train03755.aiff\ntrain/train03756.aiff\ntrain/train03757.aiff\ntrain/train03758.aiff\ntrain/train03759.aiff\ntrain/train03760.aiff\ntrain/train03761.aiff\ntrain/train03762.aiff\ntrain/train03763.aiff\ntrain/train03764.aiff\ntrain/train03765.aiff\ntrain/train03766.aiff\ntrain/train03767.aiff\ntrain/train03768.aiff\ntrain/train03769.aiff\ntrain/train03770.aiff\ntrain/train03771.aiff\ntrain/train03772.aiff\ntrain/train03773.aiff\ntrain/train03774.aiff\ntrain/train03775.aiff\ntrain/train03776.aiff\ntrain/train03777.aiff\ntrain/train03778.aiff\ntrain/train03779.aiff\ntrain/train03780.aiff\ntrain/train03781.aiff\ntrain/train03782.aiff\ntrain/train03783.aiff\ntrain/train03784.aiff\ntrain/train03785.aiff\ntrain/train03786.aiff\ntrain/train03787.aiff\ntrain/train03788.aiff\ntrain/train03789.aiff\ntrain/train03790.aiff\ntrain/train03791.aiff\ntrain/train03792.aiff\ntrain/train03793.aiff\ntrain/train03794.aiff\ntrain/train03795.aiff\ntrain/train03796.aiff\ntrain/train03797.aiff\ntrain/train03798.aiff\ntrain/train03799.aiff\ntrain/train03800.aiff\ntrain/train03801.aiff\ntrain/train03802.aiff\ntrain/train03803.aiff\ntrain/train03804.aiff\ntrain/train03805.aiff\ntrain/train03806.aiff\ntrain/train03807.aiff\ntrain/train03808.aiff\ntrain/train03809.aiff\ntrain/train03810.aiff\ntrain/train03811.aiff\ntrain/train03812.aiff\ntrain/train03813.aiff\ntrain/train03814.aiff\ntrain/train03815.aiff\ntrain/train03816.aiff\ntrain/train03817.aiff\ntrain/train03818.aiff\ntrain/train03819.aiff\ntrain/train03820.aiff\ntrain/train03821.aiff\ntrain/train03822.aiff\ntrain/train03823.aiff\ntrain/train03824.aiff\ntrain/train03825.aiff\ntrain/train03826.aiff\ntrain/train03827.aiff\ntrain/train03828.aiff\ntrain/train03829.aiff\ntrain/train03830.aiff\ntrain/train03831.aiff\ntrain/train03832.aiff\ntrain/train03833.aiff\ntrain/train03834.aiff\ntrain/train03835.aiff\ntrain/train03836.aiff\ntrain/train03837.aiff\ntrain/train03838.aiff\ntrain/train03839.aiff\ntrain/train03840.aiff\ntrain/train03841.aiff\ntrain/train03842.aiff\ntrain/train03843.aiff\ntrain/train03844.aiff\ntrain/train03845.aiff\ntrain/train03846.aiff\ntrain/train03847.aiff\ntrain/train03848.aiff\ntrain/train03849.aiff\ntrain/train03850.aiff\ntrain/train03851.aiff\ntrain/train03852.aiff\ntrain/train03853.aiff\ntrain/tr
ain03854.aiff\ntrain/train03855.aiff\ntrain/train03856.aiff\ntrain/train03857.aiff\ntrain/train03858.aiff\ntrain/train03859.aiff\ntrain/train03860.aiff\ntrain/train03861.aiff\ntrain/train03862.aiff\ntrain/train03863.aiff\ntrain/train03864.aiff\ntrain/train03865.aiff\ntrain/train03866.aiff\ntrain/train03867.aiff\ntrain/train03868.aiff\ntrain/train03869.aiff\ntrain/train03870.aiff\ntrain/train03871.aiff\ntrain/train03872.aiff\ntrain/train03873.aiff\ntrain/train03874.aiff\ntrain/train03875.aiff\ntrain/train03876.aiff\ntrain/train03877.aiff\ntrain/train03878.aiff\ntrain/train03879.aiff\ntrain/train03880.aiff\ntrain/train03881.aiff\ntrain/train03882.aiff\ntrain/train03883.aiff\ntrain/train03884.aiff\ntrain/train03885.aiff\ntrain/train03886.aiff\ntrain/train03887.aiff\ntrain/train03888.aiff\ntrain/train03889.aiff\ntrain/train03890.aiff\ntrain/train03891.aiff\ntrain/train03892.aiff\ntrain/train03893.aiff\ntrain/train03894.aiff\ntrain/train03895.aiff\ntrain/train03896.aiff\ntrain/train03897.aiff\ntrain/train03898.aiff\ntrain/train03899.aiff\ntrain/train03900.aiff\ntrain/train03901.aiff\ntrain/train03902.aiff\ntrain/train03903.aiff\ntrain/train03904.aiff\ntrain/train03905.aiff\ntrain/train03906.aiff\ntrain/train03907.aiff\ntrain/train03908.aiff\ntrain/train03909.aiff\ntrain/train03910.aiff\ntrain/train03911.aiff\ntrain/train03912.aiff\ntrain/train03913.aiff\ntrain/train03914.aiff\ntrain/train03915.aiff\ntrain/train03916.aiff\ntrain/train03917.aiff\ntrain/train03918.aiff\ntrain/train03919.aiff\ntrain/train03920.aiff\ntrain/train03921.aiff\ntrain/train03922.aiff\ntrain/train03923.aiff\ntrain/train03924.aiff\ntrain/train03925.aiff\ntrain/train03926.aiff\ntrain/train03927.aiff\ntrain/train03928.aiff\ntrain/train03929.aiff\ntrain/train03930.aiff\ntrain/train03931.aiff\ntrain/train03932.aiff\ntrain/train03933.aiff\ntrain/train03934.aiff\ntrain/train03935.aiff\ntrain/train03936.aiff\ntrain/train03937.aiff\ntrain/train03938.aiff\ntrain/train03939.aiff\ntrain/train03940.aiff\ntrain/train03941.aiff\ntrain/train03942.aiff\ntrain/train03943.aiff\ntrain/train03944.aiff\ntrain/train03945.aiff\ntrain/train03946.aiff\ntrain/train03947.aiff\ntrain/train03948.aiff\ntrain/train03949.aiff\ntrain/train03950.aiff\ntrain/train03951.aiff\ntrain/train03952.aiff\ntrain/train03953.aiff\ntrain/train03954.aiff\ntrain/train03955.aiff\ntrain/train03956.aiff\ntrain/train03957.aiff\ntrain/train03958.aiff\ntrain/train03959.aiff\ntrain/train03960.aiff\ntrain/train03961.aiff\ntrain/train03962.aiff\ntrain/train03963.aiff\ntrain/train03964.aiff\ntrain/train03965.aiff\ntrain/train03966.aiff\ntrain/train03967.aiff\ntrain/train03968.aiff\ntrain/train03969.aiff\ntrain/train03970.aiff\ntrain/train03971.aiff\ntrain/train03972.aiff\ntrain/train03973.aiff\ntrain/train03974.aiff\ntrain/train03975.aiff\ntrain/train03976.aiff\ntrain/train03977.aiff\ntrain/train03978.aiff\ntrain/train03979.aiff\ntrain/train03980.aiff\ntrain/train03981.aiff\ntrain/train03982.aiff\ntrain/train03983.aiff\ntrain/train03984.aiff\ntrain/train03985.aiff\ntrain/train03986.aiff\ntrain/train03987.aiff\ntrain/train03988.aiff\ntrain/train03989.aiff\ntrain/train03990.aiff\ntrain/train03991.aiff\ntrain/train03992.aiff\ntrain/train03993.aiff\ntrain/train03994.aiff\ntrain/train03995.aiff\ntrain/train03996.aiff\ntrain/train03997.aiff\ntrain/train03998.aiff\ntrain/train03999.aiff\ntrain/train04000.aiff\ntrain/train04001.aiff\ntrain/train04002.aiff\ntrain/train04003.aiff\ntrain/train04004.aiff\ntrain/train04005.aiff\ntrain/train04006.aiff\ntrain/train04007.aiff\ntrain/train04008.aiff
\ntrain/train04009.aiff\ntrain/train04010.aiff\ntrain/train04011.aiff\ntrain/train04012.aiff\ntrain/train04013.aiff\ntrain/train04014.aiff\ntrain/train04015.aiff\ntrain/train04016.aiff\ntrain/train04017.aiff\ntrain/train04018.aiff\ntrain/train04019.aiff\ntrain/train04020.aiff\ntrain/train04021.aiff\ntrain/train04022.aiff\ntrain/train04023.aiff\ntrain/train04024.aiff\ntrain/train04025.aiff\ntrain/train04026.aiff\ntrain/train04027.aiff\ntrain/train04028.aiff\ntrain/train04029.aiff\ntrain/train04030.aiff\ntrain/train04031.aiff\ntrain/train04032.aiff\ntrain/train04033.aiff\ntrain/train04034.aiff\ntrain/train04035.aiff\ntrain/train04036.aiff\ntrain/train04037.aiff\ntrain/train04038.aiff\ntrain/train04039.aiff\ntrain/train04040.aiff\ntrain/train04041.aiff\ntrain/train04042.aiff\ntrain/train04043.aiff\ntrain/train04044.aiff\ntrain/train04045.aiff\ntrain/train04046.aiff\ntrain/train04047.aiff\ntrain/train04048.aiff\ntrain/train04049.aiff\ntrain/train04050.aiff\ntrain/train04051.aiff\ntrain/train04052.aiff\ntrain/train04053.aiff\ntrain/train04054.aiff\ntrain/train04055.aiff\ntrain/train04056.aiff\ntrain/train04057.aiff\ntrain/train04058.aiff\ntrain/train04059.aiff\ntrain/train04060.aiff\ntrain/train04061.aiff\ntrain/train04062.aiff\ntrain/train04063.aiff\ntrain/train04064.aiff\ntrain/train04065.aiff\ntrain/train04066.aiff\ntrain/train04067.aiff\ntrain/train04068.aiff\ntrain/train04069.aiff\ntrain/train04070.aiff\ntrain/train04071.aiff\ntrain/train04072.aiff\ntrain/train04073.aiff\ntrain/train04074.aiff\ntrain/train04075.aiff\ntrain/train04076.aiff\ntrain/train04077.aiff\ntrain/train04078.aiff\ntrain/train04079.aiff\ntrain/train04080.aiff\ntrain/train04081.aiff\ntrain/train04082.aiff\ntrain/train04083.aiff\ntrain/train04084.aiff\ntrain/train04085.aiff\ntrain/train04086.aiff\ntrain/train04087.aiff\ntrain/train04088.aiff\ntrain/train04089.aiff\ntrain/train04090.aiff\ntrain/train04091.aiff\ntrain/train04092.aiff\ntrain/train04093.aiff\ntrain/train04094.aiff\ntrain/train04095.aiff\ntrain/train04096.aiff\ntrain/train04097.aiff\ntrain/train04098.aiff\ntrain/train04099.aiff\ntrain/train04100.aiff\ntrain/train04101.aiff\ntrain/train04102.aiff\ntrain/train04103.aiff\ntrain/train04104.aiff\ntrain/train04105.aiff\ntrain/train04106.aiff\ntrain/train04107.aiff\ntrain/train04108.aiff\ntrain/train04109.aiff\ntrain/train04110.aiff\ntrain/train04111.aiff\ntrain/train04112.aiff\ntrain/train04113.aiff\ntrain/train04114.aiff\ntrain/train04115.aiff\ntrain/train04116.aiff\ntrain/train04117.aiff\ntrain/train04118.aiff\ntrain/train04119.aiff\ntrain/train04120.aiff\ntrain/train04121.aiff\ntrain/train04122.aiff\ntrain/train04123.aiff\ntrain/train04124.aiff\ntrain/train04125.aiff\ntrain/train04126.aiff\ntrain/train04127.aiff\ntrain/train04128.aiff\ntrain/train04129.aiff\ntrain/train04130.aiff\ntrain/train04131.aiff\ntrain/train04132.aiff\ntrain/train04133.aiff\ntrain/train04134.aiff\ntrain/train04135.aiff\ntrain/train04136.aiff\ntrain/train04137.aiff\ntrain/train04138.aiff\ntrain/train04139.aiff\ntrain/train04140.aiff\ntrain/train04141.aiff\ntrain/train04142.aiff\ntrain/train04143.aiff\ntrain/train04144.aiff\ntrain/train04145.aiff\ntrain/train04146.aiff\ntrain/train04147.aiff\ntrain/train04148.aiff\ntrain/train04149.aiff\ntrain/train04150.aiff\ntrain/train04151.aiff\ntrain/train04152.aiff\ntrain/train04153.aiff\ntrain/train04154.aiff\ntrain/train04155.aiff\ntrain/train04156.aiff\ntrain/train04157.aiff\ntrain/train04158.aiff\ntrain/train04159.aiff\ntrain/train04160.aiff\ntrain/train04161.aiff\ntrain/train04162.aiff\ntrain/train
04163.aiff\ntrain/train04164.aiff\ntrain/train04165.aiff\ntrain/train04166.aiff\ntrain/train04167.aiff\ntrain/train04168.aiff\ntrain/train04169.aiff\ntrain/train04170.aiff\ntrain/train04171.aiff\ntrain/train04172.aiff\ntrain/train04173.aiff\ntrain/train04174.aiff\ntrain/train04175.aiff\ntrain/train04176.aiff\ntrain/train04177.aiff\ntrain/train04178.aiff\ntrain/train04179.aiff\ntrain/train04180.aiff\ntrain/train04181.aiff\ntrain/train04182.aiff\ntrain/train04183.aiff\ntrain/train04184.aiff\ntrain/train04185.aiff\ntrain/train04186.aiff\ntrain/train04187.aiff\ntrain/train04188.aiff\ntrain/train04189.aiff\ntrain/train04190.aiff\ntrain/train04191.aiff\ntrain/train04192.aiff\ntrain/train04193.aiff\ntrain/train04194.aiff\ntrain/train04195.aiff\ntrain/train04196.aiff\ntrain/train04197.aiff\ntrain/train04198.aiff\ntrain/train04199.aiff\ntrain/train04200.aiff\ntrain/train04201.aiff\ntrain/train04202.aiff\ntrain/train04203.aiff\ntrain/train04204.aiff\ntrain/train04205.aiff\ntrain/train04206.aiff\ntrain/train04207.aiff\ntrain/train04208.aiff\ntrain/train04209.aiff\ntrain/train04210.aiff\ntrain/train04211.aiff\ntrain/train04212.aiff\ntrain/train04213.aiff\ntrain/train04214.aiff\ntrain/train04215.aiff\ntrain/train04216.aiff\ntrain/train04217.aiff\ntrain/train04218.aiff\ntrain/train04219.aiff\ntrain/train04220.aiff\ntrain/train04221.aiff\ntrain/train04222.aiff\ntrain/train04223.aiff\ntrain/train04224.aiff\ntrain/train04225.aiff\ntrain/train04226.aiff\ntrain/train04227.aiff\ntrain/train04228.aiff\ntrain/train04229.aiff\ntrain/train04230.aiff\ntrain/train04231.aiff\ntrain/train04232.aiff\ntrain/train04233.aiff\ntrain/train04234.aiff\ntrain/train04235.aiff\ntrain/train04236.aiff\ntrain/train04237.aiff\ntrain/train04238.aiff\ntrain/train04239.aiff\ntrain/train04240.aiff\ntrain/train04241.aiff\ntrain/train04242.aiff\ntrain/train04243.aiff\ntrain/train04244.aiff\ntrain/train04245.aiff\ntrain/train04246.aiff\ntrain/train04247.aiff\ntrain/train04248.aiff\ntrain/train04249.aiff\ntrain/train04250.aiff\ntrain/train04251.aiff\ntrain/train04252.aiff\ntrain/train04253.aiff\ntrain/train04254.aiff\ntrain/train04255.aiff\ntrain/train04256.aiff\ntrain/train04257.aiff\ntrain/train04258.aiff\ntrain/train04259.aiff\ntrain/train04260.aiff\ntrain/train04261.aiff\ntrain/train04262.aiff\ntrain/train04263.aiff\ntrain/train04264.aiff\ntrain/train04265.aiff\ntrain/train04266.aiff\ntrain/train04267.aiff\ntrain/train04268.aiff\ntrain/train04269.aiff\ntrain/train04270.aiff\ntrain/train04271.aiff\ntrain/train04272.aiff\ntrain/train04273.aiff\ntrain/train04274.aiff\ntrain/train04275.aiff\ntrain/train04276.aiff\ntrain/train04277.aiff\ntrain/train04278.aiff\ntrain/train04279.aiff\ntrain/train04280.aiff\ntrain/train04281.aiff\ntrain/train04282.aiff\ntrain/train04283.aiff\ntrain/train04284.aiff\ntrain/train04285.aiff\ntrain/train04286.aiff\ntrain/train04287.aiff\ntrain/train04288.aiff\ntrain/train04289.aiff\ntrain/train04290.aiff\ntrain/train04291.aiff\ntrain/train04292.aiff\ntrain/train04293.aiff\ntrain/train04294.aiff\ntrain/train04295.aiff\ntrain/train04296.aiff\ntrain/train04297.aiff\ntrain/train04298.aiff\ntrain/train04299.aiff\ntrain/train04300.aiff\ntrain/train04301.aiff\ntrain/train04302.aiff\ntrain/train04303.aiff\ntrain/train04304.aiff\ntrain/train04305.aiff\ntrain/train04306.aiff\ntrain/train04307.aiff\ntrain/train04308.aiff\ntrain/train04309.aiff\ntrain/train04310.aiff\ntrain/train04311.aiff\ntrain/train04312.aiff\ntrain/train04313.aiff\ntrain/train04314.aiff\ntrain/train04315.aiff\ntrain/train04316.aiff\ntrain/train04317.aiff\nt
rain/train04318.aiff\ntrain/train04319.aiff\ntrain/train04320.aiff\ntrain/train04321.aiff\ntrain/train04322.aiff\ntrain/train04323.aiff\ntrain/train04324.aiff\ntrain/train04325.aiff\ntrain/train04326.aiff\ntrain/train04327.aiff\ntrain/train04328.aiff\ntrain/train04329.aiff\ntrain/train04330.aiff\ntrain/train04331.aiff\ntrain/train04332.aiff\ntrain/train04333.aiff\ntrain/train04334.aiff\ntrain/train04335.aiff\ntrain/train04336.aiff\ntrain/train04337.aiff\ntrain/train04338.aiff\ntrain/train04339.aiff\ntrain/train04340.aiff\ntrain/train04341.aiff\ntrain/train04342.aiff\ntrain/train04343.aiff\ntrain/train04344.aiff\ntrain/train04345.aiff\ntrain/train04346.aiff\ntrain/train04347.aiff\ntrain/train04348.aiff\ntrain/train04349.aiff\ntrain/train04350.aiff\ntrain/train04351.aiff\ntrain/train04352.aiff\ntrain/train04353.aiff\ntrain/train04354.aiff\ntrain/train04355.aiff\ntrain/train04356.aiff\ntrain/train04357.aiff\ntrain/train04358.aiff\ntrain/train04359.aiff\ntrain/train04360.aiff\ntrain/train04361.aiff\ntrain/train04362.aiff\ntrain/train04363.aiff\ntrain/train04364.aiff\ntrain/train04365.aiff\ntrain/train04366.aiff\ntrain/train04367.aiff\ntrain/train04368.aiff\ntrain/train04369.aiff\ntrain/train04370.aiff\ntrain/train04371.aiff\ntrain/train04372.aiff\ntrain/train04373.aiff\ntrain/train04374.aiff\ntrain/train04375.aiff\ntrain/train04376.aiff\ntrain/train04377.aiff\ntrain/train04378.aiff\ntrain/train04379.aiff\ntrain/train04380.aiff\ntrain/train04381.aiff\ntrain/train04382.aiff\ntrain/train04383.aiff\ntrain/train04384.aiff\ntrain/train04385.aiff\ntrain/train04386.aiff\ntrain/train04387.aiff\ntrain/train04388.aiff\ntrain/train04389.aiff\ntrain/train04390.aiff\ntrain/train04391.aiff\ntrain/train04392.aiff\ntrain/train04393.aiff\ntrain/train04394.aiff\ntrain/train04395.aiff\ntrain/train04396.aiff\ntrain/train04397.aiff\ntrain/train04398.aiff\ntrain/train04399.aiff\ntrain/train04400.aiff\ntrain/train04401.aiff\ntrain/train04402.aiff\ntrain/train04403.aiff\ntrain/train04404.aiff\ntrain/train04405.aiff\ntrain/train04406.aiff\ntrain/train04407.aiff\ntrain/train04408.aiff\ntrain/train04409.aiff\ntrain/train04410.aiff\ntrain/train04411.aiff\ntrain/train04412.aiff\ntrain/train04413.aiff\ntrain/train04414.aiff\ntrain/train04415.aiff\ntrain/train04416.aiff\ntrain/train04417.aiff\ntrain/train04418.aiff\ntrain/train04419.aiff\ntrain/train04420.aiff\ntrain/train04421.aiff\ntrain/train04422.aiff\ntrain/train04423.aiff\ntrain/train04424.aiff\ntrain/train04425.aiff\ntrain/train04426.aiff\ntrain/train04427.aiff\ntrain/train04428.aiff\ntrain/train04429.aiff\ntrain/train04430.aiff\ntrain/train04431.aiff\ntrain/train04432.aiff\ntrain/train04433.aiff\ntrain/train04434.aiff\ntrain/train04435.aiff\ntrain/train04436.aiff\ntrain/train04437.aiff\ntrain/train04438.aiff\ntrain/train04439.aiff\ntrain/train04440.aiff\ntrain/train04441.aiff\ntrain/train04442.aiff\ntrain/train04443.aiff\ntrain/train04444.aiff\ntrain/train04445.aiff\ntrain/train04446.aiff\ntrain/train04447.aiff\ntrain/train04448.aiff\ntrain/train04449.aiff\ntrain/train04450.aiff\ntrain/train04451.aiff\ntrain/train04452.aiff\ntrain/train04453.aiff\ntrain/train04454.aiff\ntrain/train04455.aiff\ntrain/train04456.aiff\ntrain/train04457.aiff\ntrain/train04458.aiff\ntrain/train04459.aiff\ntrain/train04460.aiff\ntrain/train04461.aiff\ntrain/train04462.aiff\ntrain/train04463.aiff\ntrain/train04464.aiff\ntrain/train04465.aiff\ntrain/train04466.aiff\ntrain/train04467.aiff\ntrain/train04468.aiff\ntrain/train04469.aiff\ntrain/train04470.aiff\ntrain/train04471.aiff\ntrain/train044
72.aiff\ntrain/train04473.aiff\ntrain/train04474.aiff\ntrain/train04475.aiff\ntrain/train04476.aiff\ntrain/train04477.aiff\ntrain/train04478.aiff\ntrain/train04479.aiff\ntrain/train04480.aiff\ntrain/train04481.aiff\ntrain/train04482.aiff\ntrain/train04483.aiff\ntrain/train04484.aiff\ntrain/train04485.aiff\ntrain/train04486.aiff\ntrain/train04487.aiff\ntrain/train04488.aiff\ntrain/train04489.aiff\ntrain/train04490.aiff\ntrain/train04491.aiff\ntrain/train04492.aiff\ntrain/train04493.aiff\ntrain/train04494.aiff\ntrain/train04495.aiff\ntrain/train04496.aiff\ntrain/train04497.aiff\ntrain/train04498.aiff\ntrain/train04499.aiff\ntrain/train04500.aiff\ntrain/train04501.aiff\ntrain/train04502.aiff\ntrain/train04503.aiff\ntrain/train04504.aiff\ntrain/train04505.aiff\ntrain/train04506.aiff\ntrain/train04507.aiff\ntrain/train04508.aiff\ntrain/train04509.aiff\ntrain/train04510.aiff\ntrain/train04511.aiff\ntrain/train04512.aiff\ntrain/train04513.aiff\ntrain/train04514.aiff\ntrain/train04515.aiff\ntrain/train04516.aiff\ntrain/train04517.aiff\ntrain/train04518.aiff\ntrain/train04519.aiff\ntrain/train04520.aiff\ntrain/train04521.aiff\ntrain/train04522.aiff\ntrain/train04523.aiff\ntrain/train04524.aiff\ntrain/train04525.aiff\ntrain/train04526.aiff\ntrain/train04527.aiff\ntrain/train04528.aiff\ntrain/train04529.aiff\ntrain/train04530.aiff\ntrain/train04531.aiff\ntrain/train04532.aiff\ntrain/train04533.aiff\ntrain/train04534.aiff\ntrain/train04535.aiff\ntrain/train04536.aiff\ntrain/train04537.aiff\ntrain/train04538.aiff\ntrain/train04539.aiff\ntrain/train04540.aiff\ntrain/train04541.aiff\ntrain/train04542.aiff\ntrain/train04543.aiff\ntrain/train04544.aiff\ntrain/train04545.aiff\ntrain/train04546.aiff\ntrain/train04547.aiff\ntrain/train04548.aiff\ntrain/train04549.aiff\ntrain/train04550.aiff\ntrain/train04551.aiff\ntrain/train04552.aiff\ntrain/train04553.aiff\ntrain/train04554.aiff\ntrain/train04555.aiff\ntrain/train04556.aiff\ntrain/train04557.aiff\ntrain/train04558.aiff\ntrain/train04559.aiff\ntrain/train04560.aiff\ntrain/train04561.aiff\ntrain/train04562.aiff\ntrain/train04563.aiff\ntrain/train04564.aiff\ntrain/train04565.aiff\ntrain/train04566.aiff\ntrain/train04567.aiff\ntrain/train04568.aiff\ntrain/train04569.aiff\ntrain/train04570.aiff\ntrain/train04571.aiff\ntrain/train04572.aiff\ntrain/train04573.aiff\ntrain/train04574.aiff\ntrain/train04575.aiff\ntrain/train04576.aiff\ntrain/train04577.aiff\ntrain/train04578.aiff\ntrain/train04579.aiff\ntrain/train04580.aiff\ntrain/train04581.aiff\ntrain/train04582.aiff\ntrain/train04583.aiff\ntrain/train04584.aiff\ntrain/train04585.aiff\ntrain/train04586.aiff\ntrain/train04587.aiff\ntrain/train04588.aiff\ntrain/train04589.aiff\ntrain/train04590.aiff\ntrain/train04591.aiff\ntrain/train04592.aiff\ntrain/train04593.aiff\ntrain/train04594.aiff\ntrain/train04595.aiff\ntrain/train04596.aiff\ntrain/train04597.aiff\ntrain/train04598.aiff\ntrain/train04599.aiff\ntrain/train04600.aiff\ntrain/train04601.aiff\ntrain/train04602.aiff\ntrain/train04603.aiff\ntrain/train04604.aiff\ntrain/train04605.aiff\ntrain/train04606.aiff\ntrain/train04607.aiff\ntrain/train04608.aiff\ntrain/train04609.aiff\ntrain/train04610.aiff\ntrain/train04611.aiff\ntrain/train04612.aiff\ntrain/train04613.aiff\ntrain/train04614.aiff\ntrain/train04615.aiff\ntrain/train04616.aiff\ntrain/train04617.aiff\ntrain/train04618.aiff\ntrain/train04619.aiff\ntrain/train04620.aiff\ntrain/train04621.aiff\ntrain/train04622.aiff\ntrain/train04623.aiff\ntrain/train04624.aiff\ntrain/train04625.aiff\ntrain/train04626.aiff\ntrai
n/train04627.aiff\ntrain/train04628.aiff\ntrain/train04629.aiff\ntrain/train04630.aiff\ntrain/train04631.aiff\ntrain/train04632.aiff\ntrain/train04633.aiff\ntrain/train04634.aiff\ntrain/train04635.aiff\ntrain/train04636.aiff\ntrain/train04637.aiff\ntrain/train04638.aiff\ntrain/train04639.aiff\ntrain/train04640.aiff\ntrain/train04641.aiff\ntrain/train04642.aiff\ntrain/train04643.aiff\ntrain/train04644.aiff\ntrain/train04645.aiff\ntrain/train04646.aiff\ntrain/train04647.aiff\ntrain/train04648.aiff\ntrain/train04649.aiff\ntrain/train04650.aiff\ntrain/train04651.aiff\ntrain/train04652.aiff\ntrain/train04653.aiff\ntrain/train04654.aiff\ntrain/train04655.aiff\ntrain/train04656.aiff\ntrain/train04657.aiff\ntrain/train04658.aiff\ntrain/train04659.aiff\ntrain/train04660.aiff\ntrain/train04661.aiff\ntrain/train04662.aiff\ntrain/train04663.aiff\ntrain/train04664.aiff\ntrain/train04665.aiff\ntrain/train04666.aiff\ntrain/train04667.aiff\ntrain/train04668.aiff\ntrain/train04669.aiff\ntrain/train04670.aiff\ntrain/train04671.aiff\ntrain/train04672.aiff\ntrain/train04673.aiff\ntrain/train04674.aiff\ntrain/train04675.aiff\ntrain/train04676.aiff\ntrain/train04677.aiff\ntrain/train04678.aiff\ntrain/train04679.aiff\ntrain/train04680.aiff\ntrain/train04681.aiff\ntrain/train04682.aiff\ntrain/train04683.aiff\ntrain/train04684.aiff\ntrain/train04685.aiff\ntrain/train04686.aiff\ntrain/train04687.aiff\ntrain/train04688.aiff\ntrain/train04689.aiff\ntrain/train04690.aiff\ntrain/train04691.aiff\ntrain/train04692.aiff\ntrain/train04693.aiff\ntrain/train04694.aiff\ntrain/train04695.aiff\ntrain/train04696.aiff\ntrain/train04697.aiff\ntrain/train04698.aiff\ntrain/train04699.aiff\ntrain/train04700.aiff\ntrain/train04701.aiff\ntrain/train04702.aiff\ntrain/train04703.aiff\ntrain/train04704.aiff\ntrain/train04705.aiff\ntrain/train04706.aiff\ntrain/train04707.aiff\ntrain/train04708.aiff\ntrain/train04709.aiff\ntrain/train04710.aiff\ntrain/train04711.aiff\ntrain/train04712.aiff\ntrain/train04713.aiff\ntrain/train04714.aiff\ntrain/train04715.aiff\ntrain/train04716.aiff\ntrain/train04717.aiff\ntrain/train04718.aiff\ntrain/train04719.aiff\ntrain/train04720.aiff\ntrain/train04721.aiff\ntrain/train04722.aiff\ntrain/train04723.aiff\ntrain/train04724.aiff\ntrain/train04725.aiff\ntrain/train04726.aiff\ntrain/train04727.aiff\ntrain/train04728.aiff\ntrain/train04729.aiff\ntrain/train04730.aiff\ntrain/train04731.aiff\ntrain/train04732.aiff\ntrain/train04733.aiff\ntrain/train04734.aiff\ntrain/train04735.aiff\ntrain/train04736.aiff\ntrain/train04737.aiff\ntrain/train04738.aiff\ntrain/train04739.aiff\ntrain/train04740.aiff\ntrain/train04741.aiff\ntrain/train04742.aiff\ntrain/train04743.aiff\ntrain/train04744.aiff\ntrain/train04745.aiff\ntrain/train04746.aiff\ntrain/train04747.aiff\ntrain/train04748.aiff\ntrain/train04749.aiff\ntrain/train04750.aiff\ntrain/train04751.aiff\ntrain/train04752.aiff\ntrain/train04753.aiff\ntrain/train04754.aiff\ntrain/train04755.aiff\ntrain/train04756.aiff\ntrain/train04757.aiff\ntrain/train04758.aiff\ntrain/train04759.aiff\ntrain/train04760.aiff\ntrain/train04761.aiff\ntrain/train04762.aiff\ntrain/train04763.aiff\ntrain/train04764.aiff\ntrain/train04765.aiff\ntrain/train04766.aiff\ntrain/train04767.aiff\ntrain/train04768.aiff\ntrain/train04769.aiff\ntrain/train04770.aiff\ntrain/train04771.aiff\ntrain/train04772.aiff\ntrain/train04773.aiff\ntrain/train04774.aiff\ntrain/train04775.aiff\ntrain/train04776.aiff\ntrain/train04777.aiff\ntrain/train04778.aiff\ntrain/train04779.aiff\ntrain/train04780.aiff\ntrain/train04781.
aiff\n[... sequential file listing elided: train/train04782.aiff through train/train07562.aiff, one entry per line ...]\ntrain/train07563.aiff
\ntrain/train07564.aiff\ntrain/train07565.aiff\ntrain/train07566.aiff\ntrain/train07567.aiff\ntrain/train07568.aiff\ntrain/train07569.aiff\ntrain/train07570.aiff\ntrain/train07571.aiff\ntrain/train07572.aiff\ntrain/train07573.aiff\ntrain/train07574.aiff\ntrain/train07575.aiff\ntrain/train07576.aiff\ntrain/train07577.aiff\ntrain/train07578.aiff\ntrain/train07579.aiff\ntrain/train07580.aiff\ntrain/train07581.aiff\ntrain/train07582.aiff\ntrain/train07583.aiff\ntrain/train07584.aiff\ntrain/train07585.aiff\ntrain/train07586.aiff\ntrain/train07587.aiff\ntrain/train07588.aiff\ntrain/train07589.aiff\ntrain/train07590.aiff\ntrain/train07591.aiff\ntrain/train07592.aiff\ntrain/train07593.aiff\ntrain/train07594.aiff\ntrain/train07595.aiff\ntrain/train07596.aiff\ntrain/train07597.aiff\ntrain/train07598.aiff\ntrain/train07599.aiff\ntrain/train07600.aiff\ntrain/train07601.aiff\ntrain/train07602.aiff\ntrain/train07603.aiff\ntrain/train07604.aiff\ntrain/train07605.aiff\ntrain/train07606.aiff\ntrain/train07607.aiff\ntrain/train07608.aiff\ntrain/train07609.aiff\ntrain/train07610.aiff\ntrain/train07611.aiff\ntrain/train07612.aiff\ntrain/train07613.aiff\ntrain/train07614.aiff\ntrain/train07615.aiff\ntrain/train07616.aiff\ntrain/train07617.aiff\ntrain/train07618.aiff\ntrain/train07619.aiff\ntrain/train07620.aiff\ntrain/train07621.aiff\ntrain/train07622.aiff\ntrain/train07623.aiff\ntrain/train07624.aiff\ntrain/train07625.aiff\ntrain/train07626.aiff\ntrain/train07627.aiff\ntrain/train07628.aiff\ntrain/train07629.aiff\ntrain/train07630.aiff\ntrain/train07631.aiff\ntrain/train07632.aiff\ntrain/train07633.aiff\ntrain/train07634.aiff\ntrain/train07635.aiff\ntrain/train07636.aiff\ntrain/train07637.aiff\ntrain/train07638.aiff\ntrain/train07639.aiff\ntrain/train07640.aiff\ntrain/train07641.aiff\ntrain/train07642.aiff\ntrain/train07643.aiff\ntrain/train07644.aiff\ntrain/train07645.aiff\ntrain/train07646.aiff\ntrain/train07647.aiff\ntrain/train07648.aiff\ntrain/train07649.aiff\ntrain/train07650.aiff\ntrain/train07651.aiff\ntrain/train07652.aiff\ntrain/train07653.aiff\ntrain/train07654.aiff\ntrain/train07655.aiff\ntrain/train07656.aiff\ntrain/train07657.aiff\ntrain/train07658.aiff\ntrain/train07659.aiff\ntrain/train07660.aiff\ntrain/train07661.aiff\ntrain/train07662.aiff\ntrain/train07663.aiff\ntrain/train07664.aiff\ntrain/train07665.aiff\ntrain/train07666.aiff\ntrain/train07667.aiff\ntrain/train07668.aiff\ntrain/train07669.aiff\ntrain/train07670.aiff\ntrain/train07671.aiff\ntrain/train07672.aiff\ntrain/train07673.aiff\ntrain/train07674.aiff\ntrain/train07675.aiff\ntrain/train07676.aiff\ntrain/train07677.aiff\ntrain/train07678.aiff\ntrain/train07679.aiff\ntrain/train07680.aiff\ntrain/train07681.aiff\ntrain/train07682.aiff\ntrain/train07683.aiff\ntrain/train07684.aiff\ntrain/train07685.aiff\ntrain/train07686.aiff\ntrain/train07687.aiff\ntrain/train07688.aiff\ntrain/train07689.aiff\ntrain/train07690.aiff\ntrain/train07691.aiff\ntrain/train07692.aiff\ntrain/train07693.aiff\ntrain/train07694.aiff\ntrain/train07695.aiff\ntrain/train07696.aiff\ntrain/train07697.aiff\ntrain/train07698.aiff\ntrain/train07699.aiff\ntrain/train07700.aiff\ntrain/train07701.aiff\ntrain/train07702.aiff\ntrain/train07703.aiff\ntrain/train07704.aiff\ntrain/train07705.aiff\ntrain/train07706.aiff\ntrain/train07707.aiff\ntrain/train07708.aiff\ntrain/train07709.aiff\ntrain/train07710.aiff\ntrain/train07711.aiff\ntrain/train07712.aiff\ntrain/train07713.aiff\ntrain/train07714.aiff\ntrain/train07715.aiff\ntrain/train07716.aiff\ntrain/train07717.aiff\ntrain/train
07718.aiff\ntrain/train07719.aiff\ntrain/train07720.aiff\ntrain/train07721.aiff\ntrain/train07722.aiff\ntrain/train07723.aiff\ntrain/train07724.aiff\ntrain/train07725.aiff\ntrain/train07726.aiff\ntrain/train07727.aiff\ntrain/train07728.aiff\ntrain/train07729.aiff\ntrain/train07730.aiff\ntrain/train07731.aiff\ntrain/train07732.aiff\ntrain/train07733.aiff\ntrain/train07734.aiff\ntrain/train07735.aiff\ntrain/train07736.aiff\ntrain/train07737.aiff\ntrain/train07738.aiff\ntrain/train07739.aiff\ntrain/train07740.aiff\ntrain/train07741.aiff\ntrain/train07742.aiff\ntrain/train07743.aiff\ntrain/train07744.aiff\ntrain/train07745.aiff\ntrain/train07746.aiff\ntrain/train07747.aiff\ntrain/train07748.aiff\ntrain/train07749.aiff\ntrain/train07750.aiff\ntrain/train07751.aiff\ntrain/train07752.aiff\ntrain/train07753.aiff\ntrain/train07754.aiff\ntrain/train07755.aiff\ntrain/train07756.aiff\ntrain/train07757.aiff\ntrain/train07758.aiff\ntrain/train07759.aiff\ntrain/train07760.aiff\ntrain/train07761.aiff\ntrain/train07762.aiff\ntrain/train07763.aiff\ntrain/train07764.aiff\ntrain/train07765.aiff\ntrain/train07766.aiff\ntrain/train07767.aiff\ntrain/train07768.aiff\ntrain/train07769.aiff\ntrain/train07770.aiff\ntrain/train07771.aiff\ntrain/train07772.aiff\ntrain/train07773.aiff\ntrain/train07774.aiff\ntrain/train07775.aiff\ntrain/train07776.aiff\ntrain/train07777.aiff\ntrain/train07778.aiff\ntrain/train07779.aiff\ntrain/train07780.aiff\ntrain/train07781.aiff\ntrain/train07782.aiff\ntrain/train07783.aiff\ntrain/train07784.aiff\ntrain/train07785.aiff\ntrain/train07786.aiff\ntrain/train07787.aiff\ntrain/train07788.aiff\ntrain/train07789.aiff\ntrain/train07790.aiff\ntrain/train07791.aiff\ntrain/train07792.aiff\ntrain/train07793.aiff\ntrain/train07794.aiff\ntrain/train07795.aiff\ntrain/train07796.aiff\ntrain/train07797.aiff\ntrain/train07798.aiff\ntrain/train07799.aiff\ntrain/train07800.aiff\ntrain/train07801.aiff\ntrain/train07802.aiff\ntrain/train07803.aiff\ntrain/train07804.aiff\ntrain/train07805.aiff\ntrain/train07806.aiff\ntrain/train07807.aiff\ntrain/train07808.aiff\ntrain/train07809.aiff\ntrain/train07810.aiff\ntrain/train07811.aiff\ntrain/train07812.aiff\ntrain/train07813.aiff\ntrain/train07814.aiff\ntrain/train07815.aiff\ntrain/train07816.aiff\ntrain/train07817.aiff\ntrain/train07818.aiff\ntrain/train07819.aiff\ntrain/train07820.aiff\ntrain/train07821.aiff\ntrain/train07822.aiff\ntrain/train07823.aiff\ntrain/train07824.aiff\ntrain/train07825.aiff\ntrain/train07826.aiff\ntrain/train07827.aiff\ntrain/train07828.aiff\ntrain/train07829.aiff\ntrain/train07830.aiff\ntrain/train07831.aiff\ntrain/train07832.aiff\ntrain/train07833.aiff\ntrain/train07834.aiff\ntrain/train07835.aiff\ntrain/train07836.aiff\ntrain/train07837.aiff\ntrain/train07838.aiff\ntrain/train07839.aiff\ntrain/train07840.aiff\ntrain/train07841.aiff\ntrain/train07842.aiff\ntrain/train07843.aiff\ntrain/train07844.aiff\ntrain/train07845.aiff\ntrain/train07846.aiff\ntrain/train07847.aiff\ntrain/train07848.aiff\ntrain/train07849.aiff\ntrain/train07850.aiff\ntrain/train07851.aiff\ntrain/train07852.aiff\ntrain/train07853.aiff\ntrain/train07854.aiff\ntrain/train07855.aiff\ntrain/train07856.aiff\ntrain/train07857.aiff\ntrain/train07858.aiff\ntrain/train07859.aiff\ntrain/train07860.aiff\ntrain/train07861.aiff\ntrain/train07862.aiff\ntrain/train07863.aiff\ntrain/train07864.aiff\ntrain/train07865.aiff\ntrain/train07866.aiff\ntrain/train07867.aiff\ntrain/train07868.aiff\ntrain/train07869.aiff\ntrain/train07870.aiff\ntrain/train07871.aiff\ntrain/train07872.aiff\nt
rain/train07873.aiff\ntrain/train07874.aiff\ntrain/train07875.aiff\ntrain/train07876.aiff\ntrain/train07877.aiff\ntrain/train07878.aiff\ntrain/train07879.aiff\ntrain/train07880.aiff\ntrain/train07881.aiff\ntrain/train07882.aiff\ntrain/train07883.aiff\ntrain/train07884.aiff\ntrain/train07885.aiff\ntrain/train07886.aiff\ntrain/train07887.aiff\ntrain/train07888.aiff\ntrain/train07889.aiff\ntrain/train07890.aiff\ntrain/train07891.aiff\ntrain/train07892.aiff\ntrain/train07893.aiff\ntrain/train07894.aiff\ntrain/train07895.aiff\ntrain/train07896.aiff\ntrain/train07897.aiff\ntrain/train07898.aiff\ntrain/train07899.aiff\ntrain/train07900.aiff\ntrain/train07901.aiff\ntrain/train07902.aiff\ntrain/train07903.aiff\ntrain/train07904.aiff\ntrain/train07905.aiff\ntrain/train07906.aiff\ntrain/train07907.aiff\ntrain/train07908.aiff\ntrain/train07909.aiff\ntrain/train07910.aiff\ntrain/train07911.aiff\ntrain/train07912.aiff\ntrain/train07913.aiff\ntrain/train07914.aiff\ntrain/train07915.aiff\ntrain/train07916.aiff\ntrain/train07917.aiff\ntrain/train07918.aiff\ntrain/train07919.aiff\ntrain/train07920.aiff\ntrain/train07921.aiff\ntrain/train07922.aiff\ntrain/train07923.aiff\ntrain/train07924.aiff\ntrain/train07925.aiff\ntrain/train07926.aiff\ntrain/train07927.aiff\ntrain/train07928.aiff\ntrain/train07929.aiff\ntrain/train07930.aiff\ntrain/train07931.aiff\ntrain/train07932.aiff\ntrain/train07933.aiff\ntrain/train07934.aiff\ntrain/train07935.aiff\ntrain/train07936.aiff\ntrain/train07937.aiff\ntrain/train07938.aiff\ntrain/train07939.aiff\ntrain/train07940.aiff\ntrain/train07941.aiff\ntrain/train07942.aiff\ntrain/train07943.aiff\ntrain/train07944.aiff\ntrain/train07945.aiff\ntrain/train07946.aiff\ntrain/train07947.aiff\ntrain/train07948.aiff\ntrain/train07949.aiff\ntrain/train07950.aiff\ntrain/train07951.aiff\ntrain/train07952.aiff\ntrain/train07953.aiff\ntrain/train07954.aiff\ntrain/train07955.aiff\ntrain/train07956.aiff\ntrain/train07957.aiff\ntrain/train07958.aiff\ntrain/train07959.aiff\ntrain/train07960.aiff\ntrain/train07961.aiff\ntrain/train07962.aiff\ntrain/train07963.aiff\ntrain/train07964.aiff\ntrain/train07965.aiff\ntrain/train07966.aiff\ntrain/train07967.aiff\ntrain/train07968.aiff\ntrain/train07969.aiff\ntrain/train07970.aiff\ntrain/train07971.aiff\ntrain/train07972.aiff\ntrain/train07973.aiff\ntrain/train07974.aiff\ntrain/train07975.aiff\ntrain/train07976.aiff\ntrain/train07977.aiff\ntrain/train07978.aiff\ntrain/train07979.aiff\ntrain/train07980.aiff\ntrain/train07981.aiff\ntrain/train07982.aiff\ntrain/train07983.aiff\ntrain/train07984.aiff\ntrain/train07985.aiff\ntrain/train07986.aiff\ntrain/train07987.aiff\ntrain/train07988.aiff\ntrain/train07989.aiff\ntrain/train07990.aiff\ntrain/train07991.aiff\ntrain/train07992.aiff\ntrain/train07993.aiff\ntrain/train07994.aiff\ntrain/train07995.aiff\ntrain/train07996.aiff\ntrain/train07997.aiff\ntrain/train07998.aiff\ntrain/train07999.aiff\ntrain/train08000.aiff\ntrain/train08001.aiff\ntrain/train08002.aiff\ntrain/train08003.aiff\ntrain/train08004.aiff\ntrain/train08005.aiff\ntrain/train08006.aiff\ntrain/train08007.aiff\ntrain/train08008.aiff\ntrain/train08009.aiff\ntrain/train08010.aiff\ntrain/train08011.aiff\ntrain/train08012.aiff\ntrain/train08013.aiff\ntrain/train08014.aiff\ntrain/train08015.aiff\ntrain/train08016.aiff\ntrain/train08017.aiff\ntrain/train08018.aiff\ntrain/train08019.aiff\ntrain/train08020.aiff\ntrain/train08021.aiff\ntrain/train08022.aiff\ntrain/train08023.aiff\ntrain/train08024.aiff\ntrain/train08025.aiff\ntrain/train08026.aiff\ntrain/train080
27.aiff\ntrain/train08028.aiff\ntrain/train08029.aiff\ntrain/train08030.aiff\ntrain/train08031.aiff\ntrain/train08032.aiff\ntrain/train08033.aiff\ntrain/train08034.aiff\ntrain/train08035.aiff\ntrain/train08036.aiff\ntrain/train08037.aiff\ntrain/train08038.aiff\ntrain/train08039.aiff\ntrain/train08040.aiff\ntrain/train08041.aiff\ntrain/train08042.aiff\ntrain/train08043.aiff\ntrain/train08044.aiff\ntrain/train08045.aiff\ntrain/train08046.aiff\ntrain/train08047.aiff\ntrain/train08048.aiff\ntrain/train08049.aiff\ntrain/train08050.aiff\ntrain/train08051.aiff\ntrain/train08052.aiff\ntrain/train08053.aiff\ntrain/train08054.aiff\ntrain/train08055.aiff\ntrain/train08056.aiff\ntrain/train08057.aiff\ntrain/train08058.aiff\ntrain/train08059.aiff\ntrain/train08060.aiff\ntrain/train08061.aiff\ntrain/train08062.aiff\ntrain/train08063.aiff\ntrain/train08064.aiff\ntrain/train08065.aiff\ntrain/train08066.aiff\ntrain/train08067.aiff\ntrain/train08068.aiff\ntrain/train08069.aiff\ntrain/train08070.aiff\ntrain/train08071.aiff\ntrain/train08072.aiff\ntrain/train08073.aiff\ntrain/train08074.aiff\ntrain/train08075.aiff\ntrain/train08076.aiff\ntrain/train08077.aiff\ntrain/train08078.aiff\ntrain/train08079.aiff\ntrain/train08080.aiff\ntrain/train08081.aiff\ntrain/train08082.aiff\ntrain/train08083.aiff\ntrain/train08084.aiff\ntrain/train08085.aiff\ntrain/train08086.aiff\ntrain/train08087.aiff\ntrain/train08088.aiff\ntrain/train08089.aiff\ntrain/train08090.aiff\ntrain/train08091.aiff\ntrain/train08092.aiff\ntrain/train08093.aiff\ntrain/train08094.aiff\ntrain/train08095.aiff\ntrain/train08096.aiff\ntrain/train08097.aiff\ntrain/train08098.aiff\ntrain/train08099.aiff\ntrain/train08100.aiff\ntrain/train08101.aiff\ntrain/train08102.aiff\ntrain/train08103.aiff\ntrain/train08104.aiff\ntrain/train08105.aiff\ntrain/train08106.aiff\ntrain/train08107.aiff\ntrain/train08108.aiff\ntrain/train08109.aiff\ntrain/train08110.aiff\ntrain/train08111.aiff\ntrain/train08112.aiff\ntrain/train08113.aiff\ntrain/train08114.aiff\ntrain/train08115.aiff\ntrain/train08116.aiff\ntrain/train08117.aiff\ntrain/train08118.aiff\ntrain/train08119.aiff\ntrain/train08120.aiff\ntrain/train08121.aiff\ntrain/train08122.aiff\ntrain/train08123.aiff\ntrain/train08124.aiff\ntrain/train08125.aiff\ntrain/train08126.aiff\ntrain/train08127.aiff\ntrain/train08128.aiff\ntrain/train08129.aiff\ntrain/train08130.aiff\ntrain/train08131.aiff\ntrain/train08132.aiff\ntrain/train08133.aiff\ntrain/train08134.aiff\ntrain/train08135.aiff\ntrain/train08136.aiff\ntrain/train08137.aiff\ntrain/train08138.aiff\ntrain/train08139.aiff\ntrain/train08140.aiff\ntrain/train08141.aiff\ntrain/train08142.aiff\ntrain/train08143.aiff\ntrain/train08144.aiff\ntrain/train08145.aiff\ntrain/train08146.aiff\ntrain/train08147.aiff\ntrain/train08148.aiff\ntrain/train08149.aiff\ntrain/train08150.aiff\ntrain/train08151.aiff\ntrain/train08152.aiff\ntrain/train08153.aiff\ntrain/train08154.aiff\ntrain/train08155.aiff\ntrain/train08156.aiff\ntrain/train08157.aiff\ntrain/train08158.aiff\ntrain/train08159.aiff\ntrain/train08160.aiff\ntrain/train08161.aiff\ntrain/train08162.aiff\ntrain/train08163.aiff\ntrain/train08164.aiff\ntrain/train08165.aiff\ntrain/train08166.aiff\ntrain/train08167.aiff\ntrain/train08168.aiff\ntrain/train08169.aiff\ntrain/train08170.aiff\ntrain/train08171.aiff\ntrain/train08172.aiff\ntrain/train08173.aiff\ntrain/train08174.aiff\ntrain/train08175.aiff\ntrain/train08176.aiff\ntrain/train08177.aiff\ntrain/train08178.aiff\ntrain/train08179.aiff\ntrain/train08180.aiff\ntrain/train08181.aiff\ntrai
n/train08182.aiff\ntrain/train08183.aiff\ntrain/train08184.aiff\ntrain/train08185.aiff\ntrain/train08186.aiff\ntrain/train08187.aiff\ntrain/train08188.aiff\ntrain/train08189.aiff\ntrain/train08190.aiff\ntrain/train08191.aiff\ntrain/train08192.aiff\ntrain/train08193.aiff\ntrain/train08194.aiff\ntrain/train08195.aiff\ntrain/train08196.aiff\ntrain/train08197.aiff\ntrain/train08198.aiff\ntrain/train08199.aiff\ntrain/train08200.aiff\ntrain/train08201.aiff\ntrain/train08202.aiff\ntrain/train08203.aiff\ntrain/train08204.aiff\ntrain/train08205.aiff\ntrain/train08206.aiff\ntrain/train08207.aiff\ntrain/train08208.aiff\ntrain/train08209.aiff\ntrain/train08210.aiff\ntrain/train08211.aiff\ntrain/train08212.aiff\ntrain/train08213.aiff\ntrain/train08214.aiff\ntrain/train08215.aiff\ntrain/train08216.aiff\ntrain/train08217.aiff\ntrain/train08218.aiff\ntrain/train08219.aiff\ntrain/train08220.aiff\ntrain/train08221.aiff\ntrain/train08222.aiff\ntrain/train08223.aiff\ntrain/train08224.aiff\ntrain/train08225.aiff\ntrain/train08226.aiff\ntrain/train08227.aiff\ntrain/train08228.aiff\ntrain/train08229.aiff\ntrain/train08230.aiff\ntrain/train08231.aiff\ntrain/train08232.aiff\ntrain/train08233.aiff\ntrain/train08234.aiff\ntrain/train08235.aiff\ntrain/train08236.aiff\ntrain/train08237.aiff\ntrain/train08238.aiff\ntrain/train08239.aiff\ntrain/train08240.aiff\ntrain/train08241.aiff\ntrain/train08242.aiff\ntrain/train08243.aiff\ntrain/train08244.aiff\ntrain/train08245.aiff\ntrain/train08246.aiff\ntrain/train08247.aiff\ntrain/train08248.aiff\ntrain/train08249.aiff\ntrain/train08250.aiff\ntrain/train08251.aiff\ntrain/train08252.aiff\ntrain/train08253.aiff\ntrain/train08254.aiff\ntrain/train08255.aiff\ntrain/train08256.aiff\ntrain/train08257.aiff\ntrain/train08258.aiff\ntrain/train08259.aiff\ntrain/train08260.aiff\ntrain/train08261.aiff\ntrain/train08262.aiff\ntrain/train08263.aiff\ntrain/train08264.aiff\ntrain/train08265.aiff\ntrain/train08266.aiff\ntrain/train08267.aiff\ntrain/train08268.aiff\ntrain/train08269.aiff\ntrain/train08270.aiff\ntrain/train08271.aiff\ntrain/train08272.aiff\ntrain/train08273.aiff\ntrain/train08274.aiff\ntrain/train08275.aiff\ntrain/train08276.aiff\ntrain/train08277.aiff\ntrain/train08278.aiff\ntrain/train08279.aiff\ntrain/train08280.aiff\ntrain/train08281.aiff\ntrain/train08282.aiff\ntrain/train08283.aiff\ntrain/train08284.aiff\ntrain/train08285.aiff\ntrain/train08286.aiff\ntrain/train08287.aiff\ntrain/train08288.aiff\ntrain/train08289.aiff\ntrain/train08290.aiff\ntrain/train08291.aiff\ntrain/train08292.aiff\ntrain/train08293.aiff\ntrain/train08294.aiff\ntrain/train08295.aiff\ntrain/train08296.aiff\ntrain/train08297.aiff\ntrain/train08298.aiff\ntrain/train08299.aiff\ntrain/train08300.aiff\ntrain/train08301.aiff\ntrain/train08302.aiff\ntrain/train08303.aiff\ntrain/train08304.aiff\ntrain/train08305.aiff\ntrain/train08306.aiff\ntrain/train08307.aiff\ntrain/train08308.aiff\ntrain/train08309.aiff\ntrain/train08310.aiff\ntrain/train08311.aiff\ntrain/train08312.aiff\ntrain/train08313.aiff\ntrain/train08314.aiff\ntrain/train08315.aiff\ntrain/train08316.aiff\ntrain/train08317.aiff\ntrain/train08318.aiff\ntrain/train08319.aiff\ntrain/train08320.aiff\ntrain/train08321.aiff\ntrain/train08322.aiff\ntrain/train08323.aiff\ntrain/train08324.aiff\ntrain/train08325.aiff\ntrain/train08326.aiff\ntrain/train08327.aiff\ntrain/train08328.aiff\ntrain/train08329.aiff\ntrain/train08330.aiff\ntrain/train08331.aiff\ntrain/train08332.aiff\ntrain/train08333.aiff\ntrain/train08334.aiff\ntrain/train08335.aiff\ntrain/train08336.
aiff\ntrain/train08337.aiff\ntrain/train08338.aiff\ntrain/train08339.aiff\ntrain/train08340.aiff\ntrain/train08341.aiff\ntrain/train08342.aiff\ntrain/train08343.aiff\ntrain/train08344.aiff\ntrain/train08345.aiff\ntrain/train08346.aiff\ntrain/train08347.aiff\ntrain/train08348.aiff\ntrain/train08349.aiff\ntrain/train08350.aiff\ntrain/train08351.aiff\ntrain/train08352.aiff\ntrain/train08353.aiff\ntrain/train08354.aiff\ntrain/train08355.aiff\ntrain/train08356.aiff\ntrain/train08357.aiff\ntrain/train08358.aiff\ntrain/train08359.aiff\ntrain/train08360.aiff\ntrain/train08361.aiff\ntrain/train08362.aiff\ntrain/train08363.aiff\ntrain/train08364.aiff\ntrain/train08365.aiff\ntrain/train08366.aiff\ntrain/train08367.aiff\ntrain/train08368.aiff\ntrain/train08369.aiff\ntrain/train08370.aiff\ntrain/train08371.aiff\ntrain/train08372.aiff\ntrain/train08373.aiff\ntrain/train08374.aiff\ntrain/train08375.aiff\ntrain/train08376.aiff\ntrain/train08377.aiff\ntrain/train08378.aiff\ntrain/train08379.aiff\ntrain/train08380.aiff\ntrain/train08381.aiff\ntrain/train08382.aiff\ntrain/train08383.aiff\ntrain/train08384.aiff\ntrain/train08385.aiff\ntrain/train08386.aiff\ntrain/train08387.aiff\ntrain/train08388.aiff\ntrain/train08389.aiff\ntrain/train08390.aiff\ntrain/train08391.aiff\ntrain/train08392.aiff\ntrain/train08393.aiff\ntrain/train08394.aiff\ntrain/train08395.aiff\ntrain/train08396.aiff\ntrain/train08397.aiff\ntrain/train08398.aiff\ntrain/train08399.aiff\ntrain/train08400.aiff\ntrain/train08401.aiff\ntrain/train08402.aiff\ntrain/train08403.aiff\ntrain/train08404.aiff\ntrain/train08405.aiff\ntrain/train08406.aiff\ntrain/train08407.aiff\ntrain/train08408.aiff\ntrain/train08409.aiff\ntrain/train08410.aiff\ntrain/train08411.aiff\ntrain/train08412.aiff\ntrain/train08413.aiff\ntrain/train08414.aiff\ntrain/train08415.aiff\ntrain/train08416.aiff\ntrain/train08417.aiff\ntrain/train08418.aiff\ntrain/train08419.aiff\ntrain/train08420.aiff\ntrain/train08421.aiff\ntrain/train08422.aiff\ntrain/train08423.aiff\ntrain/train08424.aiff\ntrain/train08425.aiff\ntrain/train08426.aiff\ntrain/train08427.aiff\ntrain/train08428.aiff\ntrain/train08429.aiff\ntrain/train08430.aiff\ntrain/train08431.aiff\ntrain/train08432.aiff\ntrain/train08433.aiff\ntrain/train08434.aiff\ntrain/train08435.aiff\ntrain/train08436.aiff\ntrain/train08437.aiff\ntrain/train08438.aiff\ntrain/train08439.aiff\ntrain/train08440.aiff\ntrain/train08441.aiff\ntrain/train08442.aiff\ntrain/train08443.aiff\ntrain/train08444.aiff\ntrain/train08445.aiff\ntrain/train08446.aiff\ntrain/train08447.aiff\ntrain/train08448.aiff\ntrain/train08449.aiff\ntrain/train08450.aiff\ntrain/train08451.aiff\ntrain/train08452.aiff\ntrain/train08453.aiff\ntrain/train08454.aiff\ntrain/train08455.aiff\ntrain/train08456.aiff\ntrain/train08457.aiff\ntrain/train08458.aiff\ntrain/train08459.aiff\ntrain/train08460.aiff\ntrain/train08461.aiff\ntrain/train08462.aiff\ntrain/train08463.aiff\ntrain/train08464.aiff\ntrain/train08465.aiff\ntrain/train08466.aiff\ntrain/train08467.aiff\ntrain/train08468.aiff\ntrain/train08469.aiff\ntrain/train08470.aiff\ntrain/train08471.aiff\ntrain/train08472.aiff\ntrain/train08473.aiff\ntrain/train08474.aiff\ntrain/train08475.aiff\ntrain/train08476.aiff\ntrain/train08477.aiff\ntrain/train08478.aiff\ntrain/train08479.aiff\ntrain/train08480.aiff\ntrain/train08481.aiff\ntrain/train08482.aiff\ntrain/train08483.aiff\ntrain/train08484.aiff\ntrain/train08485.aiff\ntrain/train08486.aiff\ntrain/train08487.aiff\ntrain/train08488.aiff\ntrain/train08489.aiff\ntrain/train08490.aiff\ntrain/t
rain08491.aiff\ntrain/train08492.aiff\ntrain/train08493.aiff\ntrain/train08494.aiff\ntrain/train08495.aiff\ntrain/train08496.aiff\ntrain/train08497.aiff\ntrain/train08498.aiff\ntrain/train08499.aiff\ntrain/train08500.aiff\ntrain/train08501.aiff\ntrain/train08502.aiff\ntrain/train08503.aiff\ntrain/train08504.aiff\ntrain/train08505.aiff\ntrain/train08506.aiff\ntrain/train08507.aiff\ntrain/train08508.aiff\ntrain/train08509.aiff\ntrain/train08510.aiff\ntrain/train08511.aiff\ntrain/train08512.aiff\ntrain/train08513.aiff\ntrain/train08514.aiff\ntrain/train08515.aiff\ntrain/train08516.aiff\ntrain/train08517.aiff\ntrain/train08518.aiff\ntrain/train08519.aiff\ntrain/train08520.aiff\ntrain/train08521.aiff\ntrain/train08522.aiff\ntrain/train08523.aiff\ntrain/train08524.aiff\ntrain/train08525.aiff\ntrain/train08526.aiff\ntrain/train08527.aiff\ntrain/train08528.aiff\ntrain/train08529.aiff\ntrain/train08530.aiff\ntrain/train08531.aiff\ntrain/train08532.aiff\ntrain/train08533.aiff\ntrain/train08534.aiff\ntrain/train08535.aiff\ntrain/train08536.aiff\ntrain/train08537.aiff\ntrain/train08538.aiff\ntrain/train08539.aiff\ntrain/train08540.aiff\ntrain/train08541.aiff\ntrain/train08542.aiff\ntrain/train08543.aiff\ntrain/train08544.aiff\ntrain/train08545.aiff\ntrain/train08546.aiff\ntrain/train08547.aiff\ntrain/train08548.aiff\ntrain/train08549.aiff\ntrain/train08550.aiff\ntrain/train08551.aiff\ntrain/train08552.aiff\ntrain/train08553.aiff\ntrain/train08554.aiff\ntrain/train08555.aiff\ntrain/train08556.aiff\ntrain/train08557.aiff\ntrain/train08558.aiff\ntrain/train08559.aiff\ntrain/train08560.aiff\ntrain/train08561.aiff\ntrain/train08562.aiff\ntrain/train08563.aiff\ntrain/train08564.aiff\ntrain/train08565.aiff\ntrain/train08566.aiff\ntrain/train08567.aiff\ntrain/train08568.aiff\ntrain/train08569.aiff\ntrain/train08570.aiff\ntrain/train08571.aiff\ntrain/train08572.aiff\ntrain/train08573.aiff\ntrain/train08574.aiff\ntrain/train08575.aiff\ntrain/train08576.aiff\ntrain/train08577.aiff\ntrain/train08578.aiff\ntrain/train08579.aiff\ntrain/train08580.aiff\ntrain/train08581.aiff\ntrain/train08582.aiff\ntrain/train08583.aiff\ntrain/train08584.aiff\ntrain/train08585.aiff\ntrain/train08586.aiff\ntrain/train08587.aiff\ntrain/train08588.aiff\ntrain/train08589.aiff\ntrain/train08590.aiff\ntrain/train08591.aiff\ntrain/train08592.aiff\ntrain/train08593.aiff\ntrain/train08594.aiff\ntrain/train08595.aiff\ntrain/train08596.aiff\ntrain/train08597.aiff\ntrain/train08598.aiff\ntrain/train08599.aiff\ntrain/train08600.aiff\ntrain/train08601.aiff\ntrain/train08602.aiff\ntrain/train08603.aiff\ntrain/train08604.aiff\ntrain/train08605.aiff\ntrain/train08606.aiff\ntrain/train08607.aiff\ntrain/train08608.aiff\ntrain/train08609.aiff\ntrain/train08610.aiff\ntrain/train08611.aiff\ntrain/train08612.aiff\ntrain/train08613.aiff\ntrain/train08614.aiff\ntrain/train08615.aiff\ntrain/train08616.aiff\ntrain/train08617.aiff\ntrain/train08618.aiff\ntrain/train08619.aiff\ntrain/train08620.aiff\ntrain/train08621.aiff\ntrain/train08622.aiff\ntrain/train08623.aiff\ntrain/train08624.aiff\ntrain/train08625.aiff\ntrain/train08626.aiff\ntrain/train08627.aiff\ntrain/train08628.aiff\ntrain/train08629.aiff\ntrain/train08630.aiff\ntrain/train08631.aiff\ntrain/train08632.aiff\ntrain/train08633.aiff\ntrain/train08634.aiff\ntrain/train08635.aiff\ntrain/train08636.aiff\ntrain/train08637.aiff\ntrain/train08638.aiff\ntrain/train08639.aiff\ntrain/train08640.aiff\ntrain/train08641.aiff\ntrain/train08642.aiff\ntrain/train08643.aiff\ntrain/train08644.aiff\ntrain/train08645.aif
f\ntrain/train08646.aiff\ntrain/train08647.aiff\ntrain/train08648.aiff\ntrain/train08649.aiff\ntrain/train08650.aiff\ntrain/train08651.aiff\ntrain/train08652.aiff\ntrain/train08653.aiff\ntrain/train08654.aiff\ntrain/train08655.aiff\ntrain/train08656.aiff\ntrain/train08657.aiff\ntrain/train08658.aiff\ntrain/train08659.aiff\ntrain/train08660.aiff\ntrain/train08661.aiff\ntrain/train08662.aiff\ntrain/train08663.aiff\ntrain/train08664.aiff\ntrain/train08665.aiff\ntrain/train08666.aiff\ntrain/train08667.aiff\ntrain/train08668.aiff\ntrain/train08669.aiff\ntrain/train08670.aiff\ntrain/train08671.aiff\ntrain/train08672.aiff\ntrain/train08673.aiff\ntrain/train08674.aiff\ntrain/train08675.aiff\ntrain/train08676.aiff\ntrain/train08677.aiff\ntrain/train08678.aiff\ntrain/train08679.aiff\ntrain/train08680.aiff\ntrain/train08681.aiff\ntrain/train08682.aiff\ntrain/train08683.aiff\ntrain/train08684.aiff\ntrain/train08685.aiff\ntrain/train08686.aiff\ntrain/train08687.aiff\ntrain/train08688.aiff\ntrain/train08689.aiff\ntrain/train08690.aiff\ntrain/train08691.aiff\ntrain/train08692.aiff\ntrain/train08693.aiff\ntrain/train08694.aiff\ntrain/train08695.aiff\ntrain/train08696.aiff\ntrain/train08697.aiff\ntrain/train08698.aiff\ntrain/train08699.aiff\ntrain/train08700.aiff\ntrain/train08701.aiff\ntrain/train08702.aiff\ntrain/train08703.aiff\ntrain/train08704.aiff\ntrain/train08705.aiff\ntrain/train08706.aiff\ntrain/train08707.aiff\ntrain/train08708.aiff\ntrain/train08709.aiff\ntrain/train08710.aiff\ntrain/train08711.aiff\ntrain/train08712.aiff\ntrain/train08713.aiff\ntrain/train08714.aiff\ntrain/train08715.aiff\ntrain/train08716.aiff\ntrain/train08717.aiff\ntrain/train08718.aiff\ntrain/train08719.aiff\ntrain/train08720.aiff\ntrain/train08721.aiff\ntrain/train08722.aiff\ntrain/train08723.aiff\ntrain/train08724.aiff\ntrain/train08725.aiff\ntrain/train08726.aiff\ntrain/train08727.aiff\ntrain/train08728.aiff\ntrain/train08729.aiff\ntrain/train08730.aiff\ntrain/train08731.aiff\ntrain/train08732.aiff\ntrain/train08733.aiff\ntrain/train08734.aiff\ntrain/train08735.aiff\ntrain/train08736.aiff\ntrain/train08737.aiff\ntrain/train08738.aiff\ntrain/train08739.aiff\ntrain/train08740.aiff\ntrain/train08741.aiff\ntrain/train08742.aiff\ntrain/train08743.aiff\ntrain/train08744.aiff\ntrain/train08745.aiff\ntrain/train08746.aiff\ntrain/train08747.aiff\ntrain/train08748.aiff\ntrain/train08749.aiff\ntrain/train08750.aiff\ntrain/train08751.aiff\ntrain/train08752.aiff\ntrain/train08753.aiff\ntrain/train08754.aiff\ntrain/train08755.aiff\ntrain/train08756.aiff\ntrain/train08757.aiff\ntrain/train08758.aiff\ntrain/train08759.aiff\ntrain/train08760.aiff\ntrain/train08761.aiff\ntrain/train08762.aiff\ntrain/train08763.aiff\ntrain/train08764.aiff\ntrain/train08765.aiff\ntrain/train08766.aiff\ntrain/train08767.aiff\ntrain/train08768.aiff\ntrain/train08769.aiff\ntrain/train08770.aiff\ntrain/train08771.aiff\ntrain/train08772.aiff\ntrain/train08773.aiff\ntrain/train08774.aiff\ntrain/train08775.aiff\ntrain/train08776.aiff\ntrain/train08777.aiff\ntrain/train08778.aiff\ntrain/train08779.aiff\ntrain/train08780.aiff\ntrain/train08781.aiff\ntrain/train08782.aiff\ntrain/train08783.aiff\ntrain/train08784.aiff\ntrain/train08785.aiff\ntrain/train08786.aiff\ntrain/train08787.aiff\ntrain/train08788.aiff\ntrain/train08789.aiff\ntrain/train08790.aiff\ntrain/train08791.aiff\ntrain/train08792.aiff\ntrain/train08793.aiff\ntrain/train08794.aiff\ntrain/train08795.aiff\ntrain/train08796.aiff\ntrain/train08797.aiff\ntrain/train08798.aiff\ntrain/train08799.aiff\ntrain/trai
n08800.aiff\ntrain/train08801.aiff\ntrain/train08802.aiff\ntrain/train08803.aiff\ntrain/train08804.aiff\ntrain/train08805.aiff\ntrain/train08806.aiff\ntrain/train08807.aiff\ntrain/train08808.aiff\ntrain/train08809.aiff\ntrain/train08810.aiff\ntrain/train08811.aiff\ntrain/train08812.aiff\ntrain/train08813.aiff\ntrain/train08814.aiff\ntrain/train08815.aiff\ntrain/train08816.aiff\ntrain/train08817.aiff\ntrain/train08818.aiff\ntrain/train08819.aiff\ntrain/train08820.aiff\ntrain/train08821.aiff\ntrain/train08822.aiff\ntrain/train08823.aiff\ntrain/train08824.aiff\ntrain/train08825.aiff\ntrain/train08826.aiff\ntrain/train08827.aiff\ntrain/train08828.aiff\ntrain/train08829.aiff\ntrain/train08830.aiff\ntrain/train08831.aiff\ntrain/train08832.aiff\ntrain/train08833.aiff\ntrain/train08834.aiff\ntrain/train08835.aiff\ntrain/train08836.aiff\ntrain/train08837.aiff\ntrain/train08838.aiff\ntrain/train08839.aiff\ntrain/train08840.aiff\ntrain/train08841.aiff\ntrain/train08842.aiff\ntrain/train08843.aiff\ntrain/train08844.aiff\ntrain/train08845.aiff\ntrain/train08846.aiff\ntrain/train08847.aiff\ntrain/train08848.aiff\ntrain/train08849.aiff\ntrain/train08850.aiff\ntrain/train08851.aiff\ntrain/train08852.aiff\ntrain/train08853.aiff\ntrain/train08854.aiff\ntrain/train08855.aiff\ntrain/train08856.aiff\ntrain/train08857.aiff\ntrain/train08858.aiff\ntrain/train08859.aiff\ntrain/train08860.aiff\ntrain/train08861.aiff\ntrain/train08862.aiff\ntrain/train08863.aiff\ntrain/train08864.aiff\ntrain/train08865.aiff\ntrain/train08866.aiff\ntrain/train08867.aiff\ntrain/train08868.aiff\ntrain/train08869.aiff\ntrain/train08870.aiff\ntrain/train08871.aiff\ntrain/train08872.aiff\ntrain/train08873.aiff\ntrain/train08874.aiff\ntrain/train08875.aiff\ntrain/train08876.aiff\ntrain/train08877.aiff\ntrain/train08878.aiff\ntrain/train08879.aiff\ntrain/train08880.aiff\ntrain/train08881.aiff\ntrain/train08882.aiff\ntrain/train08883.aiff\ntrain/train08884.aiff\ntrain/train08885.aiff\ntrain/train08886.aiff\ntrain/train08887.aiff\ntrain/train08888.aiff\ntrain/train08889.aiff\ntrain/train08890.aiff\ntrain/train08891.aiff\ntrain/train08892.aiff\ntrain/train08893.aiff\ntrain/train08894.aiff\ntrain/train08895.aiff\ntrain/train08896.aiff\ntrain/train08897.aiff\ntrain/train08898.aiff\ntrain/train08899.aiff\ntrain/train08900.aiff\ntrain/train08901.aiff\ntrain/train08902.aiff\ntrain/train08903.aiff\ntrain/train08904.aiff\ntrain/train08905.aiff\ntrain/train08906.aiff\ntrain/train08907.aiff\ntrain/train08908.aiff\ntrain/train08909.aiff\ntrain/train08910.aiff\ntrain/train08911.aiff\ntrain/train08912.aiff\ntrain/train08913.aiff\ntrain/train08914.aiff\ntrain/train08915.aiff\ntrain/train08916.aiff\ntrain/train08917.aiff\ntrain/train08918.aiff\ntrain/train08919.aiff\ntrain/train08920.aiff\ntrain/train08921.aiff\ntrain/train08922.aiff\ntrain/train08923.aiff\ntrain/train08924.aiff\ntrain/train08925.aiff\ntrain/train08926.aiff\ntrain/train08927.aiff\ntrain/train08928.aiff\ntrain/train08929.aiff\ntrain/train08930.aiff\ntrain/train08931.aiff\ntrain/train08932.aiff\ntrain/train08933.aiff\ntrain/train08934.aiff\ntrain/train08935.aiff\ntrain/train08936.aiff\ntrain/train08937.aiff\ntrain/train08938.aiff\ntrain/train08939.aiff\ntrain/train08940.aiff\ntrain/train08941.aiff\ntrain/train08942.aiff\ntrain/train08943.aiff\ntrain/train08944.aiff\ntrain/train08945.aiff\ntrain/train08946.aiff\ntrain/train08947.aiff\ntrain/train08948.aiff\ntrain/train08949.aiff\ntrain/train08950.aiff\ntrain/train08951.aiff\ntrain/train08952.aiff\ntrain/train08953.aiff\ntrain/train08954.aiff\n
train/train08955.aiff\ntrain/train08956.aiff\ntrain/train08957.aiff\ntrain/train08958.aiff\ntrain/train08959.aiff\ntrain/train08960.aiff\ntrain/train08961.aiff\ntrain/train08962.aiff\ntrain/train08963.aiff\ntrain/train08964.aiff\ntrain/train08965.aiff\ntrain/train08966.aiff\ntrain/train08967.aiff\ntrain/train08968.aiff\ntrain/train08969.aiff\ntrain/train08970.aiff\ntrain/train08971.aiff\ntrain/train08972.aiff\ntrain/train08973.aiff\ntrain/train08974.aiff\ntrain/train08975.aiff\ntrain/train08976.aiff\ntrain/train08977.aiff\ntrain/train08978.aiff\ntrain/train08979.aiff\ntrain/train08980.aiff\ntrain/train08981.aiff\ntrain/train08982.aiff\ntrain/train08983.aiff\ntrain/train08984.aiff\ntrain/train08985.aiff\ntrain/train08986.aiff\ntrain/train08987.aiff\ntrain/train08988.aiff\ntrain/train08989.aiff\ntrain/train08990.aiff\ntrain/train08991.aiff\ntrain/train08992.aiff\ntrain/train08993.aiff\ntrain/train08994.aiff\ntrain/train08995.aiff\ntrain/train08996.aiff\ntrain/train08997.aiff\ntrain/train08998.aiff\ntrain/train08999.aiff\ntrain/train09000.aiff\ntrain/train09001.aiff\ntrain/train09002.aiff\ntrain/train09003.aiff\ntrain/train09004.aiff\ntrain/train09005.aiff\ntrain/train09006.aiff\ntrain/train09007.aiff\ntrain/train09008.aiff\ntrain/train09009.aiff\ntrain/train09010.aiff\ntrain/train09011.aiff\ntrain/train09012.aiff\ntrain/train09013.aiff\ntrain/train09014.aiff\ntrain/train09015.aiff\ntrain/train09016.aiff\ntrain/train09017.aiff\ntrain/train09018.aiff\ntrain/train09019.aiff\ntrain/train09020.aiff\ntrain/train09021.aiff\ntrain/train09022.aiff\ntrain/train09023.aiff\ntrain/train09024.aiff\ntrain/train09025.aiff\ntrain/train09026.aiff\ntrain/train09027.aiff\ntrain/train09028.aiff\ntrain/train09029.aiff\ntrain/train09030.aiff\ntrain/train09031.aiff\ntrain/train09032.aiff\ntrain/train09033.aiff\ntrain/train09034.aiff\ntrain/train09035.aiff\ntrain/train09036.aiff\ntrain/train09037.aiff\ntrain/train09038.aiff\ntrain/train09039.aiff\ntrain/train09040.aiff\ntrain/train09041.aiff\ntrain/train09042.aiff\ntrain/train09043.aiff\ntrain/train09044.aiff\ntrain/train09045.aiff\ntrain/train09046.aiff\ntrain/train09047.aiff\ntrain/train09048.aiff\ntrain/train09049.aiff\ntrain/train09050.aiff\ntrain/train09051.aiff\ntrain/train09052.aiff\ntrain/train09053.aiff\ntrain/train09054.aiff\ntrain/train09055.aiff\ntrain/train09056.aiff\ntrain/train09057.aiff\ntrain/train09058.aiff\ntrain/train09059.aiff\ntrain/train09060.aiff\ntrain/train09061.aiff\ntrain/train09062.aiff\ntrain/train09063.aiff\ntrain/train09064.aiff\ntrain/train09065.aiff\ntrain/train09066.aiff\ntrain/train09067.aiff\ntrain/train09068.aiff\ntrain/train09069.aiff\ntrain/train09070.aiff\ntrain/train09071.aiff\ntrain/train09072.aiff\ntrain/train09073.aiff\ntrain/train09074.aiff\ntrain/train09075.aiff\ntrain/train09076.aiff\ntrain/train09077.aiff\ntrain/train09078.aiff\ntrain/train09079.aiff\ntrain/train09080.aiff\ntrain/train09081.aiff\ntrain/train09082.aiff\ntrain/train09083.aiff\ntrain/train09084.aiff\ntrain/train09085.aiff\ntrain/train09086.aiff\ntrain/train09087.aiff\ntrain/train09088.aiff\ntrain/train09089.aiff\ntrain/train09090.aiff\ntrain/train09091.aiff\ntrain/train09092.aiff\ntrain/train09093.aiff\ntrain/train09094.aiff\ntrain/train09095.aiff\ntrain/train09096.aiff\ntrain/train09097.aiff\ntrain/train09098.aiff\ntrain/train09099.aiff\ntrain/train09100.aiff\ntrain/train09101.aiff\ntrain/train09102.aiff\ntrain/train09103.aiff\ntrain/train09104.aiff\ntrain/train09105.aiff\ntrain/train09106.aiff\ntrain/train09107.aiff\ntrain/train09108.aiff\ntrain/train09
109.aiff\ntrain/train09110.aiff\ntrain/train09111.aiff\ntrain/train09112.aiff\ntrain/train09113.aiff\ntrain/train09114.aiff\ntrain/train09115.aiff\ntrain/train09116.aiff\ntrain/train09117.aiff\ntrain/train09118.aiff\ntrain/train09119.aiff\ntrain/train09120.aiff\ntrain/train09121.aiff\ntrain/train09122.aiff\ntrain/train09123.aiff\ntrain/train09124.aiff\ntrain/train09125.aiff\ntrain/train09126.aiff\ntrain/train09127.aiff\ntrain/train09128.aiff\ntrain/train09129.aiff\ntrain/train09130.aiff\ntrain/train09131.aiff\ntrain/train09132.aiff\ntrain/train09133.aiff\ntrain/train09134.aiff\ntrain/train09135.aiff\ntrain/train09136.aiff\ntrain/train09137.aiff\ntrain/train09138.aiff\ntrain/train09139.aiff\ntrain/train09140.aiff\ntrain/train09141.aiff\ntrain/train09142.aiff\ntrain/train09143.aiff\ntrain/train09144.aiff\ntrain/train09145.aiff\ntrain/train09146.aiff\ntrain/train09147.aiff\ntrain/train09148.aiff\ntrain/train09149.aiff\ntrain/train09150.aiff\ntrain/train09151.aiff\ntrain/train09152.aiff\ntrain/train09153.aiff\ntrain/train09154.aiff\ntrain/train09155.aiff\ntrain/train09156.aiff\ntrain/train09157.aiff\ntrain/train09158.aiff\ntrain/train09159.aiff\ntrain/train09160.aiff\ntrain/train09161.aiff\ntrain/train09162.aiff\ntrain/train09163.aiff\ntrain/train09164.aiff\ntrain/train09165.aiff\ntrain/train09166.aiff\ntrain/train09167.aiff\ntrain/train09168.aiff\ntrain/train09169.aiff\ntrain/train09170.aiff\ntrain/train09171.aiff\ntrain/train09172.aiff\ntrain/train09173.aiff\ntrain/train09174.aiff\ntrain/train09175.aiff\ntrain/train09176.aiff\ntrain/train09177.aiff\ntrain/train09178.aiff\ntrain/train09179.aiff\ntrain/train09180.aiff\ntrain/train09181.aiff\ntrain/train09182.aiff\ntrain/train09183.aiff\ntrain/train09184.aiff\ntrain/train09185.aiff\ntrain/train09186.aiff\ntrain/train09187.aiff\ntrain/train09188.aiff\ntrain/train09189.aiff\ntrain/train09190.aiff\ntrain/train09191.aiff\ntrain/train09192.aiff\ntrain/train09193.aiff\ntrain/train09194.aiff\ntrain/train09195.aiff\ntrain/train09196.aiff\ntrain/train09197.aiff\ntrain/train09198.aiff\ntrain/train09199.aiff\ntrain/train09200.aiff\ntrain/train09201.aiff\ntrain/train09202.aiff\ntrain/train09203.aiff\ntrain/train09204.aiff\ntrain/train09205.aiff\ntrain/train09206.aiff\ntrain/train09207.aiff\ntrain/train09208.aiff\ntrain/train09209.aiff\ntrain/train09210.aiff\ntrain/train09211.aiff\ntrain/train09212.aiff\ntrain/train09213.aiff\ntrain/train09214.aiff\ntrain/train09215.aiff\ntrain/train09216.aiff\ntrain/train09217.aiff\ntrain/train09218.aiff\ntrain/train09219.aiff\ntrain/train09220.aiff\ntrain/train09221.aiff\ntrain/train09222.aiff\ntrain/train09223.aiff\ntrain/train09224.aiff\ntrain/train09225.aiff\ntrain/train09226.aiff\ntrain/train09227.aiff\ntrain/train09228.aiff\ntrain/train09229.aiff\ntrain/train09230.aiff\ntrain/train09231.aiff\ntrain/train09232.aiff\ntrain/train09233.aiff\ntrain/train09234.aiff\ntrain/train09235.aiff\ntrain/train09236.aiff\ntrain/train09237.aiff\ntrain/train09238.aiff\ntrain/train09239.aiff\ntrain/train09240.aiff\ntrain/train09241.aiff\ntrain/train09242.aiff\ntrain/train09243.aiff\ntrain/train09244.aiff\ntrain/train09245.aiff\ntrain/train09246.aiff\ntrain/train09247.aiff\ntrain/train09248.aiff\ntrain/train09249.aiff\ntrain/train09250.aiff\ntrain/train09251.aiff\ntrain/train09252.aiff\ntrain/train09253.aiff\ntrain/train09254.aiff\ntrain/train09255.aiff\ntrain/train09256.aiff\ntrain/train09257.aiff\ntrain/train09258.aiff\ntrain/train09259.aiff\ntrain/train09260.aiff\ntrain/train09261.aiff\ntrain/train09262.aiff\ntrain/train09263.aiff\ntra
in/train09264.aiff\ntrain/train09265.aiff\ntrain/train09266.aiff\ntrain/train09267.aiff\ntrain/train09268.aiff\ntrain/train09269.aiff\ntrain/train09270.aiff\ntrain/train09271.aiff\ntrain/train09272.aiff\ntrain/train09273.aiff\ntrain/train09274.aiff\ntrain/train09275.aiff\ntrain/train09276.aiff\ntrain/train09277.aiff\ntrain/train09278.aiff\ntrain/train09279.aiff\ntrain/train09280.aiff\ntrain/train09281.aiff\ntrain/train09282.aiff\ntrain/train09283.aiff\ntrain/train09284.aiff\ntrain/train09285.aiff\ntrain/train09286.aiff\ntrain/train09287.aiff\ntrain/train09288.aiff\ntrain/train09289.aiff\ntrain/train09290.aiff\ntrain/train09291.aiff\ntrain/train09292.aiff\ntrain/train09293.aiff\ntrain/train09294.aiff\ntrain/train09295.aiff\ntrain/train09296.aiff\ntrain/train09297.aiff\ntrain/train09298.aiff\ntrain/train09299.aiff\ntrain/train09300.aiff\ntrain/train09301.aiff\ntrain/train09302.aiff\ntrain/train09303.aiff\ntrain/train09304.aiff\ntrain/train09305.aiff\ntrain/train09306.aiff\ntrain/train09307.aiff\ntrain/train09308.aiff\ntrain/train09309.aiff\ntrain/train09310.aiff\ntrain/train09311.aiff\ntrain/train09312.aiff\ntrain/train09313.aiff\ntrain/train09314.aiff\ntrain/train09315.aiff\ntrain/train09316.aiff\ntrain/train09317.aiff\ntrain/train09318.aiff\ntrain/train09319.aiff\ntrain/train09320.aiff\ntrain/train09321.aiff\ntrain/train09322.aiff\ntrain/train09323.aiff\ntrain/train09324.aiff\ntrain/train09325.aiff\ntrain/train09326.aiff\ntrain/train09327.aiff\ntrain/train09328.aiff\ntrain/train09329.aiff\ntrain/train09330.aiff\ntrain/train09331.aiff\ntrain/train09332.aiff\ntrain/train09333.aiff\ntrain/train09334.aiff\ntrain/train09335.aiff\ntrain/train09336.aiff\ntrain/train09337.aiff\ntrain/train09338.aiff\ntrain/train09339.aiff\ntrain/train09340.aiff\ntrain/train09341.aiff\ntrain/train09342.aiff\ntrain/train09343.aiff\ntrain/train09344.aiff\ntrain/train09345.aiff\ntrain/train09346.aiff\ntrain/train09347.aiff\ntrain/train09348.aiff\ntrain/train09349.aiff\ntrain/train09350.aiff\ntrain/train09351.aiff\ntrain/train09352.aiff\ntrain/train09353.aiff\ntrain/train09354.aiff\ntrain/train09355.aiff\ntrain/train09356.aiff\ntrain/train09357.aiff\ntrain/train09358.aiff\ntrain/train09359.aiff\ntrain/train09360.aiff\ntrain/train09361.aiff\ntrain/train09362.aiff\ntrain/train09363.aiff\ntrain/train09364.aiff\ntrain/train09365.aiff\ntrain/train09366.aiff\ntrain/train09367.aiff\ntrain/train09368.aiff\ntrain/train09369.aiff\ntrain/train09370.aiff\ntrain/train09371.aiff\ntrain/train09372.aiff\ntrain/train09373.aiff\ntrain/train09374.aiff\ntrain/train09375.aiff\ntrain/train09376.aiff\ntrain/train09377.aiff\ntrain/train09378.aiff\ntrain/train09379.aiff\ntrain/train09380.aiff\ntrain/train09381.aiff\ntrain/train09382.aiff\ntrain/train09383.aiff\ntrain/train09384.aiff\ntrain/train09385.aiff\ntrain/train09386.aiff\ntrain/train09387.aiff\ntrain/train09388.aiff\ntrain/train09389.aiff\ntrain/train09390.aiff\ntrain/train09391.aiff\ntrain/train09392.aiff\ntrain/train09393.aiff\ntrain/train09394.aiff\ntrain/train09395.aiff\ntrain/train09396.aiff\ntrain/train09397.aiff\ntrain/train09398.aiff\ntrain/train09399.aiff\ntrain/train09400.aiff\ntrain/train09401.aiff\ntrain/train09402.aiff\ntrain/train09403.aiff\ntrain/train09404.aiff\ntrain/train09405.aiff\ntrain/train09406.aiff\ntrain/train09407.aiff\ntrain/train09408.aiff\ntrain/train09409.aiff\ntrain/train09410.aiff\ntrain/train09411.aiff\ntrain/train09412.aiff\ntrain/train09413.aiff\ntrain/train09414.aiff\ntrain/train09415.aiff\ntrain/train09416.aiff\ntrain/train09417.aiff\ntrain/train09418
.aiff\ntrain/train09419.aiff\ntrain/train09420.aiff\ntrain/train09421.aiff\ntrain/train09422.aiff\ntrain/train09423.aiff\ntrain/train09424.aiff\ntrain/train09425.aiff\ntrain/train09426.aiff\ntrain/train09427.aiff\ntrain/train09428.aiff\ntrain/train09429.aiff\ntrain/train09430.aiff\ntrain/train09431.aiff\ntrain/train09432.aiff\ntrain/train09433.aiff\ntrain/train09434.aiff\ntrain/train09435.aiff\ntrain/train09436.aiff\ntrain/train09437.aiff\ntrain/train09438.aiff\ntrain/train09439.aiff\ntrain/train09440.aiff\ntrain/train09441.aiff\ntrain/train09442.aiff\ntrain/train09443.aiff\ntrain/train09444.aiff\ntrain/train09445.aiff\ntrain/train09446.aiff\ntrain/train09447.aiff\ntrain/train09448.aiff\ntrain/train09449.aiff\ntrain/train09450.aiff\ntrain/train09451.aiff\ntrain/train09452.aiff\ntrain/train09453.aiff\ntrain/train09454.aiff\ntrain/train09455.aiff\ntrain/train09456.aiff\ntrain/train09457.aiff\ntrain/train09458.aiff\ntrain/train09459.aiff\ntrain/train09460.aiff\ntrain/train09461.aiff\ntrain/train09462.aiff\ntrain/train09463.aiff\ntrain/train09464.aiff\ntrain/train09465.aiff\ntrain/train09466.aiff\ntrain/train09467.aiff\ntrain/train09468.aiff\ntrain/train09469.aiff\ntrain/train09470.aiff\ntrain/train09471.aiff\ntrain/train09472.aiff\ntrain/train09473.aiff\ntrain/train09474.aiff\ntrain/train09475.aiff\ntrain/train09476.aiff\ntrain/train09477.aiff\ntrain/train09478.aiff\ntrain/train09479.aiff\ntrain/train09480.aiff\ntrain/train09481.aiff\ntrain/train09482.aiff\ntrain/train09483.aiff\ntrain/train09484.aiff\ntrain/train09485.aiff\ntrain/train09486.aiff\ntrain/train09487.aiff\ntrain/train09488.aiff\ntrain/train09489.aiff\ntrain/train09490.aiff\ntrain/train09491.aiff\ntrain/train09492.aiff\ntrain/train09493.aiff\ntrain/train09494.aiff\ntrain/train09495.aiff\ntrain/train09496.aiff\ntrain/train09497.aiff\ntrain/train09498.aiff\ntrain/train09499.aiff\ntrain/train09500.aiff\ntrain/train09501.aiff\ntrain/train09502.aiff\ntrain/train09503.aiff\ntrain/train09504.aiff\ntrain/train09505.aiff\ntrain/train09506.aiff\ntrain/train09507.aiff\ntrain/train09508.aiff\ntrain/train09509.aiff\ntrain/train09510.aiff\ntrain/train09511.aiff\ntrain/train09512.aiff\ntrain/train09513.aiff\ntrain/train09514.aiff\ntrain/train09515.aiff\ntrain/train09516.aiff\ntrain/train09517.aiff\ntrain/train09518.aiff\ntrain/train09519.aiff\ntrain/train09520.aiff\ntrain/train09521.aiff\ntrain/train09522.aiff\ntrain/train09523.aiff\ntrain/train09524.aiff\ntrain/train09525.aiff\ntrain/train09526.aiff\ntrain/train09527.aiff\ntrain/train09528.aiff\ntrain/train09529.aiff\ntrain/train09530.aiff\ntrain/train09531.aiff\ntrain/train09532.aiff\ntrain/train09533.aiff\ntrain/train09534.aiff\ntrain/train09535.aiff\ntrain/train09536.aiff\ntrain/train09537.aiff\ntrain/train09538.aiff\ntrain/train09539.aiff\ntrain/train09540.aiff\ntrain/train09541.aiff\ntrain/train09542.aiff\ntrain/train09543.aiff\ntrain/train09544.aiff\ntrain/train09545.aiff\ntrain/train09546.aiff\ntrain/train09547.aiff\ntrain/train09548.aiff\ntrain/train09549.aiff\ntrain/train09550.aiff\ntrain/train09551.aiff\ntrain/train09552.aiff\ntrain/train09553.aiff\ntrain/train09554.aiff\ntrain/train09555.aiff\ntrain/train09556.aiff\ntrain/train09557.aiff\ntrain/train09558.aiff\ntrain/train09559.aiff\ntrain/train09560.aiff\ntrain/train09561.aiff\ntrain/train09562.aiff\ntrain/train09563.aiff\ntrain/train09564.aiff\ntrain/train09565.aiff\ntrain/train09566.aiff\ntrain/train09567.aiff\ntrain/train09568.aiff\ntrain/train09569.aiff\ntrain/train09570.aiff\ntrain/train09571.aiff\ntrain/train09572.aiff\ntrain/
train09573.aiff\ntrain/train09574.aiff\ntrain/train09575.aiff\ntrain/train09576.aiff\ntrain/train09577.aiff\ntrain/train09578.aiff\ntrain/train09579.aiff\ntrain/train09580.aiff\ntrain/train09581.aiff\ntrain/train09582.aiff\ntrain/train09583.aiff\ntrain/train09584.aiff\ntrain/train09585.aiff\ntrain/train09586.aiff\ntrain/train09587.aiff\ntrain/train09588.aiff\ntrain/train09589.aiff\ntrain/train09590.aiff\ntrain/train09591.aiff\ntrain/train09592.aiff\ntrain/train09593.aiff\ntrain/train09594.aiff\ntrain/train09595.aiff\ntrain/train09596.aiff\ntrain/train09597.aiff\ntrain/train09598.aiff\ntrain/train09599.aiff\ntrain/train09600.aiff\ntrain/train09601.aiff\ntrain/train09602.aiff\ntrain/train09603.aiff\ntrain/train09604.aiff\ntrain/train09605.aiff\ntrain/train09606.aiff\ntrain/train09607.aiff\ntrain/train09608.aiff\ntrain/train09609.aiff\ntrain/train09610.aiff\ntrain/train09611.aiff\ntrain/train09612.aiff\ntrain/train09613.aiff\ntrain/train09614.aiff\ntrain/train09615.aiff\ntrain/train09616.aiff\ntrain/train09617.aiff\ntrain/train09618.aiff\ntrain/train09619.aiff\ntrain/train09620.aiff\ntrain/train09621.aiff\ntrain/train09622.aiff\ntrain/train09623.aiff\ntrain/train09624.aiff\ntrain/train09625.aiff\ntrain/train09626.aiff\ntrain/train09627.aiff\ntrain/train09628.aiff\ntrain/train09629.aiff\ntrain/train09630.aiff\ntrain/train09631.aiff\ntrain/train09632.aiff\ntrain/train09633.aiff\ntrain/train09634.aiff\ntrain/train09635.aiff\ntrain/train09636.aiff\ntrain/train09637.aiff\ntrain/train09638.aiff\ntrain/train09639.aiff\ntrain/train09640.aiff\ntrain/train09641.aiff\ntrain/train09642.aiff\ntrain/train09643.aiff\ntrain/train09644.aiff\ntrain/train09645.aiff\ntrain/train09646.aiff\ntrain/train09647.aiff\ntrain/train09648.aiff\ntrain/train09649.aiff\ntrain/train09650.aiff\ntrain/train09651.aiff\ntrain/train09652.aiff\ntrain/train09653.aiff\ntrain/train09654.aiff\ntrain/train09655.aiff\ntrain/train09656.aiff\ntrain/train09657.aiff\ntrain/train09658.aiff\ntrain/train09659.aiff\ntrain/train09660.aiff\ntrain/train09661.aiff\ntrain/train09662.aiff\ntrain/train09663.aiff\ntrain/train09664.aiff\ntrain/train09665.aiff\ntrain/train09666.aiff\ntrain/train09667.aiff\ntrain/train09668.aiff\ntrain/train09669.aiff\ntrain/train09670.aiff\ntrain/train09671.aiff\ntrain/train09672.aiff\ntrain/train09673.aiff\ntrain/train09674.aiff\ntrain/train09675.aiff\ntrain/train09676.aiff\ntrain/train09677.aiff\ntrain/train09678.aiff\ntrain/train09679.aiff\ntrain/train09680.aiff\ntrain/train09681.aiff\ntrain/train09682.aiff\ntrain/train09683.aiff\ntrain/train09684.aiff\ntrain/train09685.aiff\ntrain/train09686.aiff\ntrain/train09687.aiff\ntrain/train09688.aiff\ntrain/train09689.aiff\ntrain/train09690.aiff\ntrain/train09691.aiff\ntrain/train09692.aiff\ntrain/train09693.aiff\ntrain/train09694.aiff\ntrain/train09695.aiff\ntrain/train09696.aiff\ntrain/train09697.aiff\ntrain/train09698.aiff\ntrain/train09699.aiff\ntrain/train09700.aiff\ntrain/train09701.aiff\ntrain/train09702.aiff\ntrain/train09703.aiff\ntrain/train09704.aiff\ntrain/train09705.aiff\ntrain/train09706.aiff\ntrain/train09707.aiff\ntrain/train09708.aiff\ntrain/train09709.aiff\ntrain/train09710.aiff\ntrain/train09711.aiff\ntrain/train09712.aiff\ntrain/train09713.aiff\ntrain/train09714.aiff\ntrain/train09715.aiff\ntrain/train09716.aiff\ntrain/train09717.aiff\ntrain/train09718.aiff\ntrain/train09719.aiff\ntrain/train09720.aiff\ntrain/train09721.aiff\ntrain/train09722.aiff\ntrain/train09723.aiff\ntrain/train09724.aiff\ntrain/train09725.aiff\ntrain/train09726.aiff\ntrain/train09727.ai
ff\ntrain/train09728.aiff\ntrain/train09729.aiff\ntrain/train09730.aiff\ntrain/train09731.aiff\ntrain/train09732.aiff\ntrain/train09733.aiff\ntrain/train09734.aiff\ntrain/train09735.aiff\ntrain/train09736.aiff\ntrain/train09737.aiff\ntrain/train09738.aiff\ntrain/train09739.aiff\ntrain/train09740.aiff\ntrain/train09741.aiff\ntrain/train09742.aiff\ntrain/train09743.aiff\ntrain/train09744.aiff\ntrain/train09745.aiff\ntrain/train09746.aiff\ntrain/train09747.aiff\ntrain/train09748.aiff\ntrain/train09749.aiff\ntrain/train09750.aiff\ntrain/train09751.aiff\ntrain/train09752.aiff\ntrain/train09753.aiff\ntrain/train09754.aiff\ntrain/train09755.aiff\ntrain/train09756.aiff\ntrain/train09757.aiff\ntrain/train09758.aiff\ntrain/train09759.aiff\ntrain/train09760.aiff\ntrain/train09761.aiff\ntrain/train09762.aiff\ntrain/train09763.aiff\ntrain/train09764.aiff\ntrain/train09765.aiff\ntrain/train09766.aiff\ntrain/train09767.aiff\ntrain/train09768.aiff\ntrain/train09769.aiff\ntrain/train09770.aiff\ntrain/train09771.aiff\ntrain/train09772.aiff\ntrain/train09773.aiff\ntrain/train09774.aiff\ntrain/train09775.aiff\ntrain/train09776.aiff\ntrain/train09777.aiff\ntrain/train09778.aiff\ntrain/train09779.aiff\ntrain/train09780.aiff\ntrain/train09781.aiff\ntrain/train09782.aiff\ntrain/train09783.aiff\ntrain/train09784.aiff\ntrain/train09785.aiff\ntrain/train09786.aiff\ntrain/train09787.aiff\ntrain/train09788.aiff\ntrain/train09789.aiff\ntrain/train09790.aiff\ntrain/train09791.aiff\ntrain/train09792.aiff\ntrain/train09793.aiff\ntrain/train09794.aiff\ntrain/train09795.aiff\ntrain/train09796.aiff\ntrain/train09797.aiff\ntrain/train09798.aiff\ntrain/train09799.aiff\ntrain/train09800.aiff\ntrain/train09801.aiff\ntrain/train09802.aiff\ntrain/train09803.aiff\ntrain/train09804.aiff\ntrain/train09805.aiff\ntrain/train09806.aiff\ntrain/train09807.aiff\ntrain/train09808.aiff\ntrain/train09809.aiff\ntrain/train09810.aiff\ntrain/train09811.aiff\ntrain/train09812.aiff\ntrain/train09813.aiff\ntrain/train09814.aiff\ntrain/train09815.aiff\ntrain/train09816.aiff\ntrain/train09817.aiff\ntrain/train09818.aiff\ntrain/train09819.aiff\ntrain/train09820.aiff\ntrain/train09821.aiff\ntrain/train09822.aiff\ntrain/train09823.aiff\ntrain/train09824.aiff\ntrain/train09825.aiff\ntrain/train09826.aiff\ntrain/train09827.aiff\ntrain/train09828.aiff\ntrain/train09829.aiff\ntrain/train09830.aiff\ntrain/train09831.aiff\ntrain/train09832.aiff\ntrain/train09833.aiff\ntrain/train09834.aiff\ntrain/train09835.aiff\ntrain/train09836.aiff\ntrain/train09837.aiff\ntrain/train09838.aiff\ntrain/train09839.aiff\ntrain/train09840.aiff\ntrain/train09841.aiff\ntrain/train09842.aiff\ntrain/train09843.aiff\ntrain/train09844.aiff\ntrain/train09845.aiff\ntrain/train09846.aiff\ntrain/train09847.aiff\ntrain/train09848.aiff\ntrain/train09849.aiff\ntrain/train09850.aiff\ntrain/train09851.aiff\ntrain/train09852.aiff\ntrain/train09853.aiff\ntrain/train09854.aiff\ntrain/train09855.aiff\ntrain/train09856.aiff\ntrain/train09857.aiff\ntrain/train09858.aiff\ntrain/train09859.aiff\ntrain/train09860.aiff\ntrain/train09861.aiff\ntrain/train09862.aiff\ntrain/train09863.aiff\ntrain/train09864.aiff\ntrain/train09865.aiff\ntrain/train09866.aiff\ntrain/train09867.aiff\ntrain/train09868.aiff\ntrain/train09869.aiff\ntrain/train09870.aiff\ntrain/train09871.aiff\ntrain/train09872.aiff\ntrain/train09873.aiff\ntrain/train09874.aiff\ntrain/train09875.aiff\ntrain/train09876.aiff\ntrain/train09877.aiff\ntrain/train09878.aiff\ntrain/train09879.aiff\ntrain/train09880.aiff\ntrain/train09881.aiff\ntrain/tra
in09882.aiff\ntrain/train09883.aiff\ntrain/train09884.aiff\ntrain/train09885.aiff\ntrain/train09886.aiff\ntrain/train09887.aiff\ntrain/train09888.aiff\ntrain/train09889.aiff\ntrain/train09890.aiff\ntrain/train09891.aiff\ntrain/train09892.aiff\ntrain/train09893.aiff\ntrain/train09894.aiff\ntrain/train09895.aiff\ntrain/train09896.aiff\ntrain/train09897.aiff\ntrain/train09898.aiff\ntrain/train09899.aiff\ntrain/train09900.aiff\ntrain/train09901.aiff\ntrain/train09902.aiff\ntrain/train09903.aiff\ntrain/train09904.aiff\ntrain/train09905.aiff\ntrain/train09906.aiff\ntrain/train09907.aiff\ntrain/train09908.aiff\ntrain/train09909.aiff\ntrain/train09910.aiff\ntrain/train09911.aiff\ntrain/train09912.aiff\ntrain/train09913.aiff\ntrain/train09914.aiff\ntrain/train09915.aiff\ntrain/train09916.aiff\ntrain/train09917.aiff\ntrain/train09918.aiff\ntrain/train09919.aiff\ntrain/train09920.aiff\ntrain/train09921.aiff\ntrain/train09922.aiff\ntrain/train09923.aiff\ntrain/train09924.aiff\ntrain/train09925.aiff\ntrain/train09926.aiff\ntrain/train09927.aiff\ntrain/train09928.aiff\ntrain/train09929.aiff\ntrain/train09930.aiff\ntrain/train09931.aiff\ntrain/train09932.aiff\ntrain/train09933.aiff\ntrain/train09934.aiff\ntrain/train09935.aiff\ntrain/train09936.aiff\ntrain/train09937.aiff\ntrain/train09938.aiff\ntrain/train09939.aiff\ntrain/train09940.aiff\ntrain/train09941.aiff\ntrain/train09942.aiff\ntrain/train09943.aiff\ntrain/train09944.aiff\ntrain/train09945.aiff\ntrain/train09946.aiff\ntrain/train09947.aiff\ntrain/train09948.aiff\ntrain/train09949.aiff\ntrain/train09950.aiff\ntrain/train09951.aiff\ntrain/train09952.aiff\ntrain/train09953.aiff\ntrain/train09954.aiff\ntrain/train09955.aiff\ntrain/train09956.aiff\ntrain/train09957.aiff\ntrain/train09958.aiff\ntrain/train09959.aiff\ntrain/train09960.aiff\ntrain/train09961.aiff\ntrain/train09962.aiff\ntrain/train09963.aiff\ntrain/train09964.aiff\ntrain/train09965.aiff\ntrain/train09966.aiff\ntrain/train09967.aiff\ntrain/train09968.aiff\ntrain/train09969.aiff\ntrain/train09970.aiff\ntrain/train09971.aiff\ntrain/train09972.aiff\ntrain/train09973.aiff\ntrain/train09974.aiff\ntrain/train09975.aiff\ntrain/train09976.aiff\ntrain/train09977.aiff\ntrain/train09978.aiff\ntrain/train09979.aiff\ntrain/train09980.aiff\ntrain/train09981.aiff\ntrain/train09982.aiff\ntrain/train09983.aiff\ntrain/train09984.aiff\ntrain/train09985.aiff\ntrain/train09986.aiff\ntrain/train09987.aiff\ntrain/train09988.aiff\ntrain/train09989.aiff\ntrain/train09990.aiff\ntrain/train09991.aiff\ntrain/train09992.aiff\ntrain/train09993.aiff\ntrain/train09994.aiff\ntrain/train09995.aiff\ntrain/train09996.aiff\ntrain/train09997.aiff\ntrain/train09998.aiff\ntrain/train09999.aiff\n" ], [ "# read the audio filenames\nfilenames = sorted(glob(os.path.join('train','*.aiff')))\nprint('There are '+str(len(filenames))+' files.' 
)", "There are 9999 files.\n" ], [ "# read the labels\nimport pandas as pd\nlabels = pd.read_csv(os.path.join('labels.csv'), index_col = 0)", "_____no_output_____" ] ], [ [ "The format of the labels is:", "_____no_output_____" ] ], [ [ "labels.head(10)", "_____no_output_____" ] ], [ [ "Let's look at one of those files.", "_____no_output_____" ] ], [ [ "# reading the file info\nwhale_sample_file = 'train00006.aiff'\nwhale_aiff = aifc.open(os.path.join('train',whale_sample_file),'r')\nprint (\"Frames:\", whale_aiff.getnframes() )\nprint (\"Frame rate (frames per second):\", whale_aiff.getframerate())", "Frames: 4000\nFrame rate (frames per second): 2000\n" ], [ "# reading the data\nwhale_strSig = whale_aiff.readframes(whale_aiff.getnframes())\nwhale_array = np.fromstring(whale_strSig, np.short).byteswap()\nplt.plot(whale_array)\nplt.xlabel('Frame number')", "_____no_output_____" ], [ "signal = whale_array.astype('float64')", "_____no_output_____" ], [ "# playing a whale upcall in the notebook\nfrom IPython.display import Audio\n# Audio(signal, rate=3000, autoplay = True)# the rate is set to 3000 make the widget to run (seems the widget does not run with rate below 3000)", "_____no_output_____" ] ], [ [ "Working directly with the signals is hard (there is important frequency information). Let's calculate the spectrograms for each of the signals and use as features.", "_____no_output_____" ] ], [ [ "# a function for plotting spectrograms\ndef PlotSpecgram(P, freqs, bins):\n \"\"\"Spectrogram\"\"\"\n Z = np.flipud(P) # flip rows so that top goes to bottom, bottom to top, etc.\n xextent = 0, np.amax(bins)\n xmin, xmax = xextent\n extent = xmin, xmax, freqs[0], freqs[-1]\n im = pl.imshow(Z, extent=extent,cmap = 'plasma')\n pl.axis('auto')\n pl.xlim([0.0, bins[-1]])\n pl.ylim([0, freqs[-1]])", "_____no_output_____" ], [ "params = {'NFFT':256, 'Fs':2000, 'noverlap':192}\nP, freqs, bins = mlab.specgram(whale_array, **params)\nPlotSpecgram(P, freqs, bins)\nplt.title('Spectrogram with an Upcall')", "_____no_output_____" ] ], [ [ "### Feature Extraction\n---", "_____no_output_____" ], [ "We will go through the files and extract the spectrograms from each of them. We will do it for the first N files.", "_____no_output_____" ] ], [ [ "N = 10000 #number of files to use", "_____no_output_____" ], [ "# create a dictionary which contains all the spectrograms, labeled by the filename\nspec_dict = {}\n\n# threshold to cut higher frequencies\nm = 60\n\n# loop through all the files\nfor filename in filenames[:N]:\n # read the file\n aiff = aifc.open(filename,'r')\n whale_strSig = aiff.readframes(aiff.getnframes())\n whale_array = np.fromstring(whale_strSig, np.short).byteswap()\n # create the spectrogram\n P, freqs, bins = mlab.specgram(whale_array, **params)\n spec_dict[filename] = P[:m,:]\n\n# save the dimensions of the spectrogram\nspec_dim = P[:m,:].shape ", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "Most machine learning algorithms in Python expect the data to come in a format **observations** x **features**. In order to get the data in this format we need to convert the two-dimensional spectrogram into a long vector. 
For that we will use the `ravel` function.", "_____no_output_____" ] ], [ [ "# We will put the data in a dictionary\nfeature_dict = {}\nfor key in filenames[:N]:\n # vectorize the spectrogram\n feature_dict[key.split('/')[-1]] = spec_dict[key].ravel()\n\n# convert to a pandas dataframe\nX = pd.DataFrame(feature_dict).T", "_____no_output_____" ], [ "X.head(5)", "_____no_output_____" ], [ "# we do not need these objects anymore so let's release them from memory\ndel feature_dict\ndel spec_dict", "_____no_output_____" ], [ "# let's save these variables for reuse\nnp.save('X.npy',X)\nnp.save('y.npy',np.array(labels['label'][X.index])[:N])", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "### References:\n\nhttps://www.kaggle.com/c/whale-detection-challenge\n\nhttps://github.com/jaimeps/whale-sound-classification", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0de216090fe63f7e59d8e4f422e35bafdcb7d7a
111,977
ipynb
Jupyter Notebook
notebook/notes_apflux_magnitude.ipynb
changhoonhahn/feasiBGS
b5f535f12cf64babc9e25bcec75edd45d8668f74
[ "MIT" ]
1
2021-02-24T15:02:34.000Z
2021-02-24T15:02:34.000Z
notebook/notes_apflux_magnitude.ipynb
michaelJwilson/feasiBGS
63975b1e60f6f93f3b5020ee51ca565f325b918d
[ "MIT" ]
7
2020-10-23T16:02:01.000Z
2020-11-04T18:53:20.000Z
notebook/notes_apflux_magnitude.ipynb
changhoonhahn/feasiBGS
b5f535f12cf64babc9e25bcec75edd45d8668f74
[ "MIT" ]
1
2020-11-12T00:19:41.000Z
2020-11-12T00:19:41.000Z
395.678445
57,064
0.936978
[ [ [ "import os\nimport h5py \nimport numpy as np \n# -- local -- \nfrom feasibgs import util as UT\nfrom feasibgs import catalogs as Cat\nfrom feasibgs import forwardmodel as FM", "_____no_output_____" ], [ "import matplotlib as mpl \nimport matplotlib.pyplot as pl \nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['font.family'] = 'serif'\nmpl.rcParams['axes.linewidth'] = 1.5\nmpl.rcParams['axes.xmargin'] = 1\nmpl.rcParams['xtick.labelsize'] = 'x-large'\nmpl.rcParams['xtick.major.size'] = 5\nmpl.rcParams['xtick.major.width'] = 1.5\nmpl.rcParams['ytick.labelsize'] = 'x-large'\nmpl.rcParams['ytick.major.size'] = 5\nmpl.rcParams['ytick.major.width'] = 1.5\nmpl.rcParams['legend.frameon'] = False\n%matplotlib inline", "_____no_output_____" ], [ "cata = Cat.GamaLegacy()\ngleg = cata.Read()", "_____no_output_____" ], [ "r_mag = UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1], method='log')", "/Users/chang/anaconda2/lib/python2.7/site-packages/feasibgs-0.0.0-py2.7.egg/feasibgs/util.py:30: RuntimeWarning: divide by zero encountered in log10\n" ], [ "float(np.sum(np.isfinite(r_mag)))/float(len(r_mag))", "_____no_output_____" ], [ "no_rmag = np.invert(np.isfinite(r_mag))", "_____no_output_____" ], [ "fig = plt.figure()\nsub = fig.add_subplot(111)\n_ = sub.hist(gleg['gama-photo']['modelmag_r'], color='C0', histtype='stepfilled', range=(16, 21), bins=40)\n_ = sub.hist(gleg['gama-photo']['modelmag_r'][no_rmag], color='C1', histtype='stepfilled', range=(16, 21), bins=40)\nsub.set_xlabel(r'$r$ band model magnitude', fontsize=20)\nsub.set_xlim([16., 21.])", "_____no_output_____" ], [ "fig = plt.figure()\nsub = fig.add_subplot(111)\n_ = sub.hist(gleg['gama-photo']['modelmag_r'], color='C0', histtype='stepfilled', range=(16, 21), bins=40, normed=True)\n_ = sub.hist(gleg['gama-photo']['modelmag_r'][no_rmag], color='C1', histtype='stepfilled', range=(16, 21), bins=40, normed=True)\nsub.set_xlabel(r'$r$ band model magnitude', fontsize=20)\nsub.set_xlim([16., 21.])", "The 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\nThe 'normed' kwarg is deprecated, and has been replaced by the 'density' kwarg.\n" ], [ "fig = plt.figure()\nsub = fig.add_subplot(111)\nsub.scatter(gleg['gama-photo']['modelmag_r'], r_mag, s=2, c='k')\nsub.scatter(gleg['gama-photo']['modelmag_r'], UT.flux2mag(gleg['legacy-photo']['apflux_r'][:,1]), s=1, c='C0')\nsub.plot([0., 25.], [0., 25.], c='C1', lw=1, ls='--')\nsub.set_xlabel('$r$-band model magnitude', fontsize=20)\nsub.set_xlim([13, 21])\nsub.set_ylabel('$r$-band apflux magnitude', fontsize=20)\nsub.set_ylim([15, 25])", "_____no_output_____" ], [ "fig = plt.figure()\nsub = fig.add_subplot(111)\nsub.scatter(gleg['gama-photo']['modelmag_r'], UT.flux2mag(gleg['legacy-photo']['flux_r']), s=2, c='C0', \n label='Legacy flux')\nsub.scatter(gleg['gama-photo']['modelmag_r'], r_mag, s=0.5, c='C1', \n label='Legacy apflux (fiber)')\nsub.plot([0., 25.], [0., 25.], c='k', lw=1, ls='--')\nsub.legend(loc='upper left', markerscale=5, handletextpad=0., prop={'size':15})\nsub.set_xlabel('$r$-band GAMA model magnitude', fontsize=20)\nsub.set_xlim([13, 20])\nsub.set_ylabel('$r$-band Legacy photometry', fontsize=20)\nsub.set_ylim([13, 23])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0de2ac51170caee8a600c5a3d80aa04cf32e6ed
169,957
ipynb
Jupyter Notebook
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
Oriolac/deep-learning-pytorch-exs
1edc66d1bb4b1196ef47a048129240c9f350da91
[ "MIT" ]
1
2021-10-21T13:24:27.000Z
2021-10-21T13:24:27.000Z
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
Oriolac/deep-learning-pytorch-exs
1edc66d1bb4b1196ef47a048129240c9f350da91
[ "MIT" ]
null
null
null
convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb
Oriolac/deep-learning-pytorch-exs
1edc66d1bb4b1196ef47a048129240c9f350da91
[ "MIT" ]
null
null
null
267.227987
97,380
0.91086
[ [ [ "# Multi-Layer Perceptron, MNIST\n---\nIn this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.\n\nThe process will be broken down into the following steps:\n>1. Load and visualize the data\n2. Define a neural network\n3. Train the model\n4. Evaluate the performance of our trained model on a test dataset!\n\nBefore we begin, we have to import the necessary libraries for working with data and PyTorch.", "_____no_output_____" ] ], [ [ "# import libraries\nimport torch\nimport numpy as np", "_____no_output_____" ] ], [ [ "---\n## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.\n\nThis cell will create DataLoaders for each of our datasets.", "_____no_output_____" ] ], [ [ "# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection\n# Run this script to enable the datasets download\n# Reference: https://github.com/pytorch/vision/issues/1938\n\nfrom six.moves import urllib\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\nurllib.request.install_opener(opener)", "_____no_output_____" ], [ "from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)", "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to data/MNIST/raw/train-images-idx3-ubyte.gz\n" ] ], [ [ "### Visualize a Batch of Training Data\n\nThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n ax.set_title(str(labels[idx].item()))", "/tmp/ipykernel_2060/611598023.py:12: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n" ] ], [ [ "### View an Image in More Detail", "_____no_output_____" ] ], [ [ "img = np.squeeze(images[1])\n\nfig = plt.figure(figsize = 
(12,12)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\nwidth, height = img.shape\nthresh = img.max()/2.5\nfor x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')", "_____no_output_____" ] ], [ [ "---\n## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)\n\nThe architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\n## TODO: Define the NN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # linear layer (784 -> 1 hidden node)\n self.fc1 = nn.Linear(28 * 28, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, 128)\n self.fc4 = nn.Linear(128, 10)\n self.dropout = nn.Dropout(0.2)\n\n def forward(self, x):\n # flatten image input\n x = x.view(-1, 28 * 28)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n x = self.dropout(x)\n x = F.relu(self.fc2(x))\n x = self.dropout(x)\n x = F.relu(self.fc3(x))\n x = self.dropout(x)\n return self.fc4(x)\n\n# initialize the NN\nmodel = Net()\nprint(model)", "Net(\n (fc1): Linear(in_features=784, out_features=512, bias=True)\n (fc2): Linear(in_features=512, out_features=256, bias=True)\n (fc3): Linear(in_features=256, out_features=128, bias=True)\n (fc4): Linear(in_features=128, out_features=10, bias=True)\n (dropout): Dropout(p=0.2, inplace=False)\n)\n" ] ], [ [ "### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)\n\nIt's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax funtion to the output layer *and* then calculates the log loss.", "_____no_output_____" ] ], [ [ "## TODO: Specify loss and optimization functions\n\n# specify loss function\ncriterion = nn.CrossEntropyLoss()\n\n# specify optimizer\noptimizer = torch.optim.SGD(model.parameters(), lr=0.01)", "_____no_output_____" ] ], [ [ "---\n## Train the Network\n\nThe steps for training/learning from a batch of data are described in the comments below:\n1. Clear the gradients of all optimized variables\n2. Forward pass: compute predicted outputs by passing inputs to the model\n3. Calculate the loss\n4. Backward pass: compute gradient of the loss with respect to model parameters\n5. Perform a single optimization step (parameter update)\n6. Update average training loss\n\nThe following loop trains for 30 epochs; feel free to change this number. For now, we suggest somewhere between 20-50 epochs. As you train, take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data. 
", "_____no_output_____" ] ], [ [ "# number of epochs to train the model\nn_epochs = 30 # suggest training between 20-50 epochs\n\nmodel.train() # prep model for training\n\nfor epoch in range(n_epochs):\n # monitor training loss\n train_loss = 0.0\n \n ###################\n # train the model #\n ###################\n for data, target in train_loader:\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update running training loss\n train_loss += loss.item()*data.size(0)\n \n # print training statistics \n # calculate average loss over an epoch\n train_loss = train_loss/len(train_loader.dataset)\n\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(\n epoch+1, \n train_loss\n ))", "Epoch: 1 \tTraining Loss: 1.263758\nEpoch: 2 \tTraining Loss: 0.382051\nEpoch: 3 \tTraining Loss: 0.266715\nEpoch: 4 \tTraining Loss: 0.200021\nEpoch: 5 \tTraining Loss: 0.162390\nEpoch: 6 \tTraining Loss: 0.137244\nEpoch: 7 \tTraining Loss: 0.119641\nEpoch: 8 \tTraining Loss: 0.105442\nEpoch: 9 \tTraining Loss: 0.094517\nEpoch: 10 \tTraining Loss: 0.085037\nEpoch: 11 \tTraining Loss: 0.076285\nEpoch: 12 \tTraining Loss: 0.068212\nEpoch: 13 \tTraining Loss: 0.063617\nEpoch: 14 \tTraining Loss: 0.057780\nEpoch: 15 \tTraining Loss: 0.054958\nEpoch: 16 \tTraining Loss: 0.049273\nEpoch: 17 \tTraining Loss: 0.045710\nEpoch: 18 \tTraining Loss: 0.041795\nEpoch: 19 \tTraining Loss: 0.040301\nEpoch: 20 \tTraining Loss: 0.034952\nEpoch: 21 \tTraining Loss: 0.034301\nEpoch: 22 \tTraining Loss: 0.032995\nEpoch: 23 \tTraining Loss: 0.029734\nEpoch: 24 \tTraining Loss: 0.028685\nEpoch: 25 \tTraining Loss: 0.026271\nEpoch: 26 \tTraining Loss: 0.025489\nEpoch: 27 \tTraining Loss: 0.024299\nEpoch: 28 \tTraining Loss: 0.022422\nEpoch: 29 \tTraining Loss: 0.020229\nEpoch: 30 \tTraining Loss: 0.019866\n" ] ], [ [ "---\n## Test the Trained Network\n\nFinally, we test our best model on previously unseen **test data** and evaluate it's performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.\n\n#### `model.eval()`\n\n`model.eval(`) will set all the layers in your model to evaluation mode. This affects layers like dropout layers that turn \"off\" nodes during training with some probability, but should allow every node to be \"on\" for evaluation!", "_____no_output_____" ] ], [ [ "# initialize lists to monitor test loss and accuracy\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. 
for i in range(10))\n\nmodel.eval() # prep model for *evaluation*\n\nfor data, target in test_loader:\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update test loss \n test_loss += loss.item()*data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1)\n # compare predictions to true label\n correct = np.squeeze(pred.eq(target.data.view_as(pred)))\n # calculate test accuracy for each object class\n for i in range(batch_size):\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# calculate and print avg test loss\ntest_loss = test_loss/len(test_loader.dataset)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n str(i), 100 * class_correct[i] / class_total[i],\n class_correct[i], class_total[i]))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))", "Test Loss: 0.066156\n\nTest Accuracy of 0: 98% (970/980)\nTest Accuracy of 1: 99% (1124/1135)\nTest Accuracy of 2: 97% (1010/1032)\nTest Accuracy of 3: 98% (994/1010)\nTest Accuracy of 4: 98% (966/982)\nTest Accuracy of 5: 98% (877/892)\nTest Accuracy of 6: 97% (937/958)\nTest Accuracy of 7: 97% (1007/1028)\nTest Accuracy of 8: 97% (952/974)\nTest Accuracy of 9: 98% (993/1009)\n\nTest Accuracy (Overall): 98% (9830/10000)\n" ] ], [ [ "### Visualize Sample Test Results\n\nThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.", "_____no_output_____" ] ], [ [ "# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# get sample outputs\noutput = model(images)\n# convert output probabilities to predicted class\n_, preds = torch.max(output, 1)\n# prep images for display\nimages = images.numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(\"{} ({})\".format(str(preds[idx].item()), str(labels[idx].item())),\n color=(\"green\" if preds[idx]==labels[idx] else \"red\"))", "/tmp/ipykernel_2060/3350221300.py:15: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0de2c7c566997743680d9d910deffd26b20d926
109,399
ipynb
Jupyter Notebook
exploratory/2020-03-09-GPUInference.ipynb
w210-accessibility/classify-streetview
d60328484ea992b4cb2ffecb04bb548efaf06f1b
[ "MIT" ]
2
2020-06-23T04:02:50.000Z
2022-02-08T00:59:24.000Z
exploratory/2020-03-09-GPUInference.ipynb
w210-accessibility/classify-streetview
d60328484ea992b4cb2ffecb04bb548efaf06f1b
[ "MIT" ]
null
null
null
exploratory/2020-03-09-GPUInference.ipynb
w210-accessibility/classify-streetview
d60328484ea992b4cb2ffecb04bb548efaf06f1b
[ "MIT" ]
null
null
null
156.284286
84,852
0.866242
[ [ [ "import pandas as pd\nimport os\nimport s3fs # for reading from S3FileSystem\nimport json\n\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n\nimport torch.nn as nn\nimport torch\nimport torch.utils.model_zoo as model_zoo\nimport numpy as np\n\nimport torchvision.models as models # To get ResNet18\n\n# From - https://github.com/cfotache/pytorch_imageclassifier/blob/master/PyTorch_Image_Inference.ipynb\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom PIL import Image\nfrom torch.autograd import Variable\n\nfrom torch.utils.data.sampler import SubsetRandomSampler", "_____no_output_____" ] ], [ [ "# Prepare the Model", "_____no_output_____" ] ], [ [ "SAGEMAKER_PATH = r'/home/ec2-user/SageMaker'\n\nMODEL_PATH = os.path.join(SAGEMAKER_PATH, r'sidewalk-cv-assets19/pytorch_pretrained/models/20e_slid_win_no_feats_r18.pt')", "_____no_output_____" ], [ "os.path.exists(MODEL_PATH)", "_____no_output_____" ], [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice", "_____no_output_____" ], [ "# Use PyTorch's ResNet18\n# https://stackoverflow.com/questions/53612835/size-mismatch-for-fc-bias-and-fc-weight-in-pytorch\nmodel = models.resnet18(num_classes=5) ", "_____no_output_____" ], [ "model.to(device)", "_____no_output_____" ], [ "model.load_state_dict(torch.load(MODEL_PATH))\nmodel.eval()", "_____no_output_____" ] ], [ [ "# Prep Data", "_____no_output_____" ] ], [ [ "# From Galen\n\ntest_transforms = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n#device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n# the dataset loads the files into pytorch vectors\n#image_dataset = TwoFileFolder(dir_containing_crops, meta_to_tensor_version=2, transform=data_transform)\n\n# the dataloader takes these vectors and batches them together for parallelization, increasing performance\n#dataloader = torch.utils.data.DataLoader(image_dataset, batch_size=4, shuffle=True, num_workers=4)\n\n# this is the number of additional features provided by the dataset\n#len_ex_feats = image_dataset.len_ex_feats\n#dataset_size = len(image_dataset)", "_____no_output_____" ], [ "# Load in the data\ndata_dir = 'images'\n\ndata = datasets.ImageFolder(data_dir, transform=test_transforms)\nclasses = data.classes", "_____no_output_____" ], [ "!ls -a images", ". 0_present 2_surface_prob 4_null\n.. 1_missing 3_obstacle .ipynb_checkpoints\n" ], [ "!rm -f -r images/.ipynb_checkpoints/", "_____no_output_____" ], [ "# Examine the classes based on folders... 
\n# Need to make sure that we don't get a .ipynb_checkpoints as a folder\n# Discussion here - https://forums.fast.ai/t/how-to-remove-ipynb-checkpoint/8532/19\nclasses", "_____no_output_____" ], [ "num = 10\n\nindices = list(range(len(data)))\nprint(indices)\nnp.random.shuffle(indices)\nidx = indices[:num]\n\ntest_transforms = transforms.Compose([transforms.Resize(224),\n transforms.ToTensor(),\n ])\n\n#sampler = SubsetRandomSampler(idx)\nloader = torch.utils.data.DataLoader(data, batch_size=num)\ndataiter = iter(loader)\nimages, labels = dataiter.next()", "[0, 1, 2, 3]\n" ], [ "len(images)", "_____no_output_____" ], [ "# Look at the first image\nimages[0]", "_____no_output_____" ], [ "len(labels)", "_____no_output_____" ], [ "labels", "_____no_output_____" ] ], [ [ "# Execute Inference on 2 Sample Images", "_____no_output_____" ] ], [ [ "# Note on how to make sure the model and the input tensors are both on cuda device (gpu)\n# https://discuss.pytorch.org/t/runtimeerror-input-type-torch-cuda-floattensor-and-weight-type-torch-floattensor-should-be-the-same/21782/6", "_____no_output_____" ], [ "def predict_image(image, model):\n image_tensor = test_transforms(image).float()\n image_tensor = image_tensor.unsqueeze_(0)\n input = Variable(image_tensor)\n input = input.to(device)\n output = model(input)\n index = output.data.cpu().numpy().argmax()\n return index, output ", "_____no_output_____" ], [ "to_pil = transforms.ToPILImage()\n#images, labels = get_random_images(5)\nfig=plt.figure(figsize=(10,10))\nfor ii in range(len(images)):\n image = to_pil(images[ii])\n index, output = predict_image(image, model)\n print(f'index: {index}')\n print(f'output: {output}')\n sub = fig.add_subplot(1, len(images), ii+1)\n res = int(labels[ii]) == index\n sub.set_title(str(classes[index]) + \":\" + str(res))\n plt.axis('off')\n plt.imshow(image)\nplt.show()", "index: 3\noutput: tensor([[-0.2533, 0.2435, -0.3366, 5.1900, -4.6836]], device='cuda:0',\n grad_fn=<AddmmBackward>)\nindex: 3\noutput: tensor([[-0.2533, 0.2435, -0.3366, 5.1900, -4.6836]], device='cuda:0',\n grad_fn=<AddmmBackward>)\nindex: 3\noutput: tensor([[-0.4240, 0.8602, -2.7293, 3.3282, -0.9938]], device='cuda:0',\n grad_fn=<AddmmBackward>)\nindex: 3\noutput: tensor([[-0.2533, 0.2435, -0.3366, 5.1900, -4.6836]], device='cuda:0',\n grad_fn=<AddmmBackward>)\n" ], [ "res", "_____no_output_____" ] ], [ [ "# Comments and Questions\n\nWhat's the order of the labels (and how I should order the folders for the input data?) \n\nThis file implies that there are different orders\nhttps://github.com/ProjectSidewalk/sidewalk-cv-assets19/blob/master/GSVutils/sliding_window.py\n\n```label_from_int = ('Curb Cut', 'Missing Cut', 'Obstruction', 'Sfc Problem')\npytorch_label_from_int = ('Missing Cut', \"Null\", 'Obstruction', \"Curb Cut\", \"Sfc Problem\")```", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
d0de2c7e7fb49d5126c680ecbb79b5eabc1ae31b
7,920
ipynb
Jupyter Notebook
Modulo1/4. Entrada y Salida de Datos.ipynb
IsabelMamani/PythonMamani
c9213388caf47474fa95b4ba3f38d9c1f220f279
[ "Apache-2.0" ]
null
null
null
Modulo1/4. Entrada y Salida de Datos.ipynb
IsabelMamani/PythonMamani
c9213388caf47474fa95b4ba3f38d9c1f220f279
[ "Apache-2.0" ]
null
null
null
Modulo1/4. Entrada y Salida de Datos.ipynb
IsabelMamani/PythonMamani
c9213388caf47474fa95b4ba3f38d9c1f220f279
[ "Apache-2.0" ]
null
null
null
20.412371
257
0.506313
[ [ [ "# ENTRADA Y SALIDA DE DATOS", "_____no_output_____" ], [ "## 1. Entrada de Información por teclado (Input)", "_____no_output_____" ], [ "En python la funcion input() nos ayudará con la tarea de capturar datos del usuario", "_____no_output_____" ] ], [ [ "# input por defecto nos devuelve datos de tipo texto\n# SHIFT + TAB -> Documentacion de la palabra a ejecutar\ndecimal = input(\"Introduce un número decimal con punto: \")", "Introduce un número decimal con punto: 3.099\n" ], [ "type(decimal)", "_____no_output_____" ], [ "numero = float(decimal)", "_____no_output_____" ], [ "# tab autocompleta texto\nnumero", "_____no_output_____" ], [ "type(numero)", "_____no_output_____" ] ], [ [ "## 2. Entrada de datos por Argumentos", "_____no_output_____" ], [ "Para poder enviar información a un script y manejarla, tenemos que utilizar la librería de sistema [sys](https://www.geeksforgeeks.org/how-to-use-sys-argv-in-python/). En ella encontraremos la lista argv que almacena los argumentos enviados al script.", "_____no_output_____" ] ], [ [ "import sys\nprint(sys.argv)", "['C:\\\\Users\\\\valer\\\\anaconda3\\\\lib\\\\site-packages\\\\ipykernel_launcher.py', '-f', 'C:\\\\Users\\\\valer\\\\AppData\\\\Roaming\\\\jupyter\\\\runtime\\\\kernel-0dde64eb-5976-4dc0-aad5-83f93a56f1e0.json']\n" ] ], [ [ "## 3. Salida de información (Output)", "_____no_output_____" ], [ "La función print nos ayudará a realizar salidas de información", "_____no_output_____" ] ], [ [ "print('hola')", "hola\n" ] ], [ [ "# EJERCICIOS", "_____no_output_____" ], [ "#### 1. Realiza un programa que lea 2 números por teclado y determine los siguientes aspectos (es suficiene con mostrar True o False):\n\n- Si los dos números son iguales\n- Si los dos números son diferentes\n- Si el primero es mayor que el segundo\n- Si el segundo es mayor o igual que el primero", "_____no_output_____" ] ], [ [ "num1=float(input(\"Escribir numero 1:\"))\nnum2=float(input(\"Escribir numero 2:\"))", "Escribir numero 1: 3.7\nEscribir numero 2: 6.3\n" ], [ "print(\"Son iguales?\",num1==num2)\nprint(\"Son diferentes?\",num1!=num2)\nprint(\"Número1 mayor?\",num1>num2)\nprint(\"Número2 mayor o igual que Numero2?\",num1<=num2)", "Son iguales? False\nSon diferentes? True\nNúmero1 mayor? False\nNúmero2 mayor o igual que Numero2? True\n" ], [ "a = float( input(\"Primer número: \") )\nb = float( input(\"Segundo número: \") )", "Primer número: 5\nSegundo número: 2\n" ], [ "print(\"Los dos números son iguales: \", a == b)\nprint(\"Los dos números son diferentes: \", a != b)\nprint(\"El primero es mayor que el segundo: \", a > b)\nprint(\"El segundo es mayor o igual que el primero: \", a <= b)", "Los dos números son iguales: False\nLos dos números son diferentes: True\nEl primero es mayor que el segundo: True\nEl segundo es mayor o igual que el primero: False\n" ] ], [ [ "### 2. Escribir un programa que pregunte el nombre del usuario en la consola y después de que el usuario lo introduzca muestre por pantalla la cadena ¡Hola \"nombre\"!, donde \"nombre\" es el nombre que el usuario haya introducido.\n\n\n", "_____no_output_____" ] ], [ [ "nombre=input(\"¿Cuál es tu nombre?\")\n\"¡Hola \" + nombre+\"!\"", "¿Cuál es tu nombre? Valeria\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0de431cf915d1f0947ef171d66cf1f4365d60c0
1,959
ipynb
Jupyter Notebook
manifest.ipynb
sakhawathsumit/RSR-GAN
a42201ed908113e7227ca1eda8d3dc54f266d337
[ "Apache-2.0" ]
8
2018-06-04T13:38:35.000Z
2021-05-28T08:42:39.000Z
manifest.ipynb
sakhawathsumit/RSR-GAN
a42201ed908113e7227ca1eda8d3dc54f266d337
[ "Apache-2.0" ]
null
null
null
manifest.ipynb
sakhawathsumit/RSR-GAN
a42201ed908113e7227ca1eda8d3dc54f266d337
[ "Apache-2.0" ]
2
2019-01-15T02:12:22.000Z
2021-08-08T10:35:50.000Z
19.39604
109
0.517611
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "!ls '../../SpeechRecognition.EN/deepspeech.cv.i.dvd/data/'", "data_loader.py\t__init__.py __pycache__\t utils.py\r\ndistributed.py\told\t train_manifest.csv val_manifest.csv\r\n" ], [ "df = pd.read_csv('../../SpeechRecognition.EN/deepspeech.cv.i.dvd/data/train_manifest.csv', header=None)", "_____no_output_____" ], [ "train_df = df[:5000]\nval_df = df[5000:6000]\nlen(train_df), len(val_df)", "_____no_output_____" ], [ "train_df.to_csv('train_manifest.csv', header=None, index=None)\nval_df.to_csv('val_manifest.csv', header=None, index=None)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0de53d8ad4d217d9ba3acefadcc5ac3ff1df1e4
13,613
ipynb
Jupyter Notebook
notebooks/cvr/lefteroversFromPipeLineModel.ipynb
mssalvador/notebooks
7b6d0619f4e62cec7ef953ed9330d4a1ee15e7c8
[ "Apache-2.0" ]
null
null
null
notebooks/cvr/lefteroversFromPipeLineModel.ipynb
mssalvador/notebooks
7b6d0619f4e62cec7ef953ed9330d4a1ee15e7c8
[ "Apache-2.0" ]
null
null
null
notebooks/cvr/lefteroversFromPipeLineModel.ipynb
mssalvador/notebooks
7b6d0619f4e62cec7ef953ed9330d4a1ee15e7c8
[ "Apache-2.0" ]
null
null
null
39.005731
1,637
0.552707
[ [ [ "transformedDfs = [i.transform(logDf) for i in model]\ncosts = [(i,v.stages[-1].computeCost(transformedDfs[i])) for i,v in enumerate(model)]\ncosts\n\n#transformedModels = [v.stages[-1].computeCost(transformedDfs[i]) for i,v in enumerate(model)]", "_____no_output_____" ], [ "newParamMap = ({kmeans.k: 10,kmeans.initMode:\"random\"})\nnewModel = pipeline.fit(logDf,newParamMap)\n#computedModel = pipeline.fit(logDf)\n\n#ide til næste gang beregn beste stuff for alle modeller i pipelinen, derefer tag bedste pipeline ud og byg videre på den.", "_____no_output_____" ], [ "trans = newModel.transform(logDf)\ntrans.groupBy(\"prediction\").count().show() # shows the distribution of companies \n\n\nvec = [Row(cluster=i,center=Vectors.dense(v)) for i,v in enumerate(newModel.stages[-1].clusterCenters())]\n#print(type(vec))\nSpDf = sqlContext.createDataFrame(data=vec)\n#SpDf.show(truncate=False)\n\n\n\nfeatureContributionUdf = F.udf(lambda x,y: (x-y)*(x-y),VectorUDT() )\nsqrtUdf = F.udf(lambda x,y: float(Vectors.norm(vector=x-y,p=2)),DoubleType())\nprintUdf = F.udf(lambda x: type(x),StringType())\ntoDenseUDf = F.udf(lambda x: Vectors.dense(x.toArray()),VectorUDT())\n#print(np.sum(vec[0][\"vec\"]))\njoinedDf = (trans\n .join(SpDf,on=(trans[\"prediction\"]==SpDf[\"cluster\"]),how=\"left\")\n .withColumn(colName=\"features\",col=toDenseUDf(F.col(\"features\")))\n .drop(SpDf[\"cluster\"])\n .withColumn(colName=\"contribution\",col=featureContributionUdf(F.col(\"features\"),F.col(\"center\")))\n .withColumn(colName=\"distance\",col=sqrtUdf(F.col(\"features\"),F.col(\"center\")))\n )\n", "_____no_output_____" ], [ "int_range = widgets.IntSlider()\ndisplay(int_range)\n\ndef on_value_change(change):\n print(change['new'])\n\nint_range.observe(on_value_change, names='value')", "_____no_output_____" ], [ "def printTotalAndAvgFeatContribution(df,cluster=0,toPrint=False):\n joinedRdd = (df\n .select(\"prediction\",\"contribution\")\n .rdd)\n #print(joinedRdd.take(1))\n summed = joinedRdd.reduceByKey(add)\n normedtotalContribute = summed.map(lambda x: (x[0],x[1])).collectAsMap()\n \n \n \n return normedtotalContribute", "_____no_output_____" ], [ "stuff = printTotalAndAvgFeatContribution(joinedDf)\n\ncenters = [(i,np.log(stuff[i])/np.sum(np.log(stuff[i]))) for i in range(0,10)]\ncols =joinedDf.columns[5:31]\ncenters\n\nclusters = np.array([i[1] for i in centers if i[0] in [6,9,4] ])\ntransposedCluster = np.log1p(clusters.transpose())\nN =3\n\nimport colorsys\nHSV_tuples = [(x*1.0/len(transposedCluster), 0.5, 0.5) for x in range(len(transposedCluster))]\nRGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))\n\nind = np.arange(N)\n#print(ind)# the x locations for the groups\nwidth = 0.5 \nplots = [plt.bar(ind, transposedCluster[1], width, color='#d62728')] \nformer = transposedCluster[1]\nfor i,v in enumerate(transposedCluster[1:]):\n plots.append(plt.bar(ind, v, width, color=RGB_tuples[i],bottom=former))\n former += v\nplt.ylabel('log Scores')\nplt.title('Component Contribution for outlier clusters')\nplt.xticks(ind+0.3, ['C_'+str(i) for i in [6,9,4]])\nplt.legend([p[0] for p in plots], cols,bbox_to_anchor=(1.05, 1.5),loc=2,borderaxespad=1)\nplt.show()", "_____no_output_____" ], [ "class DistanceTransformation(Transformer,HasInputCol,HasOutputCol):\n '''\n \n '''\n @keyword_only\n def __init__(self, inputCol=None, outputCol=None, model=None):\n super(DistanceTransformation, self).__init__()\n kwargs = self.__init__._input_kwargs\n self.setParams(**kwargs)\n\n @keyword_only\n def setParams(self, 
inputCol=None, outputCol=None, model=None):\n kwargs = self.setParams._input_kwargs\n return self._set(**kwargs)\n\n def _transform(self, dataset,model):\n \n \n def computeAndInsertClusterCenter(dataset,centers):\n '''\n Insert a clusterCenter as column.\n '''\n\n distanceUdf = F.udf(lambda x,y: float(np.sqrt(np.sum((x-y)*(x-y)))),DoubleType())\n\n return (dataset\n .join(F.broadcast(centers),on=(dataset[\"prediction\"]==centers[\"cluster\"]),how=\"inner\")\n .withColumn(colName=\"distance\",col=distanceUdf(F.col(\"scaledFeatures\"),F.col(\"center\")))\n .drop(\"cluster\")\n .drop(\"features\")\n .drop(\"v2\")\n )\n", "_____no_output_____" ], [ "print(getCenters(0))\n\nparamGrid = ParamGridBuilder() \\\n .addGrid(kmeans.k, [2, 4, 10]) \\\n .addGrid(kmeans.initSteps, [3,5,10]) \\\n .build()\n", "_____no_output_____" ], [ "#create an unsupervised classification evaluator\nclass ElbowEvaluation(Estimator,ValidatorParams):\n '''\n doc\n '''\n \n @keyword_only\n def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None,\n seed=None):\n super(ElbowEvaluation, self).__init__()\n kwargs = self.__init__._input_kwargs\n self._set(**kwargs)\n \n @keyword_only\n def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None):\n kwargs = self.setParams._input_kwargs\n return self._set(**kwargs)\n \n computeDistanceToCenterUdf = F.udf(lambda x,y: (x-y)*(x-y),VectorUDT())\n \n \n def _fit(self, dataset):\n est = self.getOrDefault(self.estimator)\n epm = self.getOrDefault(self.estimatorParamMaps)\n numModels = len(epm)\n eva = self.getOrDefault(self.evaluator)\n \n for j in range(numModels):\n model = est.fit(dataset, epm[j])\n model.\n \n metric = eva.evaluate(model.transform(dataset, epm[j]))\n metrics[j] += metric\n if eva.isLargerBetter():\n bestIndex = np.argmax(metrics)\n else:\n bestIndex = np.argmin(metrics)\n bestModel = est.fit(dataset, epm[bestIndex])\n return self._copyValues(TrainValidationSplitModel(bestModel, metrics))\n \n def copy(self, extra=None):\n \"\"\"\n Creates a copy of this instance with a randomly generated uid\n and some extra params. This copies creates a deep copy of\n the embedded paramMap, and copies the embedded and extra parameters over.\n\n :param extra: Extra parameters to copy to the new instance\n :return: Copy of this instance\n \"\"\"\n if extra is None:\n extra = dict()\n newTVS = Params.copy(self, extra)\n if self.isSet(self.estimator):\n newTVS.setEstimator(self.getEstimator().copy(extra))\n # estimatorParamMaps remain the same\n if self.isSet(self.evaluator):\n newTVS.setEvaluator(self.getEvaluator().copy(extra))\n return newTVS", "_____no_output_____" ], [ "\nclass ElbowEvaluationModel(Model, ValidatorParams):\n \"\"\"\n .. note:: Experimental\n\n Model from train validation split.\n\n .. versionadded:: 2.0.0\n \"\"\"\n\n def __init__(self, bestModel, validationMetrics=[]):\n super(TrainValidationSplitModel, self).__init__()\n #: best model from cross validation\n self.bestModel = bestModel\n #: evaluated validation metrics\n self.validationMetrics = validationMetrics\n\n def _transform(self, dataset):\n return self.bestModel.transform(dataset)\n\n def copy(self, extra=None):\n \"\"\"\n Creates a copy of this instance with a randomly generated uid\n and some extra params. 
This copies the underlying bestModel,\n creates a deep copy of the embedded paramMap, and\n copies the embedded and extra parameters over.\n And, this creates a shallow copy of the validationMetrics.\n\n :param extra: Extra parameters to copy to the new instance\n :return: Copy of this instance\n \"\"\"\n if extra is None:\n extra = dict()\n bestModel = self.bestModel.copy(extra)\n validationMetrics = list(self.validationMetrics)\n return TrainValidationSplitModel(bestModel, validationMetrics)\n\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0de598e5951a9700b9d86e602e4f4f6fbc87293
10,738
ipynb
Jupyter Notebook
docs/source/intro-clx-workflow.ipynb
jakirkham/clx
9fb1c7ea633d8015b35c5cfaa8ccec47d45f7d2b
[ "Apache-2.0" ]
null
null
null
docs/source/intro-clx-workflow.ipynb
jakirkham/clx
9fb1c7ea633d8015b35c5cfaa8ccec47d45f7d2b
[ "Apache-2.0" ]
null
null
null
docs/source/intro-clx-workflow.ipynb
jakirkham/clx
9fb1c7ea633d8015b35c5cfaa8ccec47d45f7d2b
[ "Apache-2.0" ]
null
null
null
32.539394
386
0.578599
[ [ [ "# CLX Workflow\n\nThis is an introduction to the CLX Workflow and it's I/O components.", "_____no_output_____" ], [ "## What is a CLX Workflow?\n\nA CLX Workflow receives data from a particular source, performs operations on that data within a GPU dataframe, and outputs that data to a particular destination. This guide will teach you how to configure your workflow inputs and outputs around a simple workflow example.", "_____no_output_____" ], [ "## When to use a CLX Workflow\n\nA CLX Workflow provides a simple and modular way of \"plugging in\" a particular workflow to a read from different inputs and outputs. Use a CLX Workflow when you would like to deploy a workflow as part of a data pipeline.", "_____no_output_____" ], [ "#### A simple example of a custom Workflow", "_____no_output_____" ] ], [ [ "from clx.workflow.workflow import Workflow\nclass CustomWorkflow(Workflow):\n def workflow(self, dataframe):\n dataframe[\"enriched\"] = \"enriched output\"\n return dataframe", "_____no_output_____" ] ], [ [ "The Workflow relies on the Workflow class which handles the I/O and general data processing functionality. To implement a new Workflow, the developer need only implement the `workflow` function which receives an input dataframe, as shown above. \n \nA more advanced example of a Worlflow can be found [here](https://github.com/rapidsai/clx/blob/branch-0.12/clx/workflow/splunk_alert_workflow.py).\nIt is an example of a [Splunk](https://www.splunk.com/) Alert Workflow used to find anamolies in Splunk alert data.\n\n", "_____no_output_____" ], [ "## Workflow I/O Components\n\nIn order to deploy a workflow to an input an output data feed, we integrate the CLX I/O components.", "_____no_output_____" ], [ "Let's look quickly at what a workflow configuration for the source and destination might look like. You can see below we declare each of the properties within a dictionary. For more information on how to declare configuration within a configurable yaml file [go]. \n\n", "_____no_output_____" ] ], [ [ "source = {\n \"type\": \"fs\",\n \"input_format\": \"csv\",\n \"input_path\": \"/full/path/to/input/data\",\n \"schema\": [\"raw\"],\n \"delimiter\": \",\",\n \"required_cols\": [\"raw\"],\n \"dtype\": [\"str\"],\n \"header\": 0\n}\ndestination = {\n \"type\": \"fs\",\n \"output_format\": \"csv\",\n \"output_path\": \"/full/path/to/output/data\"\n}", "_____no_output_____" ] ], [ [ "\nThe first step to configuring the input and output of a workflow is to determine the source and destination type. Then to set the associated parameters for that specific type.\nAs seen above the `type` property is listed first and can be one of the following.\n", "_____no_output_____" ], [ "\nSource Types\n \n* `fs` - Read from a local filesystem\n* `dask_fs` - Increase the speed of GPU workflow operations by reading from a file using Dask\n* `kafka` - Read from [Kafka](https://kafka.apache.org/)\n\nDestination Types\n \n* `fs` - Writing to local filesystem\n* `kafka` - Write to [Kafka](https://kafka.apache.org/)\n", "_____no_output_____" ], [ "\n### Source and Destination Configurations\n\n#### Filesystem\nIf the `fs` type is used, the developer must distinguish the data format using the `input_format` attribute. Formats available are: csv, parquet, and orc.\n \nThe associated parameters available for the `fs` type and `input_format` are documented within the [cuDF I/O](https://rapidsai.github.io/projects/cudf/en/0.11.0/api.html#module-cudf.io.csv) API. 
For example for reading data from a csv file, reference [cudf.io.csv.read_csv](https://rapidsai.github.io/projects/cudf/en/0.11.0/api.html#cudf.io.csv.read_csv) available parameters.\n\nExample\n", "_____no_output_____" ] ], [ [ "source = {\n \"type\": \"fs\",\n \"input_format\": \"parquet\",\n \"input_path\": \"/full/path/to/input/data\",\n \"columns\": [\"x\"]\n}", "_____no_output_____" ] ], [ [ "#### Dask Filesystem\n \nIf the `dask_fs` type is used the developer must distinguish the data format using the `input_format` attribute. Formats available are: csv, parquet, and orc.\n \nThe associated parameters available for the `dask_fs` type and `input_format` are listed within the [Dask cuDF](https://rapidsai.github.io/projects/cudf/en/0.11.0/10min.html#Getting-Data-In/Out) documentation. \n \nExample", "_____no_output_____" ] ], [ [ "source = {\n \"type\": \"dask_fs\",\n \"input_format\": \"csv\",\n \"input_path\": \"/full/path/to/input/data/*.csv\"\n}", "_____no_output_____" ] ], [ [ "#### Kafka\nIf the `kafka` type is used the following parameters must be indicated \n \nSource \n \n* `kafka_brokers` - Kafka brokers\n* `group_id` - Group ID for consuming kafka messages\n* `consumer_kafka_topics` - Names of kafka topics to read from\n* `batch_size` - Indicates number of kafka messages to read before data is processed through the workflow\n* `time_window` - Maximum time window to wait for `batch_size` to be reached before workflow processing begins. \n \nDestination \n \n* `kafka_brokers` - Kafka brokers\n* `publisher_kafka_topic` - Names of kafka topic to write data to\n* `batch_size` - Indicates number of workflow-processed messages to aggregate before data is written to the kafka topic\n* `output_delimiter` - Delimiter of the data columns\n \nExample", "_____no_output_____" ] ], [ [ "source = {\n \"type\": \"kafka\",\n \"kafka_brokers\": \"kafka:9092\",\n \"group_id\": \"cyber\",\n \"batch_size\": 10,\n \"consumer_kafka_topics\": [\"topic1\", \"topic2\"],\n \"time_window\": 5\n}\ndest = {\n \"type\": \"kafka\",\n \"kafka_brokers\": \"kafka:9092\"\n \"batch_size\": 10,\n \"publisher_kafka_topic\": \"topic3\",\n \"output_delimiter\": \",\"\n}", "_____no_output_____" ] ], [ [ "## Tying it together", "_____no_output_____" ], [ "Once we have established our workflow and source and destination configurations we can now run our workflow. Let's create a workflow using the `CustomWorkflow` we created above.\n\nFirstly, we must know the parameters for instantiating a basic workflow\n \n* `name` - The name of the workflow\n* `source` - The source of input data (optional)\n* `destination` - The destination for output data (optional)", "_____no_output_____" ] ], [ [ "from clx.workflow.workflow import Workflow\nclass CustomWorkflow(Workflow):\n def workflow(self, dataframe):\n dataframe[\"enriched\"] = \"enriched output\"\n return dataframe\n \nsource = {\n \"type\": \"fs\",\n \"input_format\": \"csv\",\n \"input_path\": \"/full/path/to/input/data\",\n \"schema\": [\"raw\"],\n \"delimiter\": \",\",\n \"required_cols\": [\"raw\"],\n \"dtype\": [\"str\"],\n \"header\": 0\n}\ndestination = {\n \"type\": \"fs\",\n \"output_format\": \"csv\",\n \"output_path\": \"/full/path/to/output/data\"\n}\n\nmy_new_workflow = CustomWorkflow(source=source, destination=destination, name=\"my_new_workflow\")\nmy_new_workflow.run_workflow()", "_____no_output_____" ] ], [ [ "## Workflow configurations in an external file\n\nSometimes workflow configurations may need to change dependent upon the environment. 
To avoid declaring workflow configurations within source code, you may also declare them in an external yaml file. A workflow will look for and establish I/O connections by searching for configurations in the following order:\n \n1. /etc/clx/[workflow-name]/workflow.yaml\n1. ~/.config/clx/[workflow-name]/workflow.yaml\n1. In-line python config\n \nIf the source and destination are indicated in external files, they are not required when instantiating a new workflow.", "_____no_output_____" ] ], [ [ "# Workflow config located at /etc/clx/my_new_workflow/workflow.yaml\nmy_new_workflow = CustomWorkflow(name=\"my_new_workflow\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0de605d386bf49f98814eb29f244a34a614aee6
471,878
ipynb
Jupyter Notebook
AAAI/Interpretability/dataset1/second_layer_averaging_adam_lr001.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
2
2019-08-24T07:20:35.000Z
2020-03-27T08:16:59.000Z
AAAI/Interpretability/dataset1/second_layer_averaging_adam_lr001.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
null
null
null
AAAI/Interpretability/dataset1/second_layer_averaging_adam_lr001.ipynb
lnpandey/DL_explore_synth_data
0a5d8b417091897f4c7f358377d5198a155f3f24
[ "MIT" ]
3
2019-06-21T09:34:32.000Z
2019-09-19T10:43:07.000Z
334.428065
42,226
0.906686
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "path = '/content/drive/MyDrive/Research/AAAI/dataset1/second_layer_without_entropy/'", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd\n\nimport torch\nimport torchvision\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom matplotlib import pyplot as plt\n%matplotlib inline\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False", "_____no_output_____" ] ], [ [ "# Generate dataset", "_____no_output_____" ] ], [ [ "np.random.seed(12)\ny = np.random.randint(0,10,5000)\nidx= []\nfor i in range(10):\n print(i,sum(y==i))\n idx.append(y==i)\nx = np.zeros((5000,2))\nnp.random.seed(12)\nx[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))\nx[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))\nx[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))\nx[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))\nx[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))\nx[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))\nx[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))\nx[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))\nx[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))\nx[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))\ncolor = ['#1F77B4','orange', 'g','brown']\nname = [1,2,3,0]\nfor i in range(10):\n if i==3:\n plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3],label=\"D_\"+str(name[i]))\n elif i>=4:\n plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3])\n else:\n plt.scatter(x[idx[i],0],x[idx[i],1],c=color[i],label=\"D_\"+str(name[i]))\nplt.legend()", "0 530\n1 463\n2 494\n3 517\n4 488\n5 497\n6 493\n7 507\n8 492\n9 519\n" ], [ "x[idx[0]][0], x[idx[5]][5] ", "_____no_output_____" ], [ "desired_num = 6000\nmosaic_list_of_images =[]\nmosaic_label = []\nfore_idx=[]\nfor j in range(desired_num):\n np.random.seed(j)\n fg_class = np.random.randint(0,3)\n fg_idx = np.random.randint(0,9)\n a = []\n for i in range(9):\n if i == fg_idx:\n b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)\n a.append(x[b])\n# print(\"foreground \"+str(fg_class)+\" present at \" + str(fg_idx))\n else:\n bg_class = np.random.randint(3,10)\n b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)\n a.append(x[b])\n# print(\"background \"+str(bg_class)+\" present at \" + str(i))\n a = np.concatenate(a,axis=0)\n mosaic_list_of_images.append(a)\n mosaic_label.append(fg_class)\n fore_idx.append(fg_idx)", "_____no_output_____" ], [ "len(mosaic_list_of_images), mosaic_list_of_images[0]", "_____no_output_____" ] ], [ [ "# load mosaic data", "_____no_output_____" ] ], [ [ "class MosaicDataset(Dataset):\n \"\"\"MosaicDataset dataset.\"\"\"\n\n def __init__(self, mosaic_list, mosaic_label,fore_idx):\n \"\"\"\n Args:\n csv_file (string): Path to the csv file with annotations.\n root_dir (string): Directory with all the images.\n transform (callable, optional): Optional 
transform to be applied\n on a sample.\n \"\"\"\n self.mosaic = mosaic_list\n self.label = mosaic_label\n self.fore_idx = fore_idx\n \n def __len__(self):\n return len(self.label)\n\n def __getitem__(self, idx):\n return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]", "_____no_output_____" ], [ "batch = 250\nmsd1 = MosaicDataset(mosaic_list_of_images[0:3000], mosaic_label[0:3000] , fore_idx[0:3000])\ntrain_loader = DataLoader( msd1 ,batch_size= batch ,shuffle=True)", "_____no_output_____" ], [ "batch = 250\nmsd2 = MosaicDataset(mosaic_list_of_images[3000:6000], mosaic_label[3000:6000] , fore_idx[3000:6000])\ntest_loader = DataLoader( msd2 ,batch_size= batch ,shuffle=True)", "_____no_output_____" ] ], [ [ "# models", "_____no_output_____" ] ], [ [ "class Focus_deep(nn.Module):\n '''\n deep focus network averaged at zeroth layer\n input : elemental data\n '''\n def __init__(self,inputs,output,K,d):\n super(Focus_deep,self).__init__()\n self.inputs = inputs\n self.output = output\n self.K = K\n self.d = d\n self.linear1 = nn.Linear(self.inputs,50, bias=False) #,self.output)\n self.linear2 = nn.Linear(50,50 , bias=False)\n self.linear3 = nn.Linear(50,self.output, bias=False) \n\n torch.nn.init.xavier_normal_(self.linear1.weight)\n torch.nn.init.xavier_normal_(self.linear2.weight)\n torch.nn.init.xavier_normal_(self.linear3.weight)\n \n def forward(self,z):\n batch = z.shape[0]\n x = torch.zeros([batch,self.K],dtype=torch.float64)\n y = torch.zeros([batch,50], dtype=torch.float64) # number of features of output\n features = torch.zeros([batch,self.K,50],dtype=torch.float64)\n x,y = x.to(\"cuda\"),y.to(\"cuda\")\n features = features.to(\"cuda\")\n for i in range(self.K):\n alp,ftrs = self.helper(z[:,i] ) # self.d*i:self.d*i+self.d\n x[:,i] = alp[:,0]\n features[:,i] = ftrs \n x = F.softmax(x,dim=1) # alphas\n for i in range(self.K):\n x1 = x[:,i] \n y = y+torch.mul(x1[:,None],features[:,i]) # self.d*i:self.d*i+self.d\n return y , x \n def helper(self,x):\n x = self.linear1(x)\n x = F.relu(x) \n x = self.linear2(x)\n x1 = F.tanh(x)\n x = F.relu(x)\n x = self.linear3(x)\n #print(x1.shape)\n return x,x1", "_____no_output_____" ], [ "class Classification_deep(nn.Module):\n '''\n input : elemental data\n deep classification module data averaged at zeroth layer\n '''\n def __init__(self,inputs,output):\n super(Classification_deep,self).__init__()\n self.inputs = inputs\n self.output = output\n self.linear1 = nn.Linear(self.inputs,50)\n #self.linear2 = nn.Linear(6,12)\n self.linear2 = nn.Linear(50,self.output)\n\n torch.nn.init.xavier_normal_(self.linear1.weight)\n torch.nn.init.zeros_(self.linear1.bias)\n torch.nn.init.xavier_normal_(self.linear2.weight)\n torch.nn.init.zeros_(self.linear2.bias)\n\n def forward(self,x):\n x = F.relu(self.linear1(x))\n #x = F.relu(self.linear2(x))\n x = self.linear2(x)\n return x ", "_____no_output_____" ], [ "# torch.manual_seed(12)\n# focus_net = Focus_deep(2,1,9,2).double()\n# focus_net = focus_net.to(\"cuda\")", "_____no_output_____" ], [ "# focus_net.linear2.weight.shape,focus_net.linear3.weight.shape", "_____no_output_____" ], [ "# focus_net.linear2.weight.data[25:,:] = focus_net.linear2.weight.data[:25,:] #torch.nn.Parameter(torch.tensor([last_layer]) )\n# (focus_net.linear2.weight[:25,:]== focus_net.linear2.weight[25:,:] )", "_____no_output_____" ], [ "# focus_net.linear3.weight.data[:,25:] = -focus_net.linear3.weight.data[:,:25] #torch.nn.Parameter(torch.tensor([last_layer]) )\n# focus_net.linear3.weight", "_____no_output_____" ], [ "# 
focus_net.helper( torch.randn((5,2,2)).double().to(\"cuda\") )", "_____no_output_____" ], [ "def calculate_attn_loss(dataloader,what,where,criter):\n what.eval()\n where.eval()\n r_loss = 0\n alphas = []\n lbls = []\n pred = []\n fidices = []\n with torch.no_grad():\n for i, data in enumerate(dataloader, 0):\n inputs, labels,fidx = data\n lbls.append(labels)\n fidices.append(fidx)\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n avg,alpha = where(inputs)\n outputs = what(avg)\n _, predicted = torch.max(outputs.data, 1)\n pred.append(predicted.cpu().numpy())\n alphas.append(alpha.cpu().numpy())\n loss = criter(outputs, labels)\n r_loss += loss.item()\n alphas = np.concatenate(alphas,axis=0)\n pred = np.concatenate(pred,axis=0)\n lbls = np.concatenate(lbls,axis=0)\n fidices = np.concatenate(fidices,axis=0)\n #print(alphas.shape,pred.shape,lbls.shape,fidices.shape) \n analysis = analyse_data(alphas,lbls,pred,fidices)\n return r_loss/i,analysis", "_____no_output_____" ], [ "def analyse_data(alphas,lbls,predicted,f_idx):\n '''\n analysis data is created here\n '''\n batch = len(predicted)\n amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0\n for j in range (batch):\n focus = np.argmax(alphas[j])\n if(alphas[j][focus] >= 0.5):\n amth +=1\n else:\n alth +=1\n if(focus == f_idx[j] and predicted[j] == lbls[j]):\n ftpt += 1\n elif(focus != f_idx[j] and predicted[j] == lbls[j]):\n ffpt +=1\n elif(focus == f_idx[j] and predicted[j] != lbls[j]):\n ftpf +=1\n elif(focus != f_idx[j] and predicted[j] != lbls[j]):\n ffpf +=1\n #print(sum(predicted==lbls),ftpt+ffpt)\n return [ftpt,ffpt,ftpf,ffpf,amth,alth]", "_____no_output_____" ] ], [ [ "# training", "_____no_output_____" ] ], [ [ "number_runs = 10\nFTPT_analysis = pd.DataFrame(columns = [\"FTPT\",\"FFPT\", \"FTPF\",\"FFPF\"])\nfull_analysis= []\nfor n in range(number_runs):\n print(\"--\"*40)\n \n # instantiate focus and classification Model\n torch.manual_seed(n)\n where = Focus_deep(2,1,9,2).double()\n where.linear2.weight.data[25:,:] = where.linear2.weight.data[:25,:]\n where.linear3.weight.data[:,25:] = -where.linear3.weight.data[:,:25]\n where = where.double().to(\"cuda\")\n ex,_ = where.helper( torch.randn((5,2,2)).double().to(\"cuda\"))\n print(ex)\n\n torch.manual_seed(n)\n what = Classification_deep(50,3).double()\n where = where.to(\"cuda\")\n what = what.to(\"cuda\")\n\n\n\n # instantiate optimizer\n optimizer_where = optim.Adam(where.parameters(),lr =0.001)#,momentum=0.9)\n optimizer_what = optim.Adam(what.parameters(), lr=0.001)#,momentum=0.9)\n criterion = nn.CrossEntropyLoss()\n acti = []\n analysis_data = []\n loss_curi = []\n epochs = 2000\n\n\n # calculate zeroth epoch loss and FTPT values\n running_loss,anlys_data = calculate_attn_loss(train_loader,what,where,criterion)\n loss_curi.append(running_loss)\n analysis_data.append(anlys_data)\n\n print('epoch: [%d ] loss: %.3f' %(0,running_loss)) \n\n # training starts \n for epoch in range(epochs): # loop over the dataset multiple times\n ep_lossi = []\n running_loss = 0.0\n what.train()\n where.train()\n for i, data in enumerate(train_loader, 0):\n # get the inputs\n inputs, labels,_ = data\n inputs = inputs.double()\n inputs, labels = inputs.to(\"cuda\"),labels.to(\"cuda\")\n\n # zero the parameter gradients\n optimizer_where.zero_grad()\n optimizer_what.zero_grad()\n \n # forward + backward + optimize\n avg, alpha = where(inputs)\n outputs = what(avg)\n loss = criterion(outputs, labels)\n\n # print statistics\n running_loss += loss.item()\n 
loss.backward()\n optimizer_where.step()\n optimizer_what.step()\n\n running_loss,anls_data = calculate_attn_loss(train_loader,what,where,criterion)\n analysis_data.append(anls_data)\n if(epoch % 200==0):\n print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss)) \n loss_curi.append(running_loss) #loss per epoch\n if running_loss<=0.01:\n break\n print('Finished Training run ' +str(n)+' at epoch: ',epoch)\n analysis_data = np.array(analysis_data)\n FTPT_analysis.loc[n] = analysis_data[-1,:4]/30\n full_analysis.append((epoch, analysis_data))\n correct = 0\n total = 0\n with torch.no_grad():\n for data in test_loader:\n images, labels,_ = data\n images = images.double()\n images, labels = images.to(\"cuda\"), labels.to(\"cuda\")\n avg, alpha = where(images)\n outputs = what(avg)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n print('Accuracy of the network on the 3000 test images: %f %%' % ( 100 * correct / total))\n ", "--------------------------------------------------------------------------------\n" ], [ "print(np.mean(np.array(FTPT_analysis),axis=0))", "[85.94666667 13.72666667 0.1 0.22666667]\n" ], [ "FTPT_analysis", "_____no_output_____" ], [ "FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 90 ]", "_____no_output_____" ], [ "print(np.mean(np.array(FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 90 ]),axis=0))", "[85.94666667 13.72666667 0.1 0.22666667]\n" ], [ "cnt=1\nfor epoch, analysis_data in full_analysis:\n analysis_data = np.array(analysis_data)\n # print(\"=\"*20+\"run \",cnt,\"=\"*20)\n \n plt.figure(figsize=(6,5))\n plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0]/30,label=\"FTPT\")\n plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1]/30,label=\"FFPT\")\n plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2]/30,label=\"FTPF\")\n plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3]/30,label=\"FFPF\")\n\n plt.title(\"Training trends for run \"+str(cnt))\n plt.grid()\n # plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n plt.legend()\n plt.xlabel(\"epochs\", fontsize=14, fontweight = 'bold')\n plt.ylabel(\"percentage train data\", fontsize=14, fontweight = 'bold')\n plt.savefig(path + \"run\"+str(cnt)+\".png\",bbox_inches=\"tight\")\n plt.savefig(path + \"run\"+str(cnt)+\".pdf\",bbox_inches=\"tight\")\n cnt+=1", "_____no_output_____" ], [ "FTPT_analysis.to_csv(path+\"synthetic_zeroth.csv\",index=False)", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0de6c928681338f56a9af468fc295b302b84503
5,790
ipynb
Jupyter Notebook
String/1012/1358. Number of Substrings Containing All Three Characters.ipynb
YuHe0108/Leetcode
90d904dde125dd35ee256a7f383961786f1ada5d
[ "Apache-2.0" ]
1
2020-08-05T11:47:47.000Z
2020-08-05T11:47:47.000Z
String/1012/1358. Number of Substrings Containing All Three Characters.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
String/1012/1358. Number of Substrings Containing All Three Characters.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
28.243902
171
0.39171
[ [ [ "说明:\n 给定一个仅由字符a,b和c组成的字符串s。\n 返回至少包含所有这些字符a,b和c的子字符串的数目。\n\nExample 1:\n Input: s = \"abcabc\"\n Output: 10\n Explanation: \n The substrings containing at least one occurrence of the characters a, b and c are \n \"abc\", \"abca\", \"abcab\", \"abcabc\", \"bca\", \"bcab\", \"bcabc\", \"cab\", \"cabc\" and \"abc\" (again). \n \n\nExample 2:\n Input: s = \"aaacb\"\n Output: 3\n Explanation: The substrings containing at least one occurrence of the characters a, b and c are \"aaacb\", \"aacb\" and \"acb\". \n\nExample 3:\n Input: s = \"abc\"\n Output: 1\n\nConstraints:\n 1、3 <= s.length <= 5 x 10^4\n 2、s only consists of a, b or c characters.", "_____no_output_____" ] ], [ [ "class Solution:\n def numberOfSubstrings(self, s: str) -> int:\n letters = {'a', 'b', 'c'}\n N = len(s)\n count = 0\n \n for gap in range(3, N + 1):\n for start in range(N - gap + 1):\n sub_str = s[start:start + gap]\n if set(sub_str) == letters:\n count += 1\n return count", "_____no_output_____" ], [ "from collections import Counter\n\nclass Solution:\n def numberOfSubstrings(self, s: str) -> int:\n def count_letter(ct):\n if ct['a'] and ct['b'] and ct['c']:\n return True\n return False\n \n count = 0\n for i in range(len(s)):\n ctr = Counter(s[:len(s) - i])\n for j in range(i + 1):\n if j != 0:\n start_idx = j - 1\n end_idx = len(s) + start_idx - i\n ctr[s[start_idx]] -= 1\n ctr[s[end_idx]] += 1\n if count_letter(ctr):\n count += 1\n return count", "_____no_output_____" ], [ "class Solution:\n def numberOfSubstrings(self, s: str) -> int:\n a, b, c = 0, 0, 0\n ans, i, n = 0, 0, len(s)\n for j, letter in enumerate(s):\n if letter == 'a':\n a += 1\n elif letter == 'b':\n b += 1\n else:\n c += 1\n while a and b and c:\n ans += n - j\n print(n - j, n, j)\n if s[i] == 'a':\n a -= 1\n elif s[i] == 'b':\n b -= 1\n else:\n c -= 1\n i += 1\n return ans", "_____no_output_____" ], [ "solution = Solution()\nsolution.numberOfSubstrings('abcbb')", "3 5 2\n" ], [ "a = {'a', 'c', 'b'}\nb = {'a', 'b', 'c'}\nprint(a == b)", "True\n" ], [ "class Solution:\n def numberOfSubstrings(self, s: str) -> int:\n a = b = c = 0 # counter for letter a/b/c\n ans, i, n = 0, 0, len(s) # i: slow pointer\n for j, letter in enumerate(s): # j: fast pointer\n if letter == 'a': a += 1 # increment a/b/c accordingly\n elif letter == 'b': b += 1\n else: c += 1\n while a > 0 and b > 0 and c > 0: # if all of a/b/c are contained, move slow pointer\n ans += n-j # count possible substr, if a substr ends at j, then there are n-j substrs to the right that are containing all a/b/c\n if s[i] == 'a': a -= 1 # decrement counter accordingly\n elif s[i] == 'b': b -= 1\n else: c -= 1\n i += 1 # move slow pointer\n return ans ", "_____no_output_____" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code", "code", "code", "code", "code", "code" ] ]
d0de6d77f8ba2ae1539eb365be42eb8ea159325f
211,743
ipynb
Jupyter Notebook
climate_starter.ipynb
pbhoomika888/sqlalchemy_challenge
21ca2c47722386513c52d4dd0ab00d5b5403945d
[ "ADSL" ]
null
null
null
climate_starter.ipynb
pbhoomika888/sqlalchemy_challenge
21ca2c47722386513c52d4dd0ab00d5b5403945d
[ "ADSL" ]
null
null
null
climate_starter.ipynb
pbhoomika888/sqlalchemy_challenge
21ca2c47722386513c52d4dd0ab00d5b5403945d
[ "ADSL" ]
null
null
null
75.649518
64,539
0.718366
[ [ [ "%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt\nimport seaborn as sns", "_____no_output_____" ], [ "import numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "import datetime as dt", "_____no_output_____" ] ], [ [ "# Reflect Tables into SQLAlchemy ORM", "_____no_output_____" ] ], [ [ "# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func", "_____no_output_____" ], [ "engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")", "_____no_output_____" ], [ "# reflect an existing database into a new model\nBase = automap_base()\n# reflect the tables\nBase.prepare(engine, reflect=True)", "_____no_output_____" ], [ "# We can view all of the classes that automap found\nBase.classes.keys()", "_____no_output_____" ], [ "# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station", "_____no_output_____" ], [ "# Create our session (link) from Python to the DB\nsession = Session(engine)", "_____no_output_____" ] ], [ [ "# Exploratory Climate Analysis", "_____no_output_____" ] ], [ [ "# Design a query to retrieve the last 12 months of precipitation data and plot the results\nlast_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\nlast_date\n# Calculate the date 1 year ago from the last data point in the database\nlast_year = dt.datetime.strptime(last_date, \"%Y-%m-%d\") - dt.timedelta(days=365)\nlast_year\n", "_____no_output_____" ], [ "# Perform a query to retrieve the data and precipitation scores\nquery = session.query(Measurement.date,Measurement.prcp).filter(Measurement.date>=last_year).all()\n \nquery", "_____no_output_____" ], [ "# Save the query results as a Pandas DataFrame and set the index to the date column\nprec_df = pd.DataFrame(query,columns=['date','precipitation'])\nprec_df\nprec_df['date']= pd.to_datetime(prec_df['date']) #format= '%y-%m-%d')\n# set the index\nprec_df.set_index(\"date\",inplace=True)\n# Sort the dataframe by date\nprec_df = prec_df.sort_values(by= \"date\",ascending=True)\nprec_df.head(20)", "_____no_output_____" ], [ "# Use Pandas Plotting with Matplotlib to plot the data\nprec_df.plot(title = \"Precipitation (12 months)\", color ='blue', alpha = 0.8 , figsize =(10,6))\nplt.legend(loc='upper center',prop={'size':10})\nplt.savefig(\"Images/Precipitation.png\")\nplt.show()", "_____no_output_____" ] ], [ [ "![precipitation](Images/precipitation.png)", "_____no_output_____" ] ], [ [ "# Use Pandas to calcualte the summary statistics for the precipitation data\nprec_df.describe()", "_____no_output_____" ] ], [ [ "![describe](Images/describe.png)", "_____no_output_____" ] ], [ [ "# Design a query to show how many stations are available in this dataset?\nstation_count = session.query(Station.station).count()\nstation_count", "_____no_output_____" ], [ "# What are the most active stations? (i.e. 
what stations have the most rows)?\n# List the stations and the counts in descending order.\nactive_station = session.query(Measurement.station, func.count(Measurement.station)).\\\n group_by(Measurement.station).\\\n order_by(func.count(Measurement.station).desc()).all()\nactive_station", "_____no_output_____" ], [ "# Using the station id from the previous query, calculate the lowest temperature recorded, \n# highest temperature recorded, and average temperature of the most active station?\ntemp = [Measurement.station,\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs),]\nall_temp = session.query(*temp).group_by(Measurement.station).\\\n order_by(func.count(Measurement.station).desc()).all()\nall_temp", "_____no_output_____" ], [ "# Choose the station with the highest number of temperature observations.\nhighest_temp = session.query(Measurement.station,func.count(Measurement.tobs)).\\\n group_by(Measurement.tobs).\\\n order_by(func.count(Measurement.tobs).desc()).all()\nhighest_temp", "_____no_output_____" ], [ "# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\nlast_year_data = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date>=last_year).all()\nlast_year_data\n\nlast_year_data_df = pd.DataFrame(last_year_data)\nlast_year_data_df\nlast_year_data_df.hist()\nplt.title(\"Temperature over 12 months\")\nplt.ylabel(\"Frequency\")\nplt.legend(\"tobs\",loc='upper right')\nplt.savefig(\"Images/Temperature over 12 months\")\nplt.tight_layout\nplt.show()", "_____no_output_____" ] ], [ [ "![precipitation](Images/station-histogram.png)", "_____no_output_____" ] ], [ [ "# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d' \n# and return the minimum, average, and maximum temperatures for that range of dates\ndef calc_temps(start_date, end_date):\n \"\"\"TMIN, TAVG, and TMAX for a list of dates.\n \n Args:\n start_date (string): A date string in the format %Y-%m-%d\n end_date (string): A date string in the format %Y-%m-%d\n \n Returns:\n TMIN, TAVE, and TMAX\n \"\"\"\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n# function usage example\nprint(calc_temps('2012-02-28', '2012-03-05'))", "[(62.0, 69.57142857142857, 74.0)]\n" ], [ "# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax \n\n# for your trip using the previous year's data for those same dates.\nTemp = calc_temps('2016-08-23','2017-08-23')\nTemp", "_____no_output_____" ], [ "# Plot the results from your previous query as a bar chart. 
\n# Use \"Trip Avg Temp\" as your Title\n# Use the average temperature for the y value\n# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)\nplt.figure(figsize=(3,6))\nsns.barplot(data=Temp,color = \"lightsalmon\")\nplt.ylabel('Temp(F)')\nplt.title(\"Trip Avg Temp\")\nplt.tight_layout\nplt.savefig(\"Images/Trip Avg Temp.png\")\nplt.show()", "_____no_output_____" ], [ "def precipitation(start_date, end_date):\n select_column = [Measurement.station, \n Station.name, \n Station.latitude, \n Station.longitude, \n Station.elevation, \n Measurement.prcp]\n\n return session.query(*select_column).\\\n filter(Measurement.station == Station.station).filter(Measurement.date >= start_date).\\\nfilter(Measurement.date <= end_date).group_by(Measurement.station).order_by(Measurement.prcp.desc()).all()\nprint(precipitation('2016-02-26','2016-03-02'))", "[('USC00513117', 'KANEOHE 838.1, HI US', 21.4234, -157.8015, 14.6, 0.38), ('USC00514830', 'KUALOA RANCH HEADQUARTERS 886.9, HI US', 21.5213, -157.8374, 7.0, 0.36), ('USC00519281', 'WAIHEE 837.5, HI US', 21.45167, -157.84888999999998, 32.9, 0.3), ('USC00516128', 'MANOA LYON ARBO 785.2, HI US', 21.3331, -157.8025, 152.4, 0.04), ('USC00519523', 'WAIMANALO EXPERIMENTAL FARM, HI US', 21.33556, -157.71139, 19.5, 0.0), ('USC00519397', 'WAIKIKI 717.2, HI US', 21.2716, -157.8168, 3.0, 0.0), ('USC00517948', 'PEARL CITY, HI US', 21.3934, -157.9751, 11.9, None)]\n" ] ], [ [ "## Optional Challenge Assignment", "_____no_output_____" ] ], [ [ "# Create a query that will calculate the daily normals \n# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)\n\ndef daily_normals(date):\n \"\"\"Daily Normals.\n \n Args:\n date (str): A date string in the format '%m-%d'\n \n Returns:\n A list of tuples containing the daily normals, tmin, tavg, and tmax\n \n \"\"\"\n \n sel = [func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)]\n return session.query(*sel).filter(func.strftime(\"%m-%d\", Measurement.date) == date).all()\n \ndaily_normals(\"01-01\")", "_____no_output_____" ], [ "# calculate the daily normals for your trip\n# push each tuple of calculations into a list called `normals`\n\n# Set the start and end date of the trip\n\n# Use the start and end date to create a range of dates\n\n# Stip off the year and save a list of %m-%d strings\n\n# Loop through the list of %m-%d strings and calculate the normals for each date\n", "_____no_output_____" ], [ "# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index\n", "_____no_output_____" ], [ "# Plot the daily normals as an area plot with `stacked=False`\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0de7255692f1cb2babed043494b21f43ee2c308
61,625
ipynb
Jupyter Notebook
II Machine Learning & Deep Learning/02_Decision Tree. A Supervised Classification Model/02session_decision-tree.ipynb
isisfisica/resolving-python-data-science
5e40c8f4bca28e1c74dd28d2456d3cd6486d2394
[ "MIT" ]
null
null
null
II Machine Learning & Deep Learning/02_Decision Tree. A Supervised Classification Model/02session_decision-tree.ipynb
isisfisica/resolving-python-data-science
5e40c8f4bca28e1c74dd28d2456d3cd6486d2394
[ "MIT" ]
null
null
null
II Machine Learning & Deep Learning/02_Decision Tree. A Supervised Classification Model/02session_decision-tree.ipynb
isisfisica/resolving-python-data-science
5e40c8f4bca28e1c74dd28d2456d3cd6486d2394
[ "MIT" ]
null
null
null
34.274194
1,701
0.44073
[ [ [ "<font size=\"+5\">#02 | Decision Tree. A Supervised Classification Model</font>", "_____no_output_____" ], [ "- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)\n- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄", "_____no_output_____" ], [ "# Discipline to Search Solutions in Google", "_____no_output_____" ], [ "> Apply the following steps when **looking for solutions in Google**:\n>\n> 1. **Necesity**: How to load an Excel in Python?\n> 2. **Search in Google**: by keywords\n> - `load excel python`\n> - ~~how to load excel in python~~\n> 3. **Solution**: What's the `function()` that loads an Excel in Python?\n> - A Function to Programming is what the Atom to Phisics.\n> - Every time you want to do something in programming\n> - **You will need a `function()`** to make it\n> - Theferore, you must **detect parenthesis `()`**\n> - Out of all the words that you see in a website\n> - Because they indicate the presence of a `function()`.", "_____no_output_____" ], [ "# Load the Data", "_____no_output_____" ], [ "> Load the Titanic dataset with the below commands\n> - This dataset **people** (rows) aboard the Titanic\n> - And their **sociological characteristics** (columns)\n> - The aim of this dataset is to predict the probability to `survive`\n> - Based on the social demographic characteristics.", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\ndf = sns.load_dataset(name='titanic').iloc[:, :4]", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "# `DecisionTreeClassifier()` Model in Python", "_____no_output_____" ], [ "## Build the Model", "_____no_output_____" ], [ "> 1. **Necesity**: Build Model\n> 2. **Google**: How do you search for the solution?\n> 3. **Solution**: Find the `function()` that makes it happen", "_____no_output_____" ], [ "## Code Thinking\n\n> Which function computes the Model?\n> - `fit()`\n>\n> How could can you **import the function in Python**?", "_____no_output_____" ] ], [ [ "fit()", "_____no_output_____" ], [ "model.fit()", "_____no_output_____" ] ], [ [ "`model = ?`", "_____no_output_____" ] ], [ [ "from sklearn.tree import DecisionTreeClassifier", "_____no_output_____" ], [ "model = DecisionTreeClassifier()", "_____no_output_____" ], [ "model.__dict__", "_____no_output_____" ], [ "model.fit()", "_____no_output_____" ] ], [ [ "### Separate Variables for the Model\n\n> Regarding their role:\n> 1. **Target Variable `y`**\n>\n> - [ ] What would you like **to predict**?\n>\n> 2. 
**Explanatory Variable `X`**\n>\n> - [ ] Which variable will you use **to explain** the target?", "_____no_output_____" ] ], [ [ "explanatory = df.drop(columns='survived')\ntarget = df.survived", "_____no_output_____" ] ], [ [ "### Finally `fit()` the Model", "_____no_output_____" ] ], [ [ "model.__dict__", "_____no_output_____" ], [ "model.fit(X=explanatory, y=target)", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "pd.get_dummies(data=df)", "_____no_output_____" ], [ "df = pd.get_dummies(data=df, drop_first=True)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "explanatory = df.drop(columns='survived')", "_____no_output_____" ], [ "target = df.survived", "_____no_output_____" ], [ "model.fit(X=explanatory, y=target)", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.isna().sum()", "_____no_output_____" ], [ "df.fillna('hola')", "_____no_output_____" ], [ "df.dropna(inplace=True) # df = df.dropna()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "df.dropna(inplace=True) # df = df.dropna()", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "explanatory = df.drop(columns='survived')\ntarget = df.survived", "_____no_output_____" ], [ "model.fit(X=explanatory, y=target)", "_____no_output_____" ] ], [ [ "## Calculate a Prediction with the Model", "_____no_output_____" ], [ "> - `model.predict_proba()`", "_____no_output_____" ] ], [ [ "model.predict_proba()", "_____no_output_____" ] ], [ [ "## Model Visualization", "_____no_output_____" ], [ "> - `tree.plot_tree()`", "_____no_output_____" ], [ "## Model Interpretation", "_____no_output_____" ], [ "> Why is `sex` the most important column? What does it have to do with **EDA** (Exploratory Data Analysis)?", "_____no_output_____" ] ], [ [ "%%HTML\n\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/7VeUPuFGJHk\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>", "_____no_output_____" ] ], [ [ "# Prediction vs Reality", "_____no_output_____" ], [ "> How good is our model?", "_____no_output_____" ] ], [ [ "dfsel = df[['survived']].copy()", "_____no_output_____" ], [ "dfsel['pred'] = model.predict(X=explanatory)", "_____no_output_____" ], [ "dfsel.sample(10)", "_____no_output_____" ], [ "comp = dfsel.survived == dfsel.pred", "_____no_output_____" ], [ "comp.sum()", "_____no_output_____" ], [ "comp.sum()/714", "_____no_output_____" ], [ "comp.mean()", "_____no_output_____" ] ], [ [ "## Accuracy", "_____no_output_____" ], [ "> - `model.score()`", "_____no_output_____" ] ], [ [ "model.score(X=explanatory, y=target)", "_____no_output_____" ] ], [ [ "## Confusion Matrix", "_____no_output_____" ], [ "> 1. **Sensitivity** (correct prediction on positive value, $y=1$)\n> 2. **Specificity** (correct prediction on negative value $y=0$).", "_____no_output_____" ], [ "## ROC Curve", "_____no_output_____" ], [ "> A way to summarise all the metrics (score, sensitivity & specificity)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d0de73f93c609faf2e46ac3b68b2a6abbf8f70ed
6,787
ipynb
Jupyter Notebook
ipynb/parse_java_methods_0x01.ipynb
mindscan-de/FluentGenesis-Classifier
07d7d5cd7a3798c413379bf57cf99a2ea9aec873
[ "MIT" ]
2
2020-08-19T05:09:48.000Z
2022-03-08T11:24:43.000Z
ipynb/parse_java_methods_0x01.ipynb
mindscan-de/FluentGenesis-Classifier
07d7d5cd7a3798c413379bf57cf99a2ea9aec873
[ "MIT" ]
null
null
null
ipynb/parse_java_methods_0x01.ipynb
mindscan-de/FluentGenesis-Classifier
07d7d5cd7a3798c413379bf57cf99a2ea9aec873
[ "MIT" ]
null
null
null
31.421296
133
0.568734
[ [ [ "# Parse Java Methods\n----\n(C) Maxim Gansert, 2020, Mindscan Engineering", "_____no_output_____" ] ], [ [ "import sys\nsys.path.insert(0,'../src')\n\nimport os\nimport datetime", "_____no_output_____" ], [ "from com.github.c2nes.javalang import tokenizer, parser, ast\nfrom de.mindscan.fluentgenesis.dataprocessing.method_extractor import tokenize_file, extract_allmethods_from_compilation_unit\n", "_____no_output_____" ], [ "from de.mindscan.fluentgenesis.bpe.bpe_model import BPEModel\nfrom de.mindscan.fluentgenesis.bpe.bpe_encoder_decoder import SimpleBPEEncoder\nfrom de.mindscan.fluentgenesis.dataprocessing.method_dataset import MethodDataset", "_____no_output_____" ], [ "def split_methodbody_into_multiple_lines(method_body):\n result = []\n current_line_number = -1\n current_line_tokens = []\n for token in method_body:\n token_line = token.position[0]\n \n if token_line != current_line_number:\n current_line_number = token_line\n if len(current_line_tokens) != 0:\n result.append(current_line_tokens)\n current_line_tokens = []\n current_line_tokens.append(token.value)\n pass\n if len(current_line_tokens) !=0:\n result.append(current_line_tokens)\n pass\n return result\n", "_____no_output_____" ], [ "def process_source_file(dataset_directory, source_file_path, encoder, dataset):\n # derive the full source file path\n full_source_file_path = os.path.join( dataset_directory, source_file_path);\n \n # Work on the source file\n java_tokenlist = tokenize_file(full_source_file_path)\n parsed_compilation_unit = parser.parse(java_tokenlist)\n \n # collect file names, line numbers, method names, class names etc \n all_methods_per_source = extract_allmethods_from_compilation_unit(parsed_compilation_unit, java_tokenlist)\n \n for single_method in all_methods_per_source:\n try:\n method_name = single_method['method_name']\n method_class_name = single_method['class_name']\n method_body = single_method['method_body']\n \n multi_line_body = split_methodbody_into_multiple_lines(method_body)\n one_line = [item for sublist in multi_line_body for item in sublist]\n print(one_line)\n \n # encode body code and methodnames using the bpe-vocabulary\n bpe_encoded_methodname = encoder.encode( [ method_name ] )\n bpe_encoded_methodbody_ml = encoder.encode_multi_line( multi_line_body )\n \n # do some calculations on the tokens and on the java code, so selection of smaller datasets is possible\n bpe_encoded_method_name_length = len(bpe_encoded_methodname)\n bpe_encoded_method_body_length = sum([len(line) for line in bpe_encoded_methodbody_ml])\n \n # save this into dataset\n method_data = { \n \"source_file_path\": source_file_path,\n \"method_class_name\": method_class_name,\n \"method_name\": method_name,\n \"encoded_method_name_length\": bpe_encoded_method_name_length,\n \"encoded_method_name\": bpe_encoded_methodname,\n \"encoded_method_body_length\": bpe_encoded_method_body_length,\n \"encoded_method_body\": bpe_encoded_methodbody_ml,\n \"method_body\": method_body \n }\n dataset.add_method_data( method_data )\n except:\n # ignore problematic method\n pass\n", "_____no_output_____" ], [ "\nmodel = BPEModel(\"16K-full\", \"../src/de/mindscan/fluentgenesis/bpe/\")\nmodel.load_hparams()\n\ndataset_directory = 'D:\\\\Downloads\\\\Big-Code-excerpt\\\\'\n\nmodel_vocabulary = model.load_tokens()\nmodel_bpe_data = model.load_bpe_pairs()\n\nencoder = SimpleBPEEncoder(model_vocabulary, model_bpe_data)\n\nmethod_dataset = 
MethodDataset(dataset_name='parseMethodPythonNotebook1.jsonl')\nmethod_dataset.prepareNewDataset(dataset_directory)\n \nprocess_source_file(dataset_directory,'wordhash/WordMap.java' ,encoder, method_dataset )\n\nmethod_dataset.finish()\n", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d0de80dba123dd8074e9c7365764c4386cd66915
48,829
ipynb
Jupyter Notebook
supervised/classification/neural-networks/mnist/YT tutorial - Weights and biases.ipynb
HarshaRamayanam/ml-algorithms
690fc5b252affafe9534e86a64303ed84feca509
[ "MIT" ]
null
null
null
supervised/classification/neural-networks/mnist/YT tutorial - Weights and biases.ipynb
HarshaRamayanam/ml-algorithms
690fc5b252affafe9534e86a64303ed84feca509
[ "MIT" ]
null
null
null
supervised/classification/neural-networks/mnist/YT tutorial - Weights and biases.ipynb
HarshaRamayanam/ml-algorithms
690fc5b252affafe9534e86a64303ed84feca509
[ "MIT" ]
null
null
null
37.531899
192
0.438244
[ [ [ "# Simple Perceptron", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ], [ "%load_ext tensorboard", "_____no_output_____" ], [ "# Import the dataset\n\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()", "_____no_output_____" ], [ "# For now, we'll focus on digit images of fives\n\nis_five_train = y_train == 5\nis_five_test = y_test == 5\n\nlabels = [\"Not five\", \"five\"]", "_____no_output_____" ], [ "# Specifying the input shape for the perceptron (here, img_width x img_height)\nimg_width = x_train.shape[1]\nimg_height = x_train.shape[2]", "_____no_output_____" ] ], [ [ "# Creating a model\n\n**Step 1: Design your model**\n\nModel 1:\n\n```\n(28, 28) ---> Flatten (784) ---> Dense (1)\n\nloss : mse (default attr.)\noptimizer : adam (default attr.)\n```", "_____no_output_____" ] ], [ [ "# Imports required for building model\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Flatten, Dense\n\n# Create a simple perceptron model\nmodel = Sequential(name=\"model_1\")\nmodel.add(Flatten(input_shape=(img_width, img_height)))\nmodel.add(Dense(1))\n\n# Shows how your model was built\nmodel.summary()", "Model: \"model_1\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nflatten (Flatten) (None, 784) 0 \n_________________________________________________________________\ndense (Dense) (None, 1) 785 \n=================================================================\nTotal params: 785\nTrainable params: 785\nNon-trainable params: 0\n_________________________________________________________________\n" ] ], [ [ "**Step 2: Specify your model parameters**", "_____no_output_____" ] ], [ [ "model_params = {\n \"epochs\": 10,\n \"batch_size\": 32,\n \"loss\": \"mse\",\n \"optimizer\": \"adam\",\n}", "_____no_output_____" ] ], [ [ "**Step 3: Compile the model with the parameters**", "_____no_output_____" ] ], [ [ "model.compile(loss=model_params[\"loss\"],\n optimizer=model_params[\"optimizer\"],\n metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "**Step 4: Fit / Train the model**\n\n***(Optional)*** Specify tensorboard callbacks. \nRun tensorboard command first (after below cell) and then run the below cell. 
Then click on the refresh icon at the top right of tensorboard after every epoch to track the accuracy and loss live", "_____no_output_____" ] ], [ [ "from tensorflow.keras.callbacks import TensorBoard\n\nlog_dir = \"logs/fit/\" + model.name\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n\nmodel.fit(x_train, is_five_train, \n          epochs=model_params[\"epochs\"], \n          batch_size=model_params[\"batch_size\"],\n          validation_data=(x_test, is_five_test), \n          callbacks=[tb_callback])\n\n", "Epoch 1/10\n1875/1875 [==============================] - 5s 2ms/step - loss: 846.0928 - accuracy: 0.5121 - val_loss: 18.9043 - val_accuracy: 0.5069\nEpoch 2/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 12.5497 - accuracy: 0.5668 - val_loss: 1.0088 - val_accuracy: 0.6655\nEpoch 3/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 2.6123 - accuracy: 0.6344 - val_loss: 8.5548 - val_accuracy: 0.7142\nEpoch 4/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.5394 - accuracy: 0.6092 - val_loss: 1.2580 - val_accuracy: 0.5962\nEpoch 5/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 8.3724 - accuracy: 0.5965 - val_loss: 11.7147 - val_accuracy: 0.2687\nEpoch 6/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 3.3527 - accuracy: 0.6406 - val_loss: 2.6416 - val_accuracy: 0.5752\nEpoch 7/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.6351 - accuracy: 0.5947 - val_loss: 3.1100 - val_accuracy: 0.8704\nEpoch 8/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.7725 - accuracy: 0.5806 - val_loss: 14.9855 - val_accuracy: 0.8948\nEpoch 9/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 3.5270 - accuracy: 0.6323 - val_loss: 1.4845 - val_accuracy: 0.4890\nEpoch 10/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.1996 - accuracy: 0.5977 - val_loss: 3.4266 - val_accuracy: 0.7215\n" ], [ "%tensorboard --logdir logs/fit", "_____no_output_____" ] ], [ [ "**Step 5: Evaluate the model**\n\nPredict the test dataset and analyze the predictions against the corresponding test labels to check whether they make sense.", "_____no_output_____" ] ], [ [ "# Predict the first 10,000 test samples\nn_samples = 10000\npredictions = model.predict(x_test[:n_samples, :, :])\ntrue_labels = y_test[:n_samples]\nis_five = is_five_test[:n_samples]\n\npd.DataFrame(data={\n    \"predicted_label\": predictions.flatten(),\n    \"true_label\": true_labels,\n    \"is_five\": is_five\n})", "_____no_output_____" ] ], [ [ "**Step 6: Debug, rebuild, retrain and re-evaluate your model**", "_____no_output_____" ] ], [ [ "# Let's try more epochs, say 100\nmodel_params[\"epochs\"] = 100", "_____no_output_____" ], [ "# create a new model with the same architecture\nnew_model = tf.keras.models.clone_model(model)\nnew_model._name = \"model_2\"\nnew_model.summary()", "Model: \"model_2\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nflatten (Flatten)            (None, 784)               0         \n_________________________________________________________________\ndense (Dense)                (None, 1)                 785       \n=================================================================\nTotal params: 785\nTrainable params: 785\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "new_model.compile(loss=model_params[\"loss\"],\n              optimizer=model_params[\"optimizer\"],\n              metrics=['accuracy'])\n\nlog_dir 
= \"logs/fit/\" + new_model.name\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n\nnew_model.fit(x_train, is_five_train, \n epochs=model_params[\"epochs\"], \n batch_size=model_params[\"batch_size\"],\n validation_data=(x_test, is_five_test), \n callbacks=[tb_callback])\n", "Epoch 1/100\n1875/1875 [==============================] - 5s 2ms/step - loss: 1075.7101 - accuracy: 0.5077 - val_loss: 22.3630 - val_accuracy: 0.5880\nEpoch 2/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 10.1214 - accuracy: 0.5674 - val_loss: 0.8949 - val_accuracy: 0.6680\nEpoch 3/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 3.1649 - accuracy: 0.6398 - val_loss: 6.1561 - val_accuracy: 0.2850\nEpoch 4/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 9.0277 - accuracy: 0.5779 - val_loss: 7.5212 - val_accuracy: 0.5772\nEpoch 5/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.9109 - accuracy: 0.6067 - val_loss: 7.3839 - val_accuracy: 0.8926\nEpoch 6/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.4243 - accuracy: 0.5946 - val_loss: 1.3850 - val_accuracy: 0.5076\nEpoch 7/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.9954 - accuracy: 0.5996 - val_loss: 7.5086 - val_accuracy: 0.4802\nEpoch 8/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.3299 - accuracy: 0.5772 - val_loss: 0.8847 - val_accuracy: 0.8289\nEpoch 9/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.3561 - accuracy: 0.5939 - val_loss: 4.4207 - val_accuracy: 0.7306\nEpoch 10/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.5013 - accuracy: 0.5843 - val_loss: 1.6028 - val_accuracy: 0.7339\nEpoch 11/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.3170 - accuracy: 0.6171 - val_loss: 1.6479 - val_accuracy: 0.6641\nEpoch 12/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.8225 - accuracy: 0.5762 - val_loss: 8.8483 - val_accuracy: 0.7039\nEpoch 13/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.8521 - accuracy: 0.5827 - val_loss: 3.4935 - val_accuracy: 0.8158\nEpoch 14/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.8270 - accuracy: 0.5957 - val_loss: 6.8380 - val_accuracy: 0.3335\nEpoch 15/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.4007 - accuracy: 0.5757 - val_loss: 10.9967 - val_accuracy: 0.3341\nEpoch 16/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.1901 - accuracy: 0.5791 - val_loss: 3.5838 - val_accuracy: 0.7660\nEpoch 17/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.1940 - accuracy: 0.5978 - val_loss: 2.6676 - val_accuracy: 0.7571\nEpoch 18/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.4512 - accuracy: 0.5899 - val_loss: 4.1473 - val_accuracy: 0.5156\nEpoch 19/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.9355 - accuracy: 0.5803 - val_loss: 1.1060 - val_accuracy: 0.5152\nEpoch 20/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.6911 - accuracy: 0.6258 - val_loss: 3.8794 - val_accuracy: 0.5651\nEpoch 21/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.9757 - accuracy: 0.5804 - val_loss: 6.8071 - val_accuracy: 0.3804\nEpoch 22/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.1952 - accuracy: 0.5839 - val_loss: 1.3648 - 
val_accuracy: 0.6957\nEpoch 23/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 11.3287 - accuracy: 0.5919 - val_loss: 11.6835 - val_accuracy: 0.2638\nEpoch 24/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.6961 - accuracy: 0.5989 - val_loss: 3.5491 - val_accuracy: 0.7919\nEpoch 25/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 8.1636 - accuracy: 0.5791 - val_loss: 2.4758 - val_accuracy: 0.4574\nEpoch 26/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.3816 - accuracy: 0.5808 - val_loss: 4.7198 - val_accuracy: 0.5978\nEpoch 27/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.2695 - accuracy: 0.5779 - val_loss: 2.3531 - val_accuracy: 0.7773\nEpoch 28/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 10.1839 - accuracy: 0.5726 - val_loss: 2.1647 - val_accuracy: 0.8572\nEpoch 29/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.2325 - accuracy: 0.5874 - val_loss: 15.6673 - val_accuracy: 0.2325\nEpoch 30/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.2225 - accuracy: 0.5795 - val_loss: 4.7152 - val_accuracy: 0.2358\nEpoch 31/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.9727 - accuracy: 0.6087 - val_loss: 1.7701 - val_accuracy: 0.5184\nEpoch 32/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.2206 - accuracy: 0.5982 - val_loss: 2.7474 - val_accuracy: 0.2190\nEpoch 33/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.9630 - accuracy: 0.6188 - val_loss: 1.1045 - val_accuracy: 0.4047\nEpoch 34/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.0232 - accuracy: 0.5965 - val_loss: 1.9290 - val_accuracy: 0.3890\nEpoch 35/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 8.1054 - accuracy: 0.5820 - val_loss: 4.1276 - val_accuracy: 0.6441\nEpoch 36/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.8139 - accuracy: 0.5983 - val_loss: 15.3575 - val_accuracy: 0.5655\nEpoch 37/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.6663 - accuracy: 0.6097 - val_loss: 2.9330 - val_accuracy: 0.8461\nEpoch 38/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.3861 - accuracy: 0.5926 - val_loss: 0.8530 - val_accuracy: 0.8511\nEpoch 39/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.1475 - accuracy: 0.6318 - val_loss: 15.1410 - val_accuracy: 0.2268\nEpoch 40/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.8630 - accuracy: 0.5900 - val_loss: 0.9914 - val_accuracy: 0.8374\nEpoch 41/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.4843 - accuracy: 0.5995 - val_loss: 3.2436 - val_accuracy: 0.5349\nEpoch 42/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 3.1219 - accuracy: 0.6244 - val_loss: 4.3599 - val_accuracy: 0.2940\nEpoch 43/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.7674 - accuracy: 0.5805 - val_loss: 9.4905 - val_accuracy: 0.7620\nEpoch 44/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.2947 - accuracy: 0.5976 - val_loss: 10.6703 - val_accuracy: 0.3834\nEpoch 45/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 11.1182 - accuracy: 0.6002 - val_loss: 3.4123 - val_accuracy: 0.7699\nEpoch 46/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.0375 - accuracy: 0.5855 - val_loss: 
7.9611 - val_accuracy: 0.6393\nEpoch 47/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 4.3514 - accuracy: 0.6082 - val_loss: 1.4015 - val_accuracy: 0.5497\nEpoch 48/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.3363 - accuracy: 0.6073 - val_loss: 8.3178 - val_accuracy: 0.2459\nEpoch 49/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 3.9161 - accuracy: 0.6015 - val_loss: 3.8096 - val_accuracy: 0.8481\nEpoch 50/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.7622 - accuracy: 0.5849 - val_loss: 6.1672 - val_accuracy: 0.8853\nEpoch 51/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.1732 - accuracy: 0.5929 - val_loss: 1.4365 - val_accuracy: 0.9011\nEpoch 52/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 6.2126 - accuracy: 0.6012 - val_loss: 6.4458 - val_accuracy: 0.4286\nEpoch 53/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.3233 - accuracy: 0.5842 - val_loss: 1.0799 - val_accuracy: 0.8157\nEpoch 54/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 5.5092 - accuracy: 0.5994 - val_loss: 16.7491 - val_accuracy: 0.8223\nEpoch 55/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.6637 - accuracy: 0.5868 - val_loss: 4.3699 - val_accuracy: 0.6262\nEpoch 56/100\n1875/1875 [==============================] - 4s 2ms/step - loss: 7.4689 - accuracy: 0.5770 - val_loss: 33.9254 - val_accuracy: 0.8629\nEpoch 57/100\n" ], [ "%tensorboard --logdir logs/fit", "_____no_output_____" ], [ "n_samples = 10000\npredictions = new_model.predict(x_test[:n_samples, :, :])\ntrue_labels = y_test[:n_samples]\nis_five = is_five_test[:n_samples]\n\npd.DataFrame(data={\n \"predicted_label\": predictions.flatten(),\n \"true_label\": true_labels,\n \"is_five\": is_five\n})", "_____no_output_____" ] ], [ [ "### We haven't tried an activation function. That's why our model is broken. 
Let's add an activation to our perceptron (the cell below uses ReLU; a sigmoid would be the classic choice for a binary output)", "_____no_output_____" ] ], [ [ "model_params[\"epochs\"] = 10\nmodel_params", "_____no_output_____" ], [ "new_model = Sequential(name=\"model_3\")\nnew_model.add(Flatten(input_shape=(img_width, img_height)))\nnew_model.add(Dense(1, activation='relu'))\n\nnew_model.summary()", "Model: \"model_3\"\n_________________________________________________________________\nLayer (type)                 Output Shape              Param #   \n=================================================================\nflatten_2 (Flatten)          (None, 784)               0         \n_________________________________________________________________\ndense_2 (Dense)              (None, 1)                 785       \n=================================================================\nTotal params: 785\nTrainable params: 785\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "new_model.compile(loss=model_params[\"loss\"],\n              optimizer=model_params[\"optimizer\"],\n              metrics=['accuracy'])\n\nlog_dir = \"logs/fit/\" + new_model.name\ntb_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)\n\n\nnew_model.fit(x_train, is_five_train, \n          epochs=model_params[\"epochs\"], \n          batch_size=model_params[\"batch_size\"],\n          validation_data=(x_test, is_five_test), \n          callbacks=[tb_callback])", "Epoch 1/10\n1875/1875 [==============================] - 5s 3ms/step - loss: 1.1484 - accuracy: 0.9082 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 2/10\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.0908 - accuracy: 0.9092 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 3/10\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.0887 - accuracy: 0.9113 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 4/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0900 - accuracy: 0.9100 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 5/10\n1875/1875 [==============================] - 5s 2ms/step - loss: 0.0897 - accuracy: 0.9103 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 6/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0904 - accuracy: 0.9096 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 7/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0901 - accuracy: 0.9099 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 8/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0925 - accuracy: 0.9075 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 9/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0907 - accuracy: 0.9093 - val_loss: 0.0892 - val_accuracy: 0.9108\nEpoch 10/10\n1875/1875 [==============================] - 4s 2ms/step - loss: 0.0897 - accuracy: 0.9103 - val_loss: 0.0892 - val_accuracy: 0.9108\n" ], [ "%tensorboard --logdir logs/fit", "_____no_output_____" ], [ "n_samples = 10000\npredictions = new_model.predict(x_test[:n_samples, :, :])\ntrue_labels = y_test[:n_samples]\nis_five = is_five_test[:n_samples]\n\npd.DataFrame(data={\n    \"predicted_label\": predictions.flatten(),\n    \"true_label\": true_labels,\n    \"is_five\": is_five\n})", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0de860959af8edf9ba48be0e64b3852521dcdfc
10,435
ipynb
Jupyter Notebook
.ipynb_checkpoints/Untitled-checkpoint.ipynb
yongjinjiang/WebScraping_MissionToMars
f63f3cb8b61e755d048e24cbc5eaaea325cfc1a2
[ "ADSL" ]
null
null
null
.ipynb_checkpoints/Untitled-checkpoint.ipynb
yongjinjiang/WebScraping_MissionToMars
f63f3cb8b61e755d048e24cbc5eaaea325cfc1a2
[ "ADSL" ]
3
2020-03-31T16:31:26.000Z
2021-12-13T19:52:20.000Z
.ipynb_checkpoints/Untitled-checkpoint.ipynb
yongjinjiang/WebScraping_MissionToMars
f63f3cb8b61e755d048e24cbc5eaaea325cfc1a2
[ "ADSL" ]
null
null
null
74.007092
1,603
0.677432
[ [ [ "from bs4 import BeautifulSoup\nimport requests\nimport pymongo\nfrom splinter import Browser\nimport pandas as pd\nimport numpy as np\nimport re\n\n###twitter\nimport json\nimport tweepy \nimport sys\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport os\nGOOGLE_CHROME_BIN=os.environ.get(\"GOOGLE_CHROME_BIN\")\nCHROMEDRIVER_PATH=os.environ.get(\"CHROMEDRIVER_PATH\")", "_____no_output_____" ], [ "GOOGLE_CHROME_BIN", "_____no_output_____" ], [ "CHROMEDRIVER_PATH", "_____no_output_____" ], [ "chrome_options = Options()\n# chrome_options.binary_location = GOOGLE_CHROME_BIN\n# chrome_options.add_argument('--headless')\n# chrome_options.add_argument('--disable-gpu')\n# chrome_options.add_argument('--no-sandbox')\n# chrome_options.add_argument('--disable-dev-shm-usage')\ndriver = webdriver.Chrome(chrome_options=chrome_options)\n# executable_path=CHROMEDRIVER_PATH,", "/Users/jyj/anaconda3/envs/PythonData/lib/python3.6/site-packages/ipykernel/__main__.py:7: DeprecationWarning: use options instead of chrome_options\n" ], [ "driver = webdriver.Chrome(chrome_options=chrome_options)\ndriver.get(\"https://www.wenzhao.ca/sign-in/\")\n\n# driver.get (\"https://www.facebook.com\")\n \ndriver.find_element_by_id(\"rcp_user_login\").send_keys(\"[email protected]\")\ndriver.find_element_by_id(\"rcp_user_pass\").send_keys(\"wenZhao#edc@wsx\")\ndriver.find_element_by_id(\"rcp_login_submit\").click()", "/Users/jyj/anaconda3/envs/PythonData/lib/python3.6/site-packages/ipykernel/__main__.py:1: DeprecationWarning: use options instead of chrome_options\n if __name__ == '__main__':\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d0de8b82ea1f482e0fe16ede3af42c81ca92f9e5
120,763
ipynb
Jupyter Notebook
intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb
agisga/deep-learning-v2-pytorch
87a83c20d385a28d347d8ce2ea31b60e2256f5fe
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb
agisga/deep-learning-v2-pytorch
87a83c20d385a28d347d8ce2ea31b60e2256f5fe
[ "MIT" ]
null
null
null
intro-to-pytorch/Part 8 - Transfer Learning (Exercises).ipynb
agisga/deep-learning-v2-pytorch
87a83c20d385a28d347d8ce2ea31b60e2256f5fe
[ "MIT" ]
null
null
null
65.206803
662
0.557952
[ [ [ "# Transfer Learning\n\nIn this notebook, you'll learn how to use pre-trained networks to solved challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html). \n\nImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks using an architecture called convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).\n\nOnce trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.\n\nWith `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models", "_____no_output_____" ] ], [ [ "Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`.", "_____no_output_____" ] ], [ [ "data_dir = 'Cat_Dog_data/Cat_Dog_data'\n\n# TODO: Define transforms for the training data and testing data\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(255),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [.229, 0.224, 0.225])]) \n\n# Pass transforms in here, then run the next cell to see how the transforms look\ntrain_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)\ntest_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size=64)", "_____no_output_____" ] ], [ [ "We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.html#id5). Let's print out the model architecture so we can see what's going on.", "_____no_output_____" ] ], [ [ "model = models.densenet121(pretrained=True)\nmodel", "/home/user/miniconda/envs/py36/lib/python3.6/site-packages/torchvision/models/densenet.py:212: UserWarning: nn.init.kaiming_normal is now deprecated in favor of nn.init.kaiming_normal_.\n nn.init.kaiming_normal(m.weight.data)\n" ] ], [ [ "This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. 
The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers.", "_____no_output_____" ] ], [ [ "# Freeze parameters so we don't backprop through them\nfor param in model.parameters():\n param.requires_grad = False\n\nfrom collections import OrderedDict\nclassifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(1024, 500)),\n ('relu', nn.ReLU()),\n ('fc2', nn.Linear(500, 2)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n \nmodel.classifier = classifier", "_____no_output_____" ] ], [ [ "With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU, leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.\n\nPyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backward passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')`, which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU.", "_____no_output_____" ] ], [ [ "import time", "_____no_output_____" ], [ "for device in ['cpu', 'cuda']:\n\n criterion = nn.NLLLoss()\n # Only train the classifier parameters, feature parameters are frozen\n optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)\n\n model.to(device)\n\n for ii, (inputs, labels) in enumerate(trainloader):\n\n # Move input and label tensors to the GPU\n inputs, labels = inputs.to(device), labels.to(device)\n\n start = time.time()\n\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if ii==3:\n break\n \n print(f\"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds\")", "Device = cpu; Time per batch: 11.147 seconds\nDevice = cuda; Time per batch: 0.014 seconds\n" ] ], [ [ "You can write device-agnostic code which will automatically use CUDA if it's enabled like so:\n```python\n# at beginning of the script\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n...\n\n# then whenever you get a new Tensor or Module\n# this won't copy if they are already on the desired device\ninput = data.to(device)\nmodel = MyModule(...).to(device)\n```\n\nFrom here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.\n\n>**Exercise:** Train a pretrained model to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, which is also a good model to try out first. 
Make sure you are only training the classifier and the parameters for the features part are frozen.", "_____no_output_____" ], [ "#### Resnet101", "_____no_output_____" ], [ "Load the ResNet101 model.", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(f\"Device {device}\")\n\nmodel = models.resnet101(pretrained=True)\nmodel", "Device cuda:0\n" ] ], [ [ "Freeze parameters.", "_____no_output_____" ] ], [ [ "for param in model.parameters():\n param.requires_grad = False", "_____no_output_____" ] ], [ [ "Replace model head.", "_____no_output_____" ] ], [ [ "model.fc = nn.Sequential(nn.Linear(2048, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 2),\n nn.LogSoftmax(dim=1))", "_____no_output_____" ], [ "model.to(device)", "_____no_output_____" ] ], [ [ "Define the loss function and the optimizer.", "_____no_output_____" ] ], [ [ "criterion = nn.NLLLoss()\noptimizer = optim.Adam(model.fc.parameters())", "_____no_output_____" ] ], [ [ "Training loop.", "_____no_output_____" ] ], [ [ "n_epochs = 1\nprint_every = 5\nstep, train_loss, train_acc, test_loss, test_acc = 0, 0, 0, 0, 0\n\ndef get_batch_acc(preds, labels):\n _, pred_class = preds.topk(1, dim=1)\n correct_class = (pred_class == labels.view(*pred_class.shape))\n batch_acc = torch.mean(correct_class.type(torch.FloatTensor))\n return batch_acc\n\nfor epoch in range(n_epochs):\n for images, labels in trainloader:\n images = images.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n # forward pass\n preds = model(images)\n # compute loss & accuracy\n batch_loss = criterion(preds, labels)\n batch_acc = get_batch_acc(preds, labels)\n # backward pass\n batch_loss.backward()\n # gradient descent step\n optimizer.step()\n # update running loss and accuracy\n train_loss += batch_loss.item() / print_every\n train_acc += batch_acc.item() / print_every\n \n if step % print_every == 0:\n # Evaluate on test data\n with torch.no_grad():\n model.eval()\n for test_images, test_labels in testloader:\n test_images, test_labels = test_images.to(device), test_labels.to(device)\n test_preds = model(test_images)\n batch_loss = criterion(test_preds, test_labels)\n batch_acc = get_batch_acc(test_preds, test_labels)\n test_loss += batch_loss.item() / len(testloader)\n test_acc += batch_acc.item() / len(testloader)\n print(f\"Epoch: {epoch} | Step: {step}/{int(len(trainloader) / print_every)} | Train loss: {train_loss} | Train acc: {train_acc} | Test loss: {test_loss} | Test acc: {test_acc}\")\n model.train()\n train_loss, train_acc, test_loss, test_acc = 0, 0, 0, 0\n \n step += 1", "Epoch: 0 | Step: 0/70 | Train loss: 0.1420997977256775 | Train acc: 0.09375 | Test loss: 0.5888391725718976 | Test acc: 0.5136718750000002\nEpoch: 0 | Step: 5/70 | Train loss: 0.6307643175125123 | Train acc: 0.628125 | Test loss: 0.23891626708209512 | Test acc: 0.9695312500000004\nEpoch: 0 | Step: 10/70 | Train loss: 0.28028063476085663 | Train acc: 0.9125 | Test loss: 0.12141264081001284 | Test acc: 0.9765625000000002\nEpoch: 0 | Step: 15/70 | Train loss: 0.24202772676944734 | Train acc: 0.909375 | Test loss: 0.17700428750831634 | Test acc: 0.9289062500000004\nEpoch: 0 | Step: 20/70 | Train loss: 0.24063588082790371 | Train acc: 0.8937499999999999 | Test loss: 0.12248691681306804 | Test acc: 0.9554687500000005\nEpoch: 0 | Step: 25/70 | Train loss: 0.17640648782253265 | Train acc: 0.934375 | Test loss: 0.060022399388253704 | Test acc: 0.9773437500000003\nEpoch: 0 | Step: 30/70 | Train loss: 
0.18905198574066162 | Train acc: 0.9124999999999999 | Test loss: 0.05537798898294568 | Test acc: 0.9785156250000003\nEpoch: 0 | Step: 35/70 | Train loss: 0.1329597905278206 | Train acc: 0.9375 | Test loss: 0.06576174749061464 | Test acc: 0.9738281250000004\nEpoch: 0 | Step: 40/70 | Train loss: 0.14060658365488055 | Train acc: 0.94375 | Test loss: 0.05532630463130773 | Test acc: 0.9777343750000004\nEpoch: 0 | Step: 45/70 | Train loss: 0.20088366121053694 | Train acc: 0.9031250000000001 | Test loss: 0.0554607379483059 | Test acc: 0.9816406250000005\nEpoch: 0 | Step: 50/70 | Train loss: 0.1727498099207878 | Train acc: 0.928125 | Test loss: 0.06095148560125382 | Test acc: 0.9765625000000004\nEpoch: 0 | Step: 55/70 | Train loss: 0.09459403157234192 | Train acc: 0.9562499999999999 | Test loss: 0.04441865370608867 | Test acc: 0.9839843750000005\nEpoch: 0 | Step: 60/70 | Train loss: 0.10896287113428114 | Train acc: 0.9562499999999999 | Test loss: 0.06307774395681919 | Test acc: 0.9757812500000005\nEpoch: 0 | Step: 65/70 | Train loss: 0.16338396668434144 | Train acc: 0.928125 | Test loss: 0.04214015202596784 | Test acc: 0.9851562500000004\nEpoch: 0 | Step: 70/70 | Train loss: 0.14495114535093306 | Train acc: 0.9500000000000001 | Test loss: 0.0413937923964113 | Test acc: 0.9855468750000004\nEpoch: 0 | Step: 75/70 | Train loss: 0.13258390128612518 | Train acc: 0.9437500000000001 | Test loss: 0.07264044333714992 | Test acc: 0.9746093750000003\nEpoch: 0 | Step: 80/70 | Train loss: 0.1904216706752777 | Train acc: 0.925 | Test loss: 0.08890908064786346 | Test acc: 0.9605468750000005\nEpoch: 0 | Step: 85/70 | Train loss: 0.16921156644821167 | Train acc: 0.93125 | Test loss: 0.04083143188618124 | Test acc: 0.9867187500000004\nEpoch: 0 | Step: 90/70 | Train loss: 0.14072314500808714 | Train acc: 0.9374999999999999 | Test loss: 0.05287259081378579 | Test acc: 0.9812500000000003\nEpoch: 0 | Step: 95/70 | Train loss: 0.13679716885089876 | Train acc: 0.9312500000000001 | Test loss: 0.04134784317575395 | Test acc: 0.9851562500000005\nEpoch: 0 | Step: 100/70 | Train loss: 0.08950164318084716 | Train acc: 0.96875 | Test loss: 0.041062067332677546 | Test acc: 0.9863281250000002\nEpoch: 0 | Step: 105/70 | Train loss: 0.16072152853012084 | Train acc: 0.925 | Test loss: 0.044542220793664454 | Test acc: 0.9832031250000005\nEpoch: 0 | Step: 110/70 | Train loss: 0.130016753077507 | Train acc: 0.959375 | Test loss: 0.038838218268938365 | Test acc: 0.9855468750000005\nEpoch: 0 | Step: 115/70 | Train loss: 0.1426736406981945 | Train acc: 0.94375 | Test loss: 0.04202921178657562 | Test acc: 0.9855468750000006\nEpoch: 0 | Step: 120/70 | Train loss: 0.18757236003875732 | Train acc: 0.903125 | Test loss: 0.041919583408162 | Test acc: 0.9871093750000004\nEpoch: 0 | Step: 125/70 | Train loss: 0.10435906201601029 | Train acc: 0.9499999999999998 | Test loss: 0.05947930146940051 | Test acc: 0.9773437500000005\nEpoch: 0 | Step: 130/70 | Train loss: 0.14933517575263977 | Train acc: 0.9312499999999999 | Test loss: 0.037968704686500133 | Test acc: 0.9863281250000006\nEpoch: 0 | Step: 135/70 | Train loss: 0.17588062286376954 | Train acc: 0.9437500000000001 | Test loss: 0.05181127830874175 | Test acc: 0.9792968750000004\nEpoch: 0 | Step: 140/70 | Train loss: 0.15228946805000304 | Train acc: 0.9249999999999999 | Test loss: 0.054136648681014775 | Test acc: 0.9808593750000005\nEpoch: 0 | Step: 145/70 | Train loss: 0.17649099677801133 | Train acc: 0.928125 | Test loss: 0.06559804994612933 | Test acc: 0.9761718750000002\nEpoch: 0 | Step: 
150/70 | Train loss: 0.18984051644802094 | Train acc: 0.925 | Test loss: 0.060335627128370116 | Test acc: 0.9757812500000005\nEpoch: 0 | Step: 155/70 | Train loss: 0.11635222733020784 | Train acc: 0.9468749999999999 | Test loss: 0.056617066054604946 | Test acc: 0.9796875000000004\nEpoch: 0 | Step: 160/70 | Train loss: 0.14122935086488722 | Train acc: 0.940625 | Test loss: 0.0529679870000109 | Test acc: 0.9792968750000005\nEpoch: 0 | Step: 165/70 | Train loss: 0.10575526505708696 | Train acc: 0.959375 | Test loss: 0.03797864471562206 | Test acc: 0.9855468750000006\nEpoch: 0 | Step: 170/70 | Train loss: 0.0790023796260357 | Train acc: 0.965625 | Test loss: 0.041275303298607464 | Test acc: 0.9847656250000003\nEpoch: 0 | Step: 175/70 | Train loss: 0.14691830575466155 | Train acc: 0.946875 | Test loss: 0.041710160952061405 | Test acc: 0.9843750000000004\nEpoch: 0 | Step: 180/70 | Train loss: 0.127793987095356 | Train acc: 0.9593749999999999 | Test loss: 0.0420147635275498 | Test acc: 0.9847656250000003\nEpoch: 0 | Step: 185/70 | Train loss: 0.150615394115448 | Train acc: 0.9375 | Test loss: 0.03997119485866279 | Test acc: 0.9843750000000006\nEpoch: 0 | Step: 190/70 | Train loss: 0.10459670722484589 | Train acc: 0.9562499999999999 | Test loss: 0.038834996591322134 | Test acc: 0.9855468750000006\nEpoch: 0 | Step: 195/70 | Train loss: 0.10772354453802109 | Train acc: 0.95 | Test loss: 0.039422417338937504 | Test acc: 0.9855468750000005\nEpoch: 0 | Step: 200/70 | Train loss: 0.11106140911579132 | Train acc: 0.9531249999999999 | Test loss: 0.042719195154495536 | Test acc: 0.9832031250000004\nEpoch: 0 | Step: 205/70 | Train loss: 0.14702724367380143 | Train acc: 0.9281250000000001 | Test loss: 0.04222033789847046 | Test acc: 0.9843750000000004\nEpoch: 0 | Step: 210/70 | Train loss: 0.15944554209709166 | Train acc: 0.9312499999999999 | Test loss: 0.04301619925536217 | Test acc: 0.9843750000000004\nEpoch: 0 | Step: 215/70 | Train loss: 0.14402222633361816 | Train acc: 0.9500000000000001 | Test loss: 0.04359726167749615 | Test acc: 0.9847656250000004\nEpoch: 0 | Step: 220/70 | Train loss: 0.16783702224493027 | Train acc: 0.9374999999999999 | Test loss: 0.04309091055765748 | Test acc: 0.9839843750000002\nEpoch: 0 | Step: 225/70 | Train loss: 0.1488300547003746 | Train acc: 0.95 | Test loss: 0.06570144428405911 | Test acc: 0.9738281250000005\nEpoch: 0 | Step: 230/70 | Train loss: 0.13624729514122008 | Train acc: 0.9468749999999999 | Test loss: 0.04075988531112671 | Test acc: 0.9871093750000005\nEpoch: 0 | Step: 235/70 | Train loss: 0.13050326555967331 | Train acc: 0.9312499999999999 | Test loss: 0.04222942637279629 | Test acc: 0.9859375000000006\nEpoch: 0 | Step: 240/70 | Train loss: 0.13238141909241674 | Train acc: 0.953125 | Test loss: 0.046418800204992304 | Test acc: 0.9835937500000006\nEpoch: 0 | Step: 245/70 | Train loss: 0.05954338535666465 | Train acc: 0.9843749999999999 | Test loss: 0.04885823905933648 | Test acc: 0.9812500000000003\nEpoch: 0 | Step: 250/70 | Train loss: 0.16078078895807266 | Train acc: 0.934375 | Test loss: 0.03736194064840675 | Test acc: 0.9863281250000004\nEpoch: 0 | Step: 255/70 | Train loss: 0.15792878568172455 | Train acc: 0.940625 | Test loss: 0.04166025612503291 | Test acc: 0.9847656250000004\nEpoch: 0 | Step: 260/70 | Train loss: 0.14938536137342454 | Train acc: 0.946875 | Test loss: 0.045122843678109356 | Test acc: 0.9843750000000003\nEpoch: 0 | Step: 265/70 | Train loss: 0.11566838473081589 | Train acc: 0.959375 | Test loss: 0.03719602164346725 | Test acc: 
0.9863281250000004\nEpoch: 0 | Step: 270/70 | Train loss: 0.1031675636768341 | Train acc: 0.959375 | Test loss: 0.03917242186143994 | Test acc: 0.9847656250000004\nEpoch: 0 | Step: 275/70 | Train loss: 0.1409957207739353 | Train acc: 0.93125 | Test loss: 0.0431375071639195 | Test acc: 0.9839843750000006\n" ] ], [ [ "#### Densenet121", "_____no_output_____" ] ], [ [ "# Use GPU if it's available\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nmodel = models.densenet121(pretrained=True)\n\n# Freeze parameters so we don't backprop through them\nfor param in model.parameters():\n param.requires_grad = False\n \nmodel.classifier = nn.Sequential(nn.Linear(1024, 256),\n nn.ReLU(),\n nn.Dropout(0.2),\n nn.Linear(256, 2),\n nn.LogSoftmax(dim=1))\n\ncriterion = nn.NLLLoss()\n\n# Only train the classifier parameters, feature parameters are frozen\noptimizer = optim.Adam(model.classifier.parameters(), lr=0.003)\n\nmodel.to(device);", "_____no_output_____" ], [ "device", "_____no_output_____" ], [ "## TODO: Use a pretrained model to classify the cat and dog images\n\nepochs = 1\nsteps = 0\nrunning_loss = 0\nprint_every = 5\nfor epoch in range(epochs):\n for inputs, labels in trainloader:\n steps += 1\n # Move input and label tensors to the default device\n inputs, labels = inputs.to(device), labels.to(device)\n \n optimizer.zero_grad()\n \n logps = model.forward(inputs)\n loss = criterion(logps, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n \n if steps % print_every == 0:\n test_loss = 0\n accuracy = 0\n model.eval()\n with torch.no_grad():\n for inputs, labels in testloader:\n inputs, labels = inputs.to(device), labels.to(device)\n logps = model.forward(inputs)\n batch_loss = criterion(logps, labels)\n \n test_loss += batch_loss.item()\n \n # Calculate accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n \n print(f\"Epoch {epoch+1}/{epochs}.. \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Test loss: {test_loss/len(testloader):.3f}.. \"\n f\"Test accuracy: {accuracy/len(testloader):.3f}\")\n running_loss = 0\n model.train()", "Epoch 1/1.. Train loss: 1.046.. Test loss: 0.468.. Test accuracy: 0.691\nEpoch 1/1.. Train loss: 0.476.. Test loss: 0.286.. Test accuracy: 0.880\nEpoch 1/1.. Train loss: 0.347.. Test loss: 0.218.. Test accuracy: 0.920\nEpoch 1/1.. Train loss: 0.316.. Test loss: 0.100.. Test accuracy: 0.976\nEpoch 1/1.. Train loss: 0.262.. Test loss: 0.088.. Test accuracy: 0.977\nEpoch 1/1.. Train loss: 0.287.. Test loss: 0.144.. Test accuracy: 0.942\nEpoch 1/1.. Train loss: 0.187.. Test loss: 0.094.. Test accuracy: 0.970\nEpoch 1/1.. Train loss: 0.263.. Test loss: 0.063.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.189.. Test loss: 0.100.. Test accuracy: 0.963\nEpoch 1/1.. Train loss: 0.176.. Test loss: 0.061.. Test accuracy: 0.979\nEpoch 1/1.. Train loss: 0.186.. Test loss: 0.056.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.142.. Test loss: 0.061.. Test accuracy: 0.978\nEpoch 1/1.. Train loss: 0.131.. Test loss: 0.050.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.178.. Test loss: 0.049.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.198.. Test loss: 0.055.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.135.. Test loss: 0.060.. Test accuracy: 0.977\nEpoch 1/1.. Train loss: 0.131.. Test loss: 0.048.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.163.. 
Test loss: 0.055.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.121.. Test loss: 0.046.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.147.. Test loss: 0.048.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.156.. Test loss: 0.061.. Test accuracy: 0.978\nEpoch 1/1.. Train loss: 0.208.. Test loss: 0.085.. Test accuracy: 0.965\nEpoch 1/1.. Train loss: 0.310.. Test loss: 0.086.. Test accuracy: 0.974\nEpoch 1/1.. Train loss: 0.184.. Test loss: 0.058.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.184.. Test loss: 0.052.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.127.. Test loss: 0.052.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.137.. Test loss: 0.045.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.142.. Test loss: 0.043.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.144.. Test loss: 0.069.. Test accuracy: 0.973\nEpoch 1/1.. Train loss: 0.158.. Test loss: 0.058.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.168.. Test loss: 0.046.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.226.. Test loss: 0.046.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.196.. Test loss: 0.046.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.205.. Test loss: 0.051.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.181.. Test loss: 0.074.. Test accuracy: 0.977\nEpoch 1/1.. Train loss: 0.178.. Test loss: 0.053.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.190.. Test loss: 0.049.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.182.. Test loss: 0.063.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.180.. Test loss: 0.054.. Test accuracy: 0.978\nEpoch 1/1.. Train loss: 0.163.. Test loss: 0.047.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.146.. Test loss: 0.043.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.135.. Test loss: 0.067.. Test accuracy: 0.975\nEpoch 1/1.. Train loss: 0.166.. Test loss: 0.042.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.237.. Test loss: 0.065.. Test accuracy: 0.979\nEpoch 1/1.. Train loss: 0.172.. Test loss: 0.067.. Test accuracy: 0.974\nEpoch 1/1.. Train loss: 0.139.. Test loss: 0.041.. Test accuracy: 0.985\nEpoch 1/1.. Train loss: 0.131.. Test loss: 0.041.. Test accuracy: 0.986\nEpoch 1/1.. Train loss: 0.120.. Test loss: 0.046.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.146.. Test loss: 0.042.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.152.. Test loss: 0.040.. Test accuracy: 0.986\nEpoch 1/1.. Train loss: 0.151.. Test loss: 0.042.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.100.. Test loss: 0.042.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.148.. Test loss: 0.048.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.111.. Test loss: 0.043.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.137.. Test loss: 0.043.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.106.. Test loss: 0.045.. Test accuracy: 0.981\nEpoch 1/1.. Train loss: 0.204.. Test loss: 0.043.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.209.. Test loss: 0.043.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.170.. Test loss: 0.043.. Test accuracy: 0.985\nEpoch 1/1.. Train loss: 0.141.. Test loss: 0.054.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.133.. Test loss: 0.042.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.106.. Test loss: 0.040.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.141.. Test loss: 0.042.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.152.. Test loss: 0.044.. Test accuracy: 0.983\nEpoch 1/1.. Train loss: 0.149.. Test loss: 0.039.. Test accuracy: 0.984\nEpoch 1/1.. Train loss: 0.189.. Test loss: 0.040.. 
Test accuracy: 0.986\nEpoch 1/1.. Train loss: 0.109.. Test loss: 0.053.. Test accuracy: 0.982\nEpoch 1/1.. Train loss: 0.184.. Test loss: 0.054.. Test accuracy: 0.980\nEpoch 1/1.. Train loss: 0.218.. Test loss: 0.079.. Test accuracy: 0.971\nEpoch 1/1.. Train loss: 0.174.. Test loss: 0.044.. Test accuracy: 0.983\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
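The transfer-learning record above freezes a pretrained feature extractor and trains only a small classifier head. As a complementary sketch (not part of the original notebook), single-image inference with the same 224x224 crop and ImageNet normalization could look like the following; the trained `model` variable, the class ordering `["cat", "dog"]`, and the image path are all assumptions:

```python
# Hypothetical inference helper for a fine-tuned torchvision classifier.
# Assumes `model` is the trained network from the notebook above (ending in
# LogSoftmax) and that the ImageFolder classes were ['cat', 'dog'].
import torch
from PIL import Image
from torchvision import transforms

infer_transforms = transforms.Compose([
    transforms.Resize(255),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225]),
])

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
classes = ["cat", "dog"]  # assumed ImageFolder ordering

def predict_image(model, path):
    model.eval()
    image = Image.open(path).convert("RGB")
    batch = infer_transforms(image).unsqueeze(0).to(device)  # add batch dimension
    with torch.no_grad():
        ps = torch.exp(model(batch))  # model outputs log-probabilities
    prob, idx = ps.topk(1, dim=1)
    return classes[idx.item()], prob.item()

# e.g. label, confidence = predict_image(model.to(device), "my_pet.jpg")
```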
d0deb1b2c2bb30f66f74bae939a175e51a1f38c6
312,402
ipynb
Jupyter Notebook
model/Forest.ipynb
bogdanovdya/Clients_Lifetime
b47b259a4e7111aa68b216b083568151454f34a2
[ "MIT" ]
2
2021-09-25T17:16:23.000Z
2021-09-25T17:16:36.000Z
model/Forest.ipynb
bogdanovdya/Clients_Lifetime
b47b259a4e7111aa68b216b083568151454f34a2
[ "MIT" ]
null
null
null
model/Forest.ipynb
bogdanovdya/Clients_Lifetime
b47b259a4e7111aa68b216b083568151454f34a2
[ "MIT" ]
null
null
null
80.205905
155,116
0.634269
[ [ [ "%matplotlib inline\nfrom matplotlib import pyplot as plt\nplt.rcParams['figure.figsize'] = (10, 8)\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nimport collections\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import preprocessing\nfrom sklearn.tree import DecisionTreeClassifier, export_graphviz\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score", "_____no_output_____" ], [ "import pickle", "_____no_output_____" ], [ "cmp_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])", "_____no_output_____" ], [ "cmp_df", "_____no_output_____" ], [ "data_train = pd.read_csv('data/dataframe.csv', sep=';', index_col='ID')", "_____no_output_____" ], [ "data_test = pd.read_csv('data/dataframe.csv', sep=';', index_col='ID')", "_____no_output_____" ], [ "data_train", "_____no_output_____" ], [ "title_df = pd.DataFrame(data_train['TITLE'])", "_____no_output_____" ], [ "title_df.head()", "_____no_output_____" ], [ "data_train = data_train.drop(columns=['TITLE'], axis=1)\ndata_test = data_test.drop(columns=['TITLE'], axis=1)", "_____no_output_____" ], [ "data_test.describe(include='all').T", "_____no_output_____" ], [ "fig = plt.figure(figsize=(25, 15))\ncols = 5\nrows = np.ceil(float(data_train.shape[1]) / cols)\nfor i, column in enumerate(data_train.columns):\n ax = fig.add_subplot(rows, cols, i + 1)\n ax.set_title(column)\n if data_train.dtypes[column] == np.object:\n data_train[column].value_counts().plot(kind=\"bar\", axes=ax)\n else:\n data_train[column].hist(axes=ax)\n plt.xticks(rotation=\"vertical\")\nplt.subplots_adjust(hspace=0.7, wspace=0.2)", "_____no_output_____" ], [ "X_train=data_train.drop(['target'], axis=1)\ny_train = data_train['target']\n\nX_test=data_test.drop(['target'], axis=1)\ny_test = data_test['target']", "_____no_output_____" ], [ "tree = DecisionTreeClassifier(criterion = \"entropy\", max_depth = 3, random_state = 17)\ntree.fit(X = X_train, y = y_train)", "_____no_output_____" ], [ "tree_predictions = tree.predict(X = X_test) # Ваш код здесь", "_____no_output_____" ], [ "tree_predictions", "_____no_output_____" ], [ "accuracy_score = tree.score(X = X_test, y = y_test)\nprint(accuracy_score)", "0.9065743944636678\n" ], [ "rf = RandomForestClassifier(criterion=\"entropy\", random_state = 17, n_estimators=200,\n max_depth=4, max_features=0.15 ,n_jobs=-1)\nrf.fit(X = X_train, y = y_train) # Ваш код здесь", "_____no_output_____" ], [ "forest_predictions = rf.predict(X = X_test)\nprint(forest_predictions)", "[1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0\n 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0\n 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 1 0 0 0 0 0 0 0 0 0]\n" ], [ "accuracy_score = rf.score(X = X_test, y = y_test)\nprint(accuracy_score)", "0.9480968858131488\n" ], [ "pickle.dump(rf, open('random_forest.sav', 'wb'))", "_____no_output_____" ], [ "model = pickle.load(open('random_forest.sav', 'rb'))", "_____no_output_____" ], [ "predict = model.predict(X=X_test)", "_____no_output_____" 
], [ "predict", "_____no_output_____" ], [ "title_df = title_df.reset_index(drop=True)", "_____no_output_____" ], [ "predict = pd.Series(predict).rename('PREDICT')", "_____no_output_____" ], [ "predict = predict.rename('PREDICT')", "_____no_output_____" ], [ "result_df = pd.concat([title_df, predict], axis=1)", "_____no_output_____" ], [ "result_df", "_____no_output_____" ] ], [ [ "--------------------------------------------------------------------------------------------------------------------------------", "_____no_output_____" ] ], [ [ "forest_params = {'max_depth': range(4, 21),\n 'max_features': range(7, 45),\n 'random_state': range(1, 100)}", "_____no_output_____" ], [ "locally_best_forest = GridSearchCV(rf, forest_params, n_jobs=-1)", "_____no_output_____" ], [ "locally_best_forest.fit(X = X_train, y = y_train)", "_____no_output_____" ], [ "print(\"Best params:\", locally_best_forest.best_params_)\nprint(\"Best cross validaton score\", locally_best_forest.best_score_)", "Best params: {'max_depth': 4, 'max_features': 7, 'random_state': 40}\nBest cross validaton score 0.8650519031141869\n" ], [ "tuned_forest_predictions = locally_best_forest.predict(X = X_test) # Ваш код здесь\naccuracy_score = locally_best_forest.score(X = X_test, y = y_test)\nprint(accuracy_score)", "0.9342560553633218\n" ], [ "tuned_forest_predictions", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
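A caveat on the record above: it reads the same CSV into both `data_train` and `data_test`, so the reported accuracies are measured on the training data. As an illustrative sketch only (the file path and column names are taken from the notebook; the split parameters are assumptions), a held-out split gives a more honest estimate and also exposes the forest's feature importances:

```python
# Sketch: evaluate the random forest on a proper held-out split.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

df = pd.read_csv('data/dataframe.csv', sep=';', index_col='ID')
X = df.drop(columns=['TITLE', 'target'])
y = df['target']

# stratify keeps the class balance similar in both halves
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25,
                                          random_state=17, stratify=y)

rf = RandomForestClassifier(criterion="entropy", n_estimators=200,
                            max_depth=4, max_features=0.15,
                            random_state=17, n_jobs=-1)
rf.fit(X_tr, y_tr)
print("Held-out accuracy:", accuracy_score(y_te, rf.predict(X_te)))

# Which columns drive the prediction?
importances = pd.Series(rf.feature_importances_, index=X.columns)
print(importances.sort_values(ascending=False).head(10))
```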
d0debb946e9e8a00d9b1e6162145fbfe1df2df8a
159,454
ipynb
Jupyter Notebook
MNIST_autoencoder.ipynb
LeOntalEs/Deep-Learning-Tutorial
8cbacfa5e8174ddfdb9bb3cb91bd8546ef091e85
[ "MIT" ]
null
null
null
MNIST_autoencoder.ipynb
LeOntalEs/Deep-Learning-Tutorial
8cbacfa5e8174ddfdb9bb3cb91bd8546ef091e85
[ "MIT" ]
null
null
null
MNIST_autoencoder.ipynb
LeOntalEs/Deep-Learning-Tutorial
8cbacfa5e8174ddfdb9bb3cb91bd8546ef091e85
[ "MIT" ]
null
null
null
306.053743
9,732
0.913423
[ [ [ "%matplotlib inline\n\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import Normalizer, MinMaxScaler\nfrom keras.layers import Dense, Conv2D, MaxPool2D, Dropout, Flatten, Input\nfrom keras.models import Sequential, Model\nfrom keras.utils import to_categorical\nfrom keras.datasets import boston_housing, mnist", "_____no_output_____" ], [ "(x_train, y_train), (x_test, y_test) = mnist.load_data()\nx_train = x_train.reshape((-1, 28*28))\nx_test = x_test.reshape((-1, 28*28))\ny_train = to_categorical(y_train, num_classes=10)\ny_test = to_categorical(y_test, num_classes=10)\nprint(x_train.shape)\nprint(y_train.shape)", "(60000, 784)\n(60000, 10)\n" ], [ "# model = Sequential()\n# model.add(Dense(400, input_shape=(784, ), activation='relu'))\n# model.add(Dense(200, activation='relu'))\n# model.add(Dense(100, activation='relu'))\n# model.add(Dense(2, activation='relu'))\n# model.add(Dense(100, activation='relu'))\n# model.add(Dense(200, activation='relu'))\n# model.add(Dense(400, activation='relu'))\n# model.add(Dense(784, activation='relu'))\n\ninpt = Input(shape=(784, ))\ninner = Dense(300, activation='sigmoid')(inpt)\ninner = Dense(150, activation='sigmoid')(inner)\ncode = Dense(2, activation='sigmoid')(inner)\ninner = Dense(150, activation='sigmoid')(code)\ninner = Dense(300, activation='sigmoid')(inner)\ndecode = Dense(784, activation='sigmoid')(inner)\n\nautoencoder = Model(inpt, decode)\nencoder = Model(inpt, code)", "_____no_output_____" ], [ "autoencoder.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_7 (InputLayer) (None, 784) 0 \n_________________________________________________________________\ndense_34 (Dense) (None, 300) 235500 \n_________________________________________________________________\ndense_35 (Dense) (None, 150) 45150 \n_________________________________________________________________\ndense_36 (Dense) (None, 2) 302 \n_________________________________________________________________\ndense_37 (Dense) (None, 150) 450 \n_________________________________________________________________\ndense_38 (Dense) (None, 300) 45300 \n_________________________________________________________________\ndense_39 (Dense) (None, 784) 235984 \n=================================================================\nTotal params: 562,686\nTrainable params: 562,686\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "encoder.summary()", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ninput_7 (InputLayer) (None, 784) 0 \n_________________________________________________________________\ndense_34 (Dense) (None, 300) 235500 \n_________________________________________________________________\ndense_35 (Dense) (None, 150) 45150 \n_________________________________________________________________\ndense_36 (Dense) (None, 2) 302 \n=================================================================\nTotal params: 280,952\nTrainable params: 280,952\nNon-trainable params: 0\n_________________________________________________________________\n" ], [ "autoencoder.compile(loss=\"binary_crossentropy\", \n optimizer='adam')", "_____no_output_____" ], [ "model.fit(x_train, x_train, epochs=10)", "Epoch 1/10\n60000/60000 
[==============================] - 14s 231us/step - loss: 2874.0705\nEpoch 2/10\n60000/60000 [==============================] - 13s 223us/step - loss: 2949.9649\nEpoch 3/10\n60000/60000 [==============================] - 14s 227us/step - loss: 2958.6945\nEpoch 4/10\n60000/60000 [==============================] - 13s 223us/step - loss: 3009.1767\nEpoch 5/10\n60000/60000 [==============================] - 13s 225us/step - loss: 2956.3930\nEpoch 6/10\n60000/60000 [==============================] - 13s 225us/step - loss: 2882.7399\nEpoch 7/10\n60000/60000 [==============================] - 14s 225us/step - loss: 2874.1891\nEpoch 8/10\n60000/60000 [==============================] - 14s 226us/step - loss: 2843.9117\nEpoch 9/10\n60000/60000 [==============================] - 14s 226us/step - loss: 2826.2436\nEpoch 10/10\n60000/60000 [==============================] - 14s 227us/step - loss: 2811.0035\n" ], [ "for x, y in list(zip(x_test, y_test))[:10]:\n img = x.reshape(28, 28)\n pred = autoencoder.predict(x.reshape(1, -1))\n pred = pred.reshape((28, 28))\n plt.imshow(img)\n plt.show()\n \n plt.imshow(pred)\n plt.show()\n print()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
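Since the autoencoder above squeezes each digit through a 2-unit bottleneck, the latent space can be inspected directly. A possible follow-up, not in the original notebook, assuming the trained `encoder`, `x_test`, and one-hot `y_test` from the record above:

```python
# Scatter the 2-D codes of the test digits, colored by class.
import numpy as np
import matplotlib.pyplot as plt

codes = encoder.predict(x_test)      # shape (10000, 2)
labels = np.argmax(y_test, axis=1)   # recover 0-9 labels from one-hot

plt.figure(figsize=(8, 6))
sc = plt.scatter(codes[:, 0], codes[:, 1], c=labels, cmap='tab10', s=4)
plt.colorbar(sc, label='digit')
plt.xlabel('latent dimension 1')
plt.ylabel('latent dimension 2')
plt.title('2-D autoencoder codes for MNIST test digits')
plt.show()
```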
d0deca7d9a7f8ae0fa2e527564344ab9e515e52f
37,429
ipynb
Jupyter Notebook
text.ipynb
Gerson1366/projeto_ia
db80ae88f360de634cc0552ab598cb36abc7f984
[ "MIT" ]
null
null
null
text.ipynb
Gerson1366/projeto_ia
db80ae88f360de634cc0552ab598cb36abc7f984
[ "MIT" ]
null
null
null
text.ipynb
Gerson1366/projeto_ia
db80ae88f360de634cc0552ab598cb36abc7f984
[ "MIT" ]
1
2019-10-26T22:33:40.000Z
2019-10-26T22:33:40.000Z
47.924456
1,204
0.635069
[ [ [ "# TEXT\n\nThis notebook serves as supporting material for topics covered in **Chapter 22 - Natural Language Processing** from the book *Artificial Intelligence: A Modern Approach*. This notebook uses implementations from [text.py](https://github.com/aimacode/aima-python/blob/master/text.py).", "_____no_output_____" ] ], [ [ "from text import *\nfrom utils import open_data\nfrom notebook import psource", "_____no_output_____" ] ], [ [ "## CONTENTS\n\n* Text Models\n* Viterbi Text Segmentation\n* Information Retrieval\n* Information Extraction\n* Decoders", "_____no_output_____" ], [ "## TEXT MODELS\n\nBefore we start analyzing text processing algorithms, we will need to build some language models. Those models serve as a look-up table for character or word probabilities (depending on the type of model). These models can give us the probabilities of words or character sequences appearing in text. Take as example \"the\". Text models can give us the probability of \"the\", *P(\"the\")*, either as a word or as a sequence of characters (\"t\" followed by \"h\" followed by \"e\"). The first representation is called \"word model\" and deals with words as distinct objects, while the second is a \"character model\" and deals with sequences of characters as objects. Note that we can specify the number of words or the length of the char sequences to better suit our needs. So, given that number of words equals 2, we have probabilities in the form *P(word1, word2)*. For example, *P(\"of\", \"the\")*. For char models, we do the same but for chars.\n\nIt is also useful to store the conditional probabilities of words given preceding words. That means, given we found the words \"of\" and \"the\", what is the chance the next word will be \"world\"? More formally, *P(\"world\"|\"of\", \"the\")*. Generalizing, *P(Wi|Wi-1, Wi-2, ... , Wi-n)*.\n\nWe call the word model *N-Gram Word Model* (from the Greek \"gram\", the root of \"write\", or the word for \"letter\") and the char model *N-Gram Character Model*. In the special case where *N* is 1, we call the models *Unigram Word Model* and *Unigram Character Model* respectively.\n\nIn the `text` module we implement the two models (both their unigram and n-gram variants) by inheriting from the `CountingProbDist` from `learning.py`. Note that `CountingProbDist` does not return the actual probability of each object, but the number of times it appears in our test data.\n\nFor word models we have `UnigramWordModel` and `NgramWordModel`. We supply them with a text file and they show the frequency of the different words. We have `UnigramCharModel` and `NgramCharModel` for the character models.\n\nExecute the cells below to take a look at the code.", "_____no_output_____" ] ], [ [ "psource(UnigramWordModel, NgramWordModel, UnigramCharModel, NgramCharModel)", "_____no_output_____" ] ], [ [ "Next we build our models. The text file we will use to build them is *Flatland*, by Edwin A. Abbott. We will load it from [here](https://github.com/aimacode/aima-data/blob/a21fc108f52ad551344e947b0eb97df82f8d2b2b/EN-text/flatland.txt). 
In that directory you can find other text files we might get to use here.", "_____no_output_____" ], [ "### Getting Probabilities\n\nHere we will take a look at how to read text and find the probabilities for each model, and how to retrieve them.\n\nFirst the word models:", "_____no_output_____" ] ], [ [ "flatland = open_data(\"EN-text/flatland.txt\").read()\nwordseq = words(flatland)\n\nP1 = UnigramWordModel(wordseq)\nP2 = NgramWordModel(2, wordseq)\n\nprint(P1.top(5))\nprint(P2.top(5))\n\nprint(P1['an'])\nprint(P2[('i', 'was')])", "[(2081, 'the'), (1479, 'of'), (1021, 'and'), (1008, 'to'), (850, 'a')]\n[(368, ('of', 'the')), (152, ('to', 'the')), (152, ('in', 'the')), (86, ('of', 'a')), (80, ('it', 'is'))]\n0.0036724740723330495\n0.00114584557527324\n" ] ], [ [ "We see that the most used word in *Flatland* is 'the', with 2081 occurrences, while the most used sequence is 'of the' with 368 occurrences. Also, the probability of 'an' is approximately 0.003, while for 'i was' it is close to 0.001. Note that the strings used as keys are all lowercase. For the unigram model, the keys are single strings, while for n-gram models we have n-tuples of strings.\n\nBelow we take a look at how we can get information from the conditional probabilities of the model, and how we can generate the next word in a sequence.", "_____no_output_____" ] ], [ [ "flatland = open_data(\"EN-text/flatland.txt\").read()\nwordseq = words(flatland)\n\nP3 = NgramWordModel(3, wordseq)\n\nprint(\"Conditional Probabilities Table:\", P3.cond_prob[('i', 'was')].dictionary, '\\n')\nprint(\"Conditional Probability of 'once' given 'i was':\", P3.cond_prob[('i', 'was')]['once'], '\\n')\nprint(\"Next word after 'i was':\", P3.cond_prob[('i', 'was')].sample())", "Conditional Probabilities Table: {'now': 2, 'glad': 1, 'keenly': 1, 'considered': 1, 'once': 2, 'not': 4, 'in': 2, 'by': 1, 'simulating': 1, 'intoxicated': 1, 'wearied': 1, 'quite': 1, 'certain': 2, 'sitting': 1, 'to': 2, 'rapidly': 1, 'will': 1, 'describing': 1, 'allowed': 1, 'at': 2, 'afraid': 1, 'covered': 1, 'approaching': 1, 'standing': 1, 'myself': 1, 'surprised': 1, 'unusually': 1, 'rapt': 1, 'pleased': 1, 'crushed': 1} \n\nConditional Probability of 'once' given 'i was': 0.05128205128205128 \n\nNext word after 'i was': wearied\n" ] ], [ [ "First we print all the possible words that come after 'i was' and the times they have appeared in the model. Next we print the probability of 'once' appearing after 'i was', and finally we pick a word to proceed after 'i was'. Note that the word is picked according to its probability of appearing (a high appearance count means a higher chance to get picked).", "_____no_output_____" ], [ "Let's take a look at the two character models:", "_____no_output_____" ] ], [ [ "flatland = open_data(\"EN-text/flatland.txt\").read()\nwordseq = words(flatland)\n\nP1 = UnigramCharModel(wordseq)\nP2 = NgramCharModel(2, wordseq)\n\nprint(P1.top(5))\nprint(P2.top(5))\n\nprint(P1['z'])\nprint(P2[('g', 'h')])", "[(19208, 'e'), (13965, 't'), (12069, 'o'), (11702, 'a'), (11440, 'i')]\n[(5364, (' ', 't')), (4573, ('t', 'h')), (4063, (' ', 'a')), (3654, ('h', 'e')), (2967, (' ', 'i'))]\n0.0006028715031814578\n0.0032371578540395666\n" ] ], [ [ "The most common letter is 'e', appearing more than 19000 times, and the most common sequence is \"\\_t\". That is, a space followed by a 't'. 
Note that even though we do not count spaces for word models or unigram character models, we do count them for n-gram char models.\n\nAlso, the probability of the letter 'z' appearing is close to 0.0006, while for the bigram 'gh' it is 0.003.", "_____no_output_____" ], [ "### Generating Samples\n\nApart from reading the probabilities for n-grams, we can also use our model to generate word sequences, using the `samples` function in the word models.", "_____no_output_____" ] ], [ [ "flatland = open_data(\"EN-text/flatland.txt\").read()\nwordseq = words(flatland)\n\nP1 = UnigramWordModel(wordseq)\nP2 = NgramWordModel(2, wordseq)\nP3 = NgramWordModel(3, wordseq)\n\nprint(P1.samples(10))\nprint(P2.samples(10))\nprint(P3.samples(10))", "hearing as inside is confined to conduct by the duties\nall and of voice being in a day of the\nparty they are stirred to mutual warfare and perish by\n" ] ], [ [ "For the unigram model, we mostly get gibberish, since each word is picked according to its frequency of appearance in the text, without taking into consideration preceding words. As we increase *n* though, we start to get samples that do have some semblance of coherence and remind us a little of normal English. As we increase our data, these samples will get better.\n\nLet's try it. We will give the model more data to work with and see what comes out.", "_____no_output_____" ] ], [ [ "data = open_data(\"EN-text/flatland.txt\").read()\ndata += open_data(\"EN-text/sense.txt\").read()\n\nwordseq = words(data)\n\nP3 = NgramWordModel(3, wordseq)\nP4 = NgramWordModel(4, wordseq)\nP5 = NgramWordModel(5, wordseq)\nP7 = NgramWordModel(7, wordseq)\n\nprint(P3.samples(15))\nprint(P4.samples(15))\nprint(P5.samples(15))\nprint(P7.samples(15))", "leave them at cleveland this christmas now pray do not ask you to relate or\nmeaning and both of us sprang forward in the direction and no sooner had they\npalmer though very unwilling to go as well from real humanity and good nature as\ntime about what they should do and they agreed he should take orders directly and\n" ] ], [ [ "Notice how the samples start to become more and more reasonable as we add more data and increase the *n* parameter. We still have a long way to go before realistic text generation, but at the same time we can see that with enough data even rudimentary algorithms can output something almost passable.", "_____no_output_____" ], [ "## VITERBI TEXT SEGMENTATION\n\n### Overview\n\nWe are given a string containing words of a sentence, but all the spaces are gone! It is very hard to read and we would like to separate the words in the string. We can accomplish this by employing the `Viterbi Segmentation` algorithm. It takes as input the string to segment and a text model, and it returns a list of the separate words.\n\nThe algorithm takes a dynamic programming approach. It starts from the beginning of the string and iteratively builds the best solution using previous solutions. It accomplishes that by segmenting the string into \"windows\", each window representing a word (real or gibberish). It then calculates the probability of the sequence up to that window/word occurring and updates its solution. 
When it is done, it traces back from the final word and finds the complete sequence of words.", "_____no_output_____" ], [ "### Implementation", "_____no_output_____" ] ], [ [ "psource(viterbi_segment)", "_____no_output_____" ] ], [ [ "The function takes as input a string and a text model, and returns the most probable sequence of words, together with the probability of that sequence.\n\nThe \"window\" is `w` and it includes the characters from *j* to *i*. We use it to \"build\" the following sequence: from the start to *j* and then `w`. We have previously calculated the probability from the start to *j*, so now we multiply that probability by `P[w]` to get the probability of the whole sequence. If that probability is greater than the probability we have calculated so far for the sequence from the start to *i* (`best[i]`), we update it.", "_____no_output_____" ], [ "### Example\n\nThe model the algorithm uses is the `UnigramTextModel`. First we will build the model using the *Flatland* text and then we will try and separate a space-devoid sentence.", "_____no_output_____" ] ], [ [ "flatland = open_data(\"EN-text/flatland.txt\").read()\nwordseq = words(flatland)\nP = UnigramWordModel(wordseq)\ntext = \"itiseasytoreadwordswithoutspaces\"\n\ns, p = viterbi_segment(text, P)\nprint(\"Sequence of words is:\",s)\nprint(\"Probability of sequence is:\",p)", "Sequence of words is: ['it', 'is', 'easy', 'to', 'read', 'words', 'without', 'spaces']\nProbability of sequence is: 2.273672843573388e-24\n" ] ], [ [ "The algorithm correctly retrieved the words from the string. It also gave us the probability of this sequence, which is small, but still the most probable segmentation of the string.", "_____no_output_____" ], [ "## INFORMATION RETRIEVAL\n\n### Overview\n\nWith **Information Retrieval (IR)** we find documents that are relevant to a user's needs for information. A popular example is a web search engine, which finds and presents to a user pages relevant to a query. Information retrieval is not limited only to returning documents though, but can also be used for other types of queries. For example, answering questions when the query is a question, returning information when the query is a concept, and many other applications. An IR system consists of the following:\n\n* A body (called corpus) of documents: The collection of documents the IR system will work on.\n\n* A query language: A query represents what the user wants.\n\n* Results: The documents the system grades as relevant to a user's query and needs.\n\n* Presentation of the results: How the results are presented to the user.\n\nHow does an IR system determine which documents are relevant though? We can mark a document as relevant if all the words in the query appear in it, and mark it as irrelevant otherwise. We can even extend the query language to support boolean operations (for example, \"paint AND brush\") and then mark as relevant the outcome of the query for the document. This technique, though, does not give a level of relevancy. All the documents are either relevant or irrelevant, but in reality some documents are more relevant than others.\n\nSo, instead of a boolean relevancy system, we use a *scoring function*. There are many scoring functions around for many different situations. 
One of the most used takes into account the frequency of the words appearing in a document, the frequency of a word appearing across documents (for example, the word \"a\" appears a lot, so it is not very important) and the length of a document (since large documents will have higher occurrences for the query terms, but a short document with a lot of occurrences seems very relevant). We combine these properties in a formula and we get a numeric score for each document, so we can then quantify relevancy and pick the best documents.\n\nThese scoring functions are not perfect though and there is room for improvement. For instance, for the above scoring function we assume each word is independent. That is not the case though, since words can share meaning. For example, the words \"painter\" and \"painters\" are closely related. If in a query we have the word \"painter\" and in a document the word \"painters\" appears a lot, this might be an indication that the document is relevant but we are missing out since we are only looking for \"painter\". There are a lot of ways to combat this. One of them is to reduce the query and document words into their stems. For example, both \"painter\" and \"painters\" have \"paint\" as their stem form. This can slightly improve the performance of algorithms.\n\nTo determine how good an IR system is, we give the system a set of queries (for which we know the relevant pages beforehand) and record the results. The two measures for performance are *precision* and *recall*. Precision measures the proportion of result documents that actually are relevant. Recall measures the proportion of relevant documents (which, as mentioned before, we know in advance) appearing in the result documents.", "_____no_output_____" ], [ "### Implementation\n\nYou can read the source code by running the command below:", "_____no_output_____" ] ], [ [ "psource(IRSystem)", "_____no_output_____" ] ], [ [ "The `stopwords` argument signifies words in the queries that should not be accounted for in documents. Usually they are very common words that do not add any significant information for a document's relevancy.\n\nA quick guide for the functions in the `IRSystem` class:\n\n* `index_document`: Adds a document to the collection of documents (named `documents`), which is a list of tuples. Also counts how many times each word in the query appears in each document.\n\n* `index_collection`: Indexes a collection of documents given by `filenames`.\n\n* `query`: Returns a list of `n` pairs of `(score, docid)` sorted on the score of each document. Also takes care of the special query \"learn: X\", where instead of the normal functionality we present the output of the terminal command \"X\".\n\n* `score`: Scores a given document for the given word using `log(1+k)/log(1+n)`, where `k` is the number of times the word appears in the document and `n` is the total number of words in the document. Other scoring functions can be used and you can override this function to better suit your needs.\n\n* `total_score`: Calculates the sum of the scores of all the query words in a given document.\n\n* `present`/`present_results`: Presents the results as a list.\n\nWe also have the class `Document` that holds metadata of documents, like their title, url and number of words. An additional class, `UnixConsultant`, can be used to initialize an IR System for Unix command manuals. 
This is the example we will use to showcase the implementation.", "_____no_output_____" ], [ "### Example\n\nFirst let's take a look at the source code of `UnixConsultant`.", "_____no_output_____" ] ], [ [ "psource(UnixConsultant)", "_____no_output_____" ] ], [ [ "The class creates an IR System with the stopwords \"how do i the a of\". We could add more words to exclude, but the queries we will test will generally be in that format, so it is convenient. After the initialization of the system, we get the manual files and start indexing them.\n\nLet's build our Unix consultant and run a query:", "_____no_output_____" ] ], [ [ "uc = UnixConsultant()\n\nq = uc.query(\"how do I remove a file\")\n\ntop_score, top_doc = q[0][0], q[0][1]\nprint(top_score, uc.documents[top_doc].url)", "0.7682667868462166 aima-data/MAN/rm.txt\n" ] ], [ [ "We asked how to remove a file and the top result was the `rm` (the Unix command for remove) manual. This is exactly what we wanted! Let's try another query:", "_____no_output_____" ] ], [ [ "q = uc.query(\"how do I delete a file\")\n\ntop_score, top_doc = q[0][0], q[0][1]\nprint(top_score, uc.documents[top_doc].url)", "0.7546722691607105 aima-data/MAN/diff.txt\n" ] ], [ [ "Even though we are basically asking for the same thing, we got a different top result. The `diff` command shows the differences between two files. So the system failed us and presented us an irrelevant document. Why is that? Unfortunately, our IR system considers each word independent. \"Remove\" and \"delete\" have similar meanings, but since they are different words our system will not make the connection. So, the `diff` manual, which mentions the word `delete` a lot, gets the nod ahead of other manuals, while the `rm` one isn't in the result set since it doesn't use the word at all.", "_____no_output_____" ], [ "## INFORMATION EXTRACTION\n\n**Information Extraction (IE)** is a method for finding occurrences of object classes and relationships in text. Unlike IR systems, an IE system includes (limited) notions of syntax and semantics. While it is difficult to extract object information in a general setting, for more specific domains the system is very useful. One model of an IE system makes use of templates that match with strings in a text.\n\nA typical example of such a model is reading prices from web pages. Prices usually appear after a dollar sign and consist of digits, maybe followed by a decimal point and two digits. Before the price, usually there will appear a string like \"price:\". Let's build a sample template.\n\nWith the following regular expression (*regex*) we can extract prices from text:\n\n`[$][0-9]+([.][0-9][0-9])?`\n\nWhere `+` means 1 or more occurrences and `?` means at most 1 occurrence. Usually a template consists of a prefix, a target and a postfix regex. In this template, the prefix regex can be \"price:\", the target regex can be the above regex and the postfix regex can be empty.\n\nA template can match with multiple strings. If this is the case, we need a way to resolve the multiple matches. Instead of having just one template, we can use multiple templates (ordered by priority) and pick the match from the highest-priority template. We can also use other ways to pick. For the dollar example, we can pick the match closer to the numerical half of the highest match. 
For the text \"Price $90, special offer $70, shipping $5\" we would pick \"$70\" since it is closer to the half of the highest match (\"$90\").", "_____no_output_____" ], [ "The above is called *attribute-based* extraction, where we want to find attributes in the text (in the example, the price). A more sophisticated extraction system aims at dealing with multiple objects and the relations between them. When such a system reads the text \"$100\", it should determine not only the price but also which object has that price.\n\nRelation extraction systems can be built as a series of finite state automata. Each automaton receives as input text, performs transformations on the text and passes it on to the next automaton as input. An automata setup can consist of the following stages:\n\n1. **Tokenization**: Segments text into tokens (words, numbers and punctuation).\n\n2. **Complex-word Handling**: Handles complex words such as \"give up\", or even names like \"Smile Inc.\".\n\n3. **Basic-group Handling**: Handles noun and verb groups, segmenting the text into strings of verbs or nouns (for example, \"had to give up\").\n\n4. **Complex Phrase Handling**: Handles complex phrases using finite-state grammar rules. For example, \"Human+PlayedChess(\"with\" Human+)?\" can be one template/rule for capturing a relation of someone playing chess with others.\n\n5. **Structure Merging**: Merges the structures built in the previous steps.", "_____no_output_____" ], [ "Finite-state, template based information extraction models work well for restricted domains, but perform poorly as the domain becomes more and more general. There are many models though to choose from, each with its own strengths and weaknesses. Some of the models are the following:\n\n* **Probabilistic**: Using Hidden Markov Models, we can extract information in the form of prefix, target and postfix from a given text. Two advantages of using HMMs over templates is that we can train HMMs from data and don't need to design elaborate templates, and that a probabilistic approach behaves well even with noise. In a regex, if one character is off, we do not have a match, while with a probabilistic approach we have a smoother process.\n\n* **Conditional Random Fields**: One problem with HMMs is the assumption of state independence. CRFs are very similar to HMMs, but they don't have the latter's constraint. In addition, CRFs make use of *feature functions*, which act as transition weights. For example, if for observation $e_{i}$ and state $x_{i}$ we have $e_{i}$ is \"run\" and $x_{i}$ is the state ATHLETE, we can have $f(x_{i}, e_{i}) = 1$ and equal to 0 otherwise. We can use multiple, overlapping features, and we can even use features for state transitions. Feature functions don't have to be binary (like the above example) but they can be real-valued as well. Also, we can use any $e$ for the function, not just the current observation. To bring it all together, we weigh a transition by the sum of features.\n\n* **Ontology Extraction**: This is a method for compiling information and facts in a general domain. A fact can be in the form of `NP is NP`, where `NP` denotes a noun-phrase. For example, \"Rabbit is a mammal\".", "_____no_output_____" ], [ "## DECODERS\n\n### Introduction\n\nIn this section we will try to decode ciphertext using probabilistic text models. A ciphertext is obtained by performing encryption on a text message. 
This encryption lets us communicate safely, as anyone who has access to the ciphertext but doesn't know how to decode it cannot read the message. We will restrict our study to <b>Monoalphabetic Substitution Ciphers</b>. These are primitive forms of cipher where each letter in the message text (also known as plaintext) is replaced by another letter of the alphabet.\n\n### Shift Decoder\n\n#### The Caesar cipher\n\nThe Caesar cipher, also known as the shift cipher, is a form of monoalphabetic substitution cipher where each letter is <i>shifted</i> by a fixed value. A shift by <b>`n`</b> in this context means that each letter in the plaintext is replaced with a letter corresponding to `n` letters down in the alphabet. For example, the plaintext `\"ABCDWXYZ\"` shifted by `3` yields `\"DEFGZABC\"`. Note how `X` became `A`. This is because the alphabet is cyclic, i.e. the letter after the last letter in the alphabet, `Z`, is the first letter of the alphabet - `A`.", "_____no_output_____" ] ], [ [ "plaintext = \"ABCDWXYZ\"\nciphertext = shift_encode(plaintext, 3)\nprint(ciphertext)", "DEFGZABC\n" ] ], [ [ "#### Decoding a Caesar cipher\n\nTo decode a Caesar cipher we exploit the fact that not all letters in the alphabet are used equally. Some letters are used more than others and some pairs of letters are more probable to occur together. We call a pair of consecutive letters a <b>bigram</b>.", "_____no_output_____" ] ], [ [ "print(bigrams('this is a sentence'))", "['th', 'hi', 'is', 's ', ' i', 'is', 's ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']\n" ] ], [ [ "We use `CountingProbDist` to get the probability distribution of bigrams. The Latin alphabet consists of only `26` letters. This limits the total number of possible substitutions to `26`. We reverse the shift encoding for a given `n` and check how probable it is using the bigram distribution. We try all `26` values of `n`, i.e. from `n = 0` to `n = 25`, and use the value of `n` which gives the most probable plaintext.", "_____no_output_____" ] ], [ [ "%psource ShiftDecoder", "_____no_output_____" ] ], [ [ "#### Example\n\nLet us encode a secret message using a Caesar cipher and then try decoding it using `ShiftDecoder`. We will again use `flatland.txt` to build the text model.", "_____no_output_____" ] ], [ [ "plaintext = \"This is a secret message\"\nciphertext = shift_encode(plaintext, 13)\nprint('The code is', '\"' + ciphertext + '\"')", "The code is \"Guvf vf n frperg zrffntr\"\n" ], [ "flatland = open_data(\"EN-text/flatland.txt\").read()\ndecoder = ShiftDecoder(flatland)\n\ndecoded_message = decoder.decode(ciphertext)\nprint('The decoded message is', '\"' + decoded_message + '\"')", "The decoded message is \"This is a secret message\"\n" ] ], [ [ "### Permutation Decoder\nNow let us try to decode messages encrypted by a general mono-alphabetic substitution cipher. The letters in the alphabet can be replaced by any permutation of letters. For example, if the alphabet consisted of `{A B C}` then it can be replaced by `{A C B}`, `{B A C}`, `{B C A}`, `{C A B}`, `{C B A}` or even `{A B C}` itself. Suppose we choose the permutation `{C B A}`, then the plain text `\"CAB BA AAC\"` would become `\"ACB BC CCA\"`. We can see that the Caesar cipher is also a form of permutation cipher where the permutation is a cyclic permutation. Unlike the Caesar cipher, it is infeasible to try all possible permutations. The number of possible permutations in the Latin alphabet is `26!` which is of the order $10^{26}$. 
We use graph search algorithms to search for a 'good' permutation.", "_____no_output_____" ] ], [ [ "psource(PermutationDecoder)", "_____no_output_____" ] ], [ [ "Each state/node in the graph is represented as a letter-to-letter map. If there is no mapping for a letter, it means the letter is unchanged in the permutation. These maps are stored as dictionaries. Each dictionary is a 'potential' permutation. We use the word 'potential' because not every dictionary necessarily represents a valid permutation, since a permutation cannot have repeating elements. For example, the dictionary `{'A': 'B', 'C': 'X'}` is invalid because `'A'` is replaced by `'B'`, but so is `'B'` itself: the dictionary has no mapping for `'B'`, so `'B'` stays unchanged, and two letters would then map to `'B'`. Two dictionaries can also represent the same permutation, e.g. `{'A': 'C', 'C': 'A'}` and `{'A': 'C', 'B': 'B', 'C': 'A'}` represent the same permutation, where `'A'` and `'C'` are interchanged and all other letters remain unaltered. To ensure that we get a valid permutation, a goal state must map all letters in the alphabet. We also prevent repetitions in the permutation by allowing only those actions which go to a new state/node in which the newly added letter of the dictionary maps to a previously unmapped letter. These two rules together ensure that the dictionary of a goal state will represent a valid permutation.\nThe score of a state is determined using word scores, unigram scores, and bigram scores. Experiment with different weightings for the word, unigram and bigram scores and see how they affect the decoding.", "_____no_output_____" ] ], [ [ "ciphertexts = ['ahed world', 'ahed woxld']\n\npd = PermutationDecoder(canonicalize(flatland))\nfor ctext in ciphertexts:\n print('\"{}\" decodes to \"{}\"'.format(ctext, pd.decode(ctext)))", "\"ahed world\" decodes to \"shed could\"\n\"ahed woxld\" decodes to \"shew atiow\"\n" ] ], [ [ "As evident from the above example, permutation decoding using best-first search is sensitive to the initial text. This is because not only must the final dictionary, with substitutions for all letters, have a good score, but so must the intermediate dictionaries. You could think of it as performing a local search by finding substitutions for each letter one by one. We could get very different results by changing even a single letter, because that letter could be a deciding factor for selecting a substitution in the early stages, which snowballs and affects the later stages. To make the search better we can use different definitions of the score in different stages and optimize which letter to substitute first.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0dee3c56d2814fd59f07e6e76cb4071b8902ee3
42,862
ipynb
Jupyter Notebook
VacationPy-Final.ipynb
JessicaKwon0121/Python-API-Challenge
f058d22d60bdc6e5db33153271dac5f3bfac6378
[ "ADSL" ]
null
null
null
VacationPy-Final.ipynb
JessicaKwon0121/Python-API-Challenge
f058d22d60bdc6e5db33153271dac5f3bfac6378
[ "ADSL" ]
null
null
null
VacationPy-Final.ipynb
JessicaKwon0121/Python-API-Challenge
f058d22d60bdc6e5db33153271dac5f3bfac6378
[ "ADSL" ]
null
null
null
32.154539
191
0.348257
[ [ [ "# VacationPy\n----\n\n#### Note\n* Keep an eye on your API usage. Use https://developers.google.com/maps/reporting/gmp-reporting as reference for how to monitor your usage and billing.\n\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.", "_____no_output_____" ], [ "### Store Part I results into DataFrame\n* Load the csv exported in Part I to a DataFrame", "_____no_output_____" ] ], [ [ "# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport gmaps\nimport os\n\n# Import API key\nfrom api_keys import g_key\nprint(g_key)", "AIzaSyBaFJ4YJWuNolXhaMX3S48Oxw7K44uUK6w\n" ], [ "csvfile = \"../output_data/weather_data.csv\"\nheatmap_df = pd.read_csv(csvfile)\nheatmap_df", "_____no_output_____" ] ], [ [ "### Humidity Heatmap\n* Configure gmaps.\n* Use the Lat and Lng as locations and Humidity as the weight.\n* Add Heatmap layer to map.", "_____no_output_____" ] ], [ [ "gmaps.configure(api_key=g_key)\nlocations = heatmap_df[[\"Lat\", \"Lng\"]].astype(float)\nhumidity = heatmap_df[\"Humidity\"].astype(float)\nhumidity.max()", "_____no_output_____" ], [ "fig = gmaps.figure()\nheatmap_layer = gmaps.heatmap_layer(locations, weights=humidity, \n dissipating=False, max_intensity=100,\n point_radius = 1)\nfig.add_layer(heatmap_layer)\nfig", "_____no_output_____" ] ], [ [ "### Create new DataFrame fitting weather criteria\n* Narrow down the cities to fit weather conditions.\n* Drop any rows will null values.", "_____no_output_____" ] ], [ [ "heatmap_data = heatmap_df.loc[(heatmap_df[\"Max Temp\"] < 100) & \n (heatmap_df[\"Max Temp\"] >=70) &\n (heatmap_df[\"Cloudiness\"] == 0) &\n (heatmap_df[\"Wind Speed\"] < 10)].dropna()\nheatmap_data", "_____no_output_____" ] ], [ [ "### Hotel Map\n* Store into variable named `hotel_df`.\n* Add a \"Hotel Name\" column to the DataFrame.\n* Set parameters to search for hotels with 5000 meters.\n* Hit the Google Places API for each city's coordinates.\n* Store the first Hotel result into the DataFrame.\n* Plot markers on top of the heatmap.", "_____no_output_____" ] ], [ [ "hotel_df = heatmap_data.loc[:,[\"City\",\"Country\",\"Lat\",\"Lng\"]]\nhotel_df[\"Hotel Name\"] = \"\"\nhotel_df", "_____no_output_____" ], [ "import json", "_____no_output_____" ], [ "url = \"https://maps.googleapis.com/maps/api/place/nearbysearch/json?\"\nparams = {\n \"radius\": 5000,\n \"type\": \"hotel\",\n \"keyword\": \"hotel\",\n \"key\": g_key}\n\nfor index, row in hotel_df.iterrows():\n lat = row['Lat']\n lon = row['Lng']\n city_name = row['City']\n\n params['location'] = f\"{lat},{lon}\"\n response = requests.get(url, params=params).json()\n results = response['results']\n \n try:\n print(f\"Closest hotel in {city_name} is {results[0]['name']}.\")\n hotel_df.loc[index, \"Hotel Name\"] = results[0]['name']\n\n # if there is no hotel available, show missing field\n except (KeyError, IndexError):\n print(\"No hotel\")", "No hotel\nNo hotel\nNo hotel\nClosest hotel in Sakakah is Raoum Inn Hotel.\nNo hotel\nNo hotel\nClosest hotel in Yulara is Sails in the Desert.\nClosest hotel in Tupã is Grande Hotel Tamoios.\nClosest hotel in Erzin is Hattusa Vacation Thermal Club Erzin.\nNo hotel\nClosest hotel in Ibrā’ is Ibra Hotel.\nClosest hotel in Atar is Odar kanawal.\nClosest hotel in Cabo San Lucas is Welk Resorts Cabo San Lucas - Sirena del Mar.\nClosest hotel in Bani Walid is فندق الزيتونة.\n" ], [ "hotel_df", 
"_____no_output_____" ], [ "hotel_drop = hotel_df.loc[hotel_df[\"Hotel Name\"] != \"\"]\nhotel_drop", "_____no_output_____" ], [ "# NOTE: Do not change any of the code in this cell\n\n# Using the template add the hotel marks to the heatmap\ninfo_box_template = \"\"\"\n<dl>\n<dt>Name</dt><dd>{Hotel Name}</dd>\n<dt>City</dt><dd>{City}</dd>\n<dt>Country</dt><dd>{Country}</dd>\n</dl>\n\"\"\"\n# Store the DataFrame Row\n\nfor index, row in hotel_drop.iterrows():\n city = row['City']\n country = row['Country']\n lat = row['Lat']\n lon = row['Lng']\n hotel_name = row['Hotel Name']\n\n# NOTE: be sure to update with your DataFrame name\nhotel_info = [info_box_template.format(**row) for index, row in hotel_drop.iterrows()]\nlocations = hotel_drop[[\"Lat\", \"Lng\"]]\n\nhotel_info", "_____no_output_____" ], [ "# Add marker layer ontop of heat map\n\nmarkers = gmaps.marker_layer(locations, info_box_content = hotel_info)\nfig.add_layer(markers)\n\n# Display figure\n\nfig", "_____no_output_____" ] ], [ [ "- It found only 2 locations which has a max temperature lower than 80 degrees but higher than 70. Thus, tried to find lower then 100 degrees but higher than 70 and found 8 locations.\n- We can find more locations in Northern Hemisphere then southern.\n- In addition, most hotels are located near the Mediterranean area.\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0def84a68362e18c818e1bcef981894087a34c6
294,565
ipynb
Jupyter Notebook
notebooks/esiosdata - Factura electricidad con datos enerPI.ipynb
azogue/esiosdata
680c7918955bc6ceee5bded92b3a4485f5ea8151
[ "MIT" ]
20
2017-06-04T20:34:16.000Z
2021-10-31T22:55:22.000Z
notebooks/esiosdata - Factura electricidad con datos enerPI.ipynb
azogue/esiosdata
680c7918955bc6ceee5bded92b3a4485f5ea8151
[ "MIT" ]
null
null
null
notebooks/esiosdata - Factura electricidad con datos enerPI.ipynb
azogue/esiosdata
680c7918955bc6ceee5bded92b3a4485f5ea8151
[ "MIT" ]
4
2020-01-28T19:02:24.000Z
2022-03-08T15:59:11.000Z
598.70935
128,194
0.926716
[ [ [ "# Cálculo de la factura eléctrica\n\n- Recuperación de datos de consumo eléctrico horario del medidor de consumo ([enerPI]()) vía JSON.\nPara otros casos, formar un `pandas.Series` de índice horario con datos de consumo en kWh.\n- Generación de factura eléctrica mediante `esiosdata.FacturaElec`\n- Simulación de cambio de tarifa eléctrica para el mismo consumo\n- Alguna gráfica de los patrones de consumo diario", "_____no_output_____" ] ], [ [ "%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nfrom glob import glob\nimport matplotlib.pyplot as plt\nimport os\nimport pandas as pd\nimport requests\nfrom esiosdata import FacturaElec\nfrom esiosdata.prettyprinting import *\n\n\n# enerPI JSON API\nip_enerpi = '192.168.1.44'\nt0, tf = '2016-11-01', '2016-12-24'\nurl = 'http://{}/enerpi/api/consumption/from/{}/to/{}'.format(ip_enerpi, t0, tf)\nprint(url)\nr = requests.get(url)\nif r.ok:\n data = r.json()\n data_consumo = pd.DataFrame(pd.Series(data, name='kWh')).sort_index().reset_index()\n data_consumo.index = data_consumo['index'].apply(lambda x: pd.Timestamp.fromtimestamp(float(x) / 1000.))\n data_consumo.drop('index', axis=1, inplace=True)\n print_ok(data_consumo.head())\n print_ok(data_consumo.tail())\nelse:\n print_err(r)", "http://192.168.1.44/enerpi/api/consumption/from/2016-11-01/to/2016-12-24\n\u001b[1m\u001b[32m kWh\nindex \n2016-11-01 00:00:00 0.3179\n2016-11-01 01:00:00 0.2329\n2016-11-01 02:00:00 0.2317\n2016-11-01 03:00:00 0.2343\n2016-11-01 04:00:00 0.2242\u001b[0m\n\u001b[1m\u001b[32m kWh\nindex \n2016-12-24 19:00:00 0.3294\n2016-12-24 20:00:00 0.5632\n2016-12-24 21:00:00 0.4405\n2016-12-24 22:00:00 0.2757\n2016-12-24 23:00:00 0.2938\u001b[0m\n" ], [ "# Consumo Total en intervalo:\nc_tot = round(data_consumo.kWh.round(3).sum(), 3)\nprint_ok(c_tot)\n\n# Plot consumo diario en kWh\ndata_consumo.kWh.resample('D').sum().plot(figsize=(16, 9));", "\u001b[1m\u001b[32m517.615\u001b[0m\n" ] ], [ [ "## Factura eléctrica con datos horarios", "_____no_output_____" ] ], [ [ "factura = FacturaElec(consumo=data_consumo.kWh)\nprint_info(factura)", "Asignando timezone a consumo horario: Europe/Madrid\n\u001b[34mFACTURA ELÉCTRICA:\n--------------------------------------------------------------------------------\n* Fecha inicio \t31/10/2016\n* Fecha final \t24/12/2016\n* Peaje de acceso \t2.0A (General)\n* Potencia contratada \t3.45 kW\n* Consumo periodo \t517.61 kWh\n* ¿Bono Social? 
\tNo\n* Equipo de medida \t1.43 €\n* Impuestos \tPenínsula y Baleares (IVA)\n* Días facturables \t54\n--------------------------------------------------------------------------------\n\n- CÁLCULO DEL TÉRMINO FIJO POR POTENCIA CONTRATADA:\n 3.45 kW * 42.043426 €/kW/año * 54 días (2016) / 366 = 21.40 €\n -> Término fijo 21.40 €\n\n- CÁLCULO DEL TÉRMINO VARIABLE POR ENERGÍA CONSUMIDA (TARIFA 2.0A):\n Periodo 1: 0.125777 €/kWh -> 65.10€(P1)\n - Peaje de acceso: 518kWh * 0.044027€/kWh = 22.79€\n - Coste de la energía: 518kWh * 0.081750€/kWh = 42.31€\n -> Término de consumo 65.10 €\n\n\n\n- IMPUESTO ELÉCTRICO:\n 5.11269632% x (21.40€ + 65.10€) 4.42 €\n\n- EQUIPO DE MEDIDA: 1.43 €\n\n- IVA O EQUIVALENTE:\n 21% de 92.35€ 19.39 €\n\n################################################################################\n# TOTAL FACTURA 111.74 €\n################################################################################\n\u001b[0m\n" ], [ "factura.tipo_peaje = 2\nprint_cyan(factura)", "\u001b[36mFACTURA ELÉCTRICA:\n--------------------------------------------------------------------------------\n* Fecha inicio \t31/10/2016\n* Fecha final \t24/12/2016\n* Peaje de acceso \t2.0DHA (Nocturna)\n* Potencia contratada \t3.45 kW\n* Consumo periodo \t517.61 kWh\n* ¿Bono Social? \tNo\n* Equipo de medida \t1.43 €\n* Impuestos \tPenínsula y Baleares (IVA)\n* Días facturables \t54\n--------------------------------------------------------------------------------\n\n- CÁLCULO DEL TÉRMINO FIJO POR POTENCIA CONTRATADA:\n 3.45 kW * 42.043426 €/kW/año * 54 días (2016) / 366 = 21.40 €\n -> Término fijo 21.40 €\n\n- CÁLCULO DEL TÉRMINO VARIABLE POR ENERGÍA CONSUMIDA (TARIFA 2.0DHA):\n Periodo 1: 0.147546 €/kWh -> 38.45€(P1)\n - Peaje de acceso: 261kWh * 0.062012€/kWh = 16.16€\n - Coste de la energía: 261kWh * 0.085534€/kWh = 22.29€\n Periodo 2: 0.073607 €/kWh -> 18.92€(P2)\n - Peaje de acceso: 257kWh * 0.002215€/kWh = 0.57€\n - Coste de la energía: 257kWh * 0.071392€/kWh = 18.35€\n -> Término de consumo 57.37 €\n\n\n\n- IMPUESTO ELÉCTRICO:\n 5.11269632% x (21.40€ + 57.37€) 4.03 €\n\n- EQUIPO DE MEDIDA: 1.43 €\n\n- IVA O EQUIVALENTE:\n 21% de 84.23€ 17.69 €\n\n################################################################################\n# TOTAL FACTURA 101.92 €\n################################################################################\n\u001b[0m\n" ], [ "factura.tipo_peaje = 3\nprint_magenta(factura)", "\u001b[35mFACTURA ELÉCTRICA:\n--------------------------------------------------------------------------------\n* Fecha inicio \t31/10/2016\n* Fecha final \t24/12/2016\n* Peaje de acceso \t2.0DHS (Vehículo eléctrico)\n* Potencia contratada \t3.45 kW\n* Consumo periodo \t517.61 kWh\n* ¿Bono Social? 
\tNo\n* Equipo de medida \t1.43 €\n* Impuestos \tPenínsula y Baleares (IVA)\n* Días facturables \t54\n--------------------------------------------------------------------------------\n\n- CÁLCULO DEL TÉRMINO FIJO POR POTENCIA CONTRATADA:\n 3.45 kW * 42.043426 €/kW/año * 54 días (2016) / 366 = 21.40 €\n -> Término fijo 21.40 €\n\n- CÁLCULO DEL TÉRMINO VARIABLE POR ENERGÍA CONSUMIDA (TARIFA 2.0DHS):\n Periodo 1: 0.148466 €/kWh -> 39.89€(P1)\n - Peaje de acceso: 269kWh * 0.062012€/kWh = 16.66€\n - Coste de la energía: 269kWh * 0.086454€/kWh = 23.23€\n Periodo 2: 0.079779 €/kWh -> 13.46€(P2)\n - Peaje de acceso: 169kWh * 0.002879€/kWh = 0.49€\n - Coste de la energía: 169kWh * 0.076900€/kWh = 12.97€\n Periodo 3: 0.062719 €/kWh -> 5.03€(P3)\n - Peaje de acceso: 80kWh * 0.000886€/kWh = 0.07€\n - Coste de la energía: 80kWh * 0.061833€/kWh = 4.96€\n -> Término de consumo 58.38 €\n\n\n\n- IMPUESTO ELÉCTRICO:\n 5.11269632% x (21.40€ + 58.38€) 4.08 €\n\n- EQUIPO DE MEDIDA: 1.43 €\n\n- IVA O EQUIVALENTE:\n 21% de 85.29€ 17.91 €\n\n################################################################################\n# TOTAL FACTURA 103.20 €\n################################################################################\n\u001b[0m\n" ] ], [ [ "### EXPORT TO CSV\n\n* To import at https://facturaluz2.cnmc.es/facturaluz2.html", "_____no_output_____" ] ], [ [ "path_csv = os.path.expanduser('~/Desktop/')\ndf_csv = factura.generacion_csv_oficial_consumo_horario(path_csv)\nprint_ok(df_csv.tail())\nfile_path = glob(path_csv + '*.csv')[0]\nwith open(file_path, 'r') as f:\n print_magenta(f.read()[:500])", "\u001b[1m\u001b[32m CUPS Fecha Hora Consumo_kWh Metodo_obtencion\nindex \n2016-12-24 19:00:00+01:00 ES00XXXXXXXXXXXXXXDB 24/12/2016 20 0.329 R\n2016-12-24 20:00:00+01:00 ES00XXXXXXXXXXXXXXDB 24/12/2016 21 0.563 R\n2016-12-24 21:00:00+01:00 ES00XXXXXXXXXXXXXXDB 24/12/2016 22 0.440 R\n2016-12-24 22:00:00+01:00 ES00XXXXXXXXXXXXXXDB 24/12/2016 23 0.276 R\n2016-12-24 23:00:00+01:00 ES00XXXXXXXXXXXXXXDB 24/12/2016 24 0.294 R\u001b[0m\n\u001b[35mCUPS;Fecha;Hora;Consumo_kWh;Metodo_obtencion\nES00XXXXXXXXXXXXXXDB;01/11/2016;1;0,318;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;2;0,233;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;3;0,232;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;4;0,234;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;5;0,224;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;6;0,235;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;7;0,226;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;8;0,236;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;9;0,353;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;10;1,106;R\nES00XXXXXXXXXXXXXXDB;01/11/2016;11\u001b[0m\n" ] ], [ [ "### Plots of the billed period", "_____no_output_____" ] ], [ [ "print_ok('Consumo diario')\nfactura.plot_consumo_diario()\nplt.show()", "\u001b[1m\u001b[32mConsumo diario\u001b[0m\n" ], [ "print_ok('Patrón de consumo semanal')\nfactura.plot_patron_semanal_consumo()\nplt.show()", "\u001b[1m\u001b[32mPatrón de consumo semanal\u001b[0m\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0defd55f47f805760eb8270a14f16c577dccc08
20,350
ipynb
Jupyter Notebook
1 - Python for Data Science/Module 3 - Python Programming Fundamentals/code/4-PY0101EN-3-2-Loops.ipynb
joaopaulo164/Data-Science-with-Python
eff0240f97c150e65e12a295e47b5dee2d1bdbf7
[ "MIT" ]
null
null
null
1 - Python for Data Science/Module 3 - Python Programming Fundamentals/code/4-PY0101EN-3-2-Loops.ipynb
joaopaulo164/Data-Science-with-Python
eff0240f97c150e65e12a295e47b5dee2d1bdbf7
[ "MIT" ]
null
null
null
1 - Python for Data Science/Module 3 - Python Programming Fundamentals/code/4-PY0101EN-3-2-Loops.ipynb
joaopaulo164/Data-Science-with-Python
eff0240f97c150e65e12a295e47b5dee2d1bdbf7
[ "MIT" ]
null
null
null
27.27882
921
0.537346
[ [ [ "<center>\n <img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png\" width=\"300\" alt=\"cognitiveclass.ai logo\" />\n</center>\n\n# Loops in Python\n\nEstimated time needed: **20** minutes\n\n## Objectives\n\nAfter completing this lab you will be able to:\n\n* work with the loop statements in Python, including for-loop and while-loop.\n", "_____no_output_____" ], [ "<h1>Loops in Python</h1>\n", "_____no_output_____" ], [ "<p><strong>Welcome!</strong> This notebook will teach you about the loops in the Python Programming Language. By the end of this lab, you'll know how to use the loop statements in Python, including for loop, and while loop.</p>\n", "_____no_output_____" ], [ "<h2>Table of Contents</h2>\n<div class=\"alert alert-block alert-info\" style=\"margin-top: 20px\">\n <ul>\n <li>\n <a href=\"https://#loop\">Loops</a>\n <ul>\n <li><a href=\"https://range/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\">Range</a></li>\n <li><a href=\"https://for/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\">What is <code>for</code> loop?</a></li>\n <li><a href=\"https://while/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\">What is <code>while</code> loop?</a></li>\n </ul>\n </li>\n <li>\n <a href=\"https://#quiz\">Quiz on Loops</a>\n </li>\n </ul>\n\n</div>\n\n<hr>\n", "_____no_output_____" ], [ "<h2 id=\"loop\">Loops</h2>\n", "_____no_output_____" ], [ "<h3 id=\"range\">Range</h3>\n", "_____no_output_____" ], [ "Sometimes, you might want to repeat a given operation many times. Repeated executions like this are performed by <b>loops</b>. We will look at two types of loops, <code>for</code> loops and <code>while</code> loops.\n\nBefore we discuss loops lets discuss the <code>range</code> object. It is helpful to think of the range object as an ordered list. For now, let's look at the simplest case. If we would like to generate an object that contains elements ordered from 0 to 2 we simply use the following command:\n", "_____no_output_____" ] ], [ [ "# Use the range\n\nrange(3)", "_____no_output_____" ] ], [ [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/range.PNG\" width=\"300\" />\n", "_____no_output_____" ], [ "***NOTE: While in Python 2.x it returned a list as seen in video lessons, in 3.x it returns a range object.***\n", "_____no_output_____" ], [ "<h3 id=\"for\">What is <code>for</code> loop?</h3>\n", "_____no_output_____" ], [ "The <code>for</code> loop enables you to execute a code block multiple times. 
For example, you would use this if you would like to print out every element in a list.\\\nLet's try to use a <code>for</code> loop to print all the years presented in the list <code>dates</code>:\n", "_____no_output_____" ], [ "This can be done as follows:\n", "_____no_output_____" ] ], [ [ "# For loop example\n\ndates = [1982,1980,1973]\nN = len(dates)\n\nfor i in range(N):\n print(dates[i]) ", "1982\n1980\n1973\n" ] ], [ [ "The code in the indent is executed <code>N</code> times; the value of <code>i</code> is increased by 1 with every execution. The statement executed is to <code>print</code> out the value in the list at index <code>i</code> as shown here:\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/LoopsForRange.gif\" width=\"800\" />\n", "_____no_output_____" ], [ "In this example we can print out a sequence of numbers from 0 to 7:\n", "_____no_output_____" ] ], [ [ "# Example of for loop\n\nfor i in range(0, 8):\n print(i)", "0\n1\n2\n3\n4\n5\n6\n7\n" ] ], [ [ "In Python we can directly access the elements in the list as follows:\n", "_____no_output_____" ] ], [ [ "# Example of for loop, loop through list\n\nfor year in dates: \n print(year) ", "1982\n1980\n1973\n" ] ], [ [ "For each iteration, the value of the variable <code>year</code> behaves like the value of <code>dates\\[i]</code> in the first example:\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/LoopsForList.gif\" width=\"800\">\n", "_____no_output_____" ], [ "We can change the elements in a list:\n", "_____no_output_____" ] ], [ [ "# Use for loop to change the elements in list\n\nsquares = ['red', 'yellow', 'green', 'purple', 'blue']\n\nfor i in range(0, 5):\n print(\"Before square \", i, 'is', squares[i])\n squares[i] = 'white'\n print(\"After square \", i, 'is', squares[i])", "Before square 0 is red\nAfter square 0 is white\nBefore square 1 is yellow\nAfter square 1 is white\nBefore square 2 is green\nAfter square 2 is white\nBefore square 3 is purple\nAfter square 3 is white\nBefore square 4 is blue\nAfter square 4 is white\n" ] ], [ [ "We can access the index and the elements of a list as follows:\n", "_____no_output_____" ] ], [ [ "# Loop through the list and iterate on both index and element value\n\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\n\nfor i, square in enumerate(squares):\n print(i, square)", "0 red\n1 yellow\n2 green\n3 purple\n4 blue\n" ] ], [ [ "<h3 id=\"while\">What is <code>while</code> loop?</h3>\n", "_____no_output_____" ], [ "As you can see, the <code>for</code> loop is used for a controlled flow of repetition. However, what if we don't know when we want to stop the loop? What if we want to keep executing a code block until a certain condition is met? The <code>while</code> loop exists as a tool for repeated execution based on a condition. The code block will keep being executed until the given logical condition returns a **False** boolean value.\n", "_____no_output_____" ], [ "Let’s say we would like to iterate through the list <code>dates</code> and stop at the year 1973, then print out the number of iterations. 
This can be done with the following block of code:\n", "_____no_output_____" ] ], [ [ "# While Loop Example\n\ndates = [1982, 1980, 1973, 2000]\n\ni = 0\nyear = dates[0]\n\nwhile(year != 1973): \n print(year)\n i = i + 1\n year = dates[i]\n \n\nprint(\"It took \", i ,\"repetitions to get out of loop.\")", "1982\n1980\nIt took 2 repetitions to get out of loop.\n" ] ], [ [ "A while loop simply iterates until the condition in its argument is no longer met, as shown in the following figure:\n", "_____no_output_____" ], [ "<img src=\"https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/LoopsWhile.gif\" width=\"650\" />\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "<h2 id=\"quiz\">Quiz on Loops</h2>\n", "_____no_output_____" ], [ "Write a <code>for</code> loop that prints out all the elements between <b>-5</b> and <b>5</b> using the range function.\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nfor i in range(-4, 5):\n print(i)", "-4\n-3\n-2\n-1\n0\n1\n2\n3\n4\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nfor i in range(-4, 5):\n print(i)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "Print the elements of the following list: <code>Genres=\\[ 'rock', 'R\\&B', 'Soundtrack', 'R\\&B', 'soul', 'pop']</code>\nMake sure you follow Python conventions.\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nGenres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']\nfor Genre in Genres:\n print(Genre)", "rock\nR&B\nSoundtrack\nR&B\nsoul\npop\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nGenres = ['rock', 'R&B', 'Soundtrack', 'R&B', 'soul', 'pop']\nfor Genre in Genres:\n print(Genre)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a for loop that prints out the following list: <code>squares=\\['red', 'yellow', 'green', 'purple', 'blue']</code>\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\nfor square in squares:\n print(square)", "red\nyellow\ngreen\npurple\nblue\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsquares=['red', 'yellow', 'green', 'purple', 'blue']\nfor square in squares:\n print(square)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a while loop to display the values of the Rating of an album playlist stored in the list <code>PlayListRatings</code>. If the score is less than 6, exit the loop. 
The list <code>PlayListRatings</code> is given by: <code>PlayListRatings = \[10, 9.5, 10, 8, 7.5, 5, 10, 10]</code>\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\nPlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]\ni = 0\nRating = PlayListRatings[0]\nwhile(i < len(PlayListRatings) and Rating >= 6):\n Rating = PlayListRatings[i]\n print(Rating)\n i = i + 1", "10\n9.5\n10\n8\n7.5\n5\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nPlayListRatings = [10, 9.5, 10, 8, 7.5, 5, 10, 10]\ni = 0\nRating = PlayListRatings[0]\nwhile(i < len(PlayListRatings) and Rating >= 6):\n Rating = PlayListRatings[i]\n print(Rating)\n i = i + 1\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n", "_____no_output_____" ], [ "Write a while loop to copy the strings <code>'orange'</code> from the list <code>squares</code> to the list <code>new_squares</code>. Stop and exit the loop if the value in the list is not <code>'orange'</code>:\n", "_____no_output_____" ] ], [ [ "# Write your code below and press Shift+Enter to execute\n\nsquares = ['orange', 'orange', 'purple', 'blue ', 'orange']\nnew_squares = []\ni = 0\nwhile(i < len(squares) and squares[i] == 'orange'):\n new_squares.append(squares[i])\n i = i + 1\nprint (new_squares)", "['orange', 'orange']\n" ] ], [ [ "<details><summary>Click here for the solution</summary>\n\n```python\nsquares = ['orange', 'orange', 'purple', 'blue ', 'orange']\nnew_squares = []\ni = 0\nwhile(i < len(squares) and squares[i] == 'orange'):\n new_squares.append(squares[i])\n i = i + 1\nprint (new_squares)\n \n```\n\n</details>\n", "_____no_output_____" ], [ "<hr>\n<h2>The last exercise!</h2>\n<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. 
So, please read and follow <a href=\"https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\" target=\"_blank\">this article</a> to learn how to share your work.\n<hr>\n", "_____no_output_____" ], [ "## Author\n\n<a href=\"https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\" target=\"_blank\">Joseph Santarcangelo</a>\n\n## Other contributors\n\n<a href=\"https://www.linkedin.com/in/jiahui-mavis-zhou-a4537814a?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0101ENSkillsNetwork19487395-2021-01-01\">Mavis Zhou</a>\n\n## Change Log\n\n| Date (YYYY-MM-DD) | Version | Changed By | Change Description |\n| ----------------- | ------- | ---------- | ---------------------------------- |\n| 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab |\n| | | | |\n| | | | |\n\n<hr/>\n\n## <h3 align=\"center\"> © IBM Corporation 2020. All rights reserved. <h3/>\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d0deff47bc06c6af71a39f0495063f7b749c29f6
1,114
ipynb
Jupyter Notebook
utils/decorator.ipynb
ksterx/kxnet
a638c072005549b86f73a734a62d78aeca697df2
[ "MIT" ]
null
null
null
utils/decorator.ipynb
ksterx/kxnet
a638c072005549b86f73a734a62d78aeca697df2
[ "MIT" ]
null
null
null
utils/decorator.ipynb
ksterx/kxnet
a638c072005549b86f73a734a62d78aeca697df2
[ "MIT" ]
1
2020-12-29T16:06:01.000Z
2020-12-29T16:06:01.000Z
18.881356
45
0.494614
[ [ [ "import time", "_____no_output_____" ], [ "def exec_time(func):\n def wrapper(*args, **kwargs):\n t1 = time.time()\n func(*args, **kwargs)\n t2 = time.time()\n print(t2 - t1, \"sec elapsed\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d0df027da00d442b5005033d324c1d4befb3c4bb
53,336
ipynb
Jupyter Notebook
matplotlib.ipynb
AKSK16101999/technovative
15b3c32fe3b253058118ed20d51c3b4e258f86b3
[ "MIT" ]
4
2020-11-04T16:04:07.000Z
2021-07-23T15:53:56.000Z
matplotlib.ipynb
AKSK16101999/technovative
15b3c32fe3b253058118ed20d51c3b4e258f86b3
[ "MIT" ]
null
null
null
matplotlib.ipynb
AKSK16101999/technovative
15b3c32fe3b253058118ed20d51c3b4e258f86b3
[ "MIT" ]
null
null
null
266.68
27,736
0.929147
[ [ [ "import matplotlib ", "_____no_output_____" ], [ "matplotlib.__version__\n", "_____no_output_____" ], [ "from matplotlib import pyplot as plt", "_____no_output_____" ] ], [ [ "# Line plot", "_____no_output_____" ] ], [ [ "x=[3,5,7,9,12,45,68,100]\ny=[100,300,900,400,600,200,700,500]\ny1=[1000,700,300,500,1200,100,600,900]\nplt.style.use('dark_background')\n#plt.xkcd()\nplt.plot(x,y,color='b',marker='o')\nplt.plot(x,y1,color='r',marker='.',linestyle='--')\nplt.legend(['F','S'])\nplt.grid()\nplt.tight_layout()\nplt.xlabel(\"x-point\")\nplt.ylabel(\"y-point\")\nplt.title('Simple line plot')\n\nplt.savefig(\"temp.jpg\")\nplt.show()", "_____no_output_____" ], [ "x=[3,13,29,40,65,78,90,100]\ny=[100,700,900,500,1200,200,700,1200]\ny1=[500,300,300,400,600,100,600,500]\n\nplt.style.use('dark_background')\nplt.bar(x,y,color='r',label='f')\nplt.bar(x,y1,color='b',label=\"s\")\nplt.xlabel('x-point')\nplt.ylabel('y-point')\nplt.title('bar ')\nplt.legend()\nplt.show()", "_____no_output_____" ], [ "x=[50,30,20]\nlabel=['a','b','c']\nplt.pie(x,labels=label,explode = [0.01,0.01,0.01],autopct='%1.1f%%')", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0df301149cd6c7fd96111ccb6a68848f5e7d719
33,112
ipynb
Jupyter Notebook
workshop/lab1/amazon-comprehend-custom-classification-byod-lab.ipynb
sriharshams/amazon-comprehend-workshop
7fc279a96a45ae175365a2d33e6cab1056147763
[ "MIT" ]
null
null
null
workshop/lab1/amazon-comprehend-custom-classification-byod-lab.ipynb
sriharshams/amazon-comprehend-workshop
7fc279a96a45ae175365a2d33e6cab1056147763
[ "MIT" ]
null
null
null
workshop/lab1/amazon-comprehend-custom-classification-byod-lab.ipynb
sriharshams/amazon-comprehend-workshop
7fc279a96a45ae175365a2d33e6cab1056147763
[ "MIT" ]
1
2020-11-30T05:21:43.000Z
2020-11-30T05:21:43.000Z
31.992271
415
0.575411
[ [ [ "# Amazon Comprehend Custom Classification - Lab\n\nThis notebook will serve as a template for the overall process of taking a text dataset and integrating it into [Amazon Comprehend Custom Classification](https://docs.aws.amazon.com/comprehend/latest/dg/how-document-classification.html) and perform NLP for custom classification.\n\n## Overview\n\n1. [Introduction to Amazon Comprehend Custom Classification](#Introduction)\n1. [Obtaining Your Data](#data)\n1. [Pre-processing data](#preprocess)\n1. [Building Custom Classification model](#build)\n1. [Evaluate Custom Classification model](#evaluate)\n1. [Cleanup](#cleanup)\n\n\n## Introduction to Amazon Comprehend Custom Classification <a class=\"anchor\" id=\"Introduction\"/>\n\nIf you are not familiar with Amazon Comprehend Custom Classification you can learn more about this tool on these pages:\n\n* [Product Page](https://aws.amazon.com/comprehend/)\n* [Product Docs](https://docs.aws.amazon.com/comprehend/latest/dg/how-document-classification.html)\n\n\n## Bring Your Own Data <a class=\"anchor\" id=\"data\"/>\n\nWe will be using Multi-Class mode in Amazon Comprehend Custom Classifier. Multi-class mode specifies a single class for each document. The individual classes are mutually exclusive, this part is important. If we have an overlapping classes, it is best to set expectaion that our model will learn and try predict same overlapping classes and accuracy might be impacted.\n\nWe are going to upload custom dataset. We ensure that dataset is a .csv and the format of the file must be one class and document per line. For example:\n```\nCLASS,Text of document 1\nCLASS,Text of document 2\nCLASS,Text of document 3\n```\nif we dont have the file in above fomat, we will convert it to above format.\n\nTo begin the cell below will complete the following:\n\n1. Create a directory for the data files.\n1. Upload the file manually to the nlp_data folder.", "_____no_output_____" ] ], [ [ "!mkdir nlp_data\n", "_____no_output_____" ] ], [ [ "With the data downloaded, now we will import the Pandas library as well as a few other data science tools in order to inspect the information.", "_____no_output_____" ] ], [ [ "import boto3\nfrom time import sleep\nimport os\nimport subprocess\nimport pandas as pd\nimport json\nimport time\nimport pprint\nimport numpy as np\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter\nimport matplotlib.dates as mdates\nimport secrets\nimport string\nimport datetime \nimport random", "_____no_output_____" ], [ "# run this only once\n! pip install tqdm", "_____no_output_____" ], [ "from tqdm import tqdm\ntqdm.pandas()", "_____no_output_____" ] ], [ [ "Please use the credentials that were part of initial login screen. Set the env variables.", "_____no_output_____" ] ], [ [ "import os\nos.environ['AWS_DEFAULT_REGION'] = \"us-east-1\"\nos.environ['AWS_ACCESS_KEY_ID'] = \"<AWS_ACCESS_KEY_ID>\"\nos.environ['AWS_SECRET_ACCESS_KEY'] = \"<AWS_SECRET_ACCESS_KEY>\"\nos.environ['AWS_SESSION_TOKEN'] = \"<AWS_SESSION_TOKEN>\"", "_____no_output_____" ] ], [ [ "Test previous aws configure is set properly by running following command", "_____no_output_____" ] ], [ [ "!echo $AWS_SESSION_TOKEN", "_____no_output_____" ] ], [ [ "Lets load the data in to dataframe and look at the data we uploaded. Examine the number of columns that are present. 
Look at a few samples to see the content of the data.", "_____no_output_____" ] ], [ [ "raw_data = pd.read_csv('nlp_data/raw_data.csv')\nraw_data.head()", "_____no_output_____" ], [ "raw_data['CATEGORY_NAME'] = raw_data['CATEGORY_NAME'].astype(str)\nraw_data.groupby('CATEGORY_NAME')['CASE_SUBJECT_FULL'].count()", "_____no_output_____" ] ], [ [ "To convert the data to the format required by the Amazon Comprehend Custom Classifier,\n\n```\nCLASS,Text of document 1\nCLASS,Text of document 2\nCLASS,Text of document 3\n```\nwe will identify which column is the class and which columns contain the text content we would like to train on, and create a new dataframe with the selected columns.\n", "_____no_output_____" ] ], [ [ "selected_columns = ['CATEGORY_NAME', 'CASE_SUBJECT_FULL', 'CASE_DESCRIPTION_FULL']", "_____no_output_____" ], [ "# Select the columns we are interested in\nselected_data = raw_data[selected_columns]\nselected_data = selected_data[selected_data['CATEGORY_NAME']!='Not Known']\nselected_data.shape", "_____no_output_____" ], [ "selected_data.groupby('CATEGORY_NAME')['CASE_SUBJECT_FULL'].count()", "_____no_output_____" ] ], [ [ "As we might be interested in finding out the accuracy level of the model compared to known labels, we want to hold out 10% of the dataset for later use, to run inference against the model and generate a performance matrix to assess it. We want a stratified split based on 'CATEGORY_NAME'.", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\ntrain_data, test_data = train_test_split(selected_data, test_size=0.1, random_state=0, \n stratify=selected_data[['CATEGORY_NAME']])\n\ntrain_data_df = train_data.copy()\ntest_data_df = test_data.copy()", "_____no_output_____" ] ], [ [ "## Pre-processing data<a class=\"anchor\" id=\"preprocess\"/> \n", "_____no_output_____" ], [ "For training, the file format must conform with the [following](https://docs.aws.amazon.com/comprehend/latest/dg/how-document-classification-training.html):\n\n- File must contain one label and one text per line – 2 columns\n- No header\n- Format UTF-8, carriage return “\\n”.\n\nLabels “must be uppercase, can be multitoken, have whitespace, consist of multiple words connected by underscores or hyphens or may even contain a comma, as long as it is correctly escaped.”\n\nHere are the proposed labels:\n\n| Index | Original | For training |\n| --- | --- | --- |\n| 1 | Company | COMPANY |\n| 2 | EducationalInstitution | EDUCATIONALINSTITUTION |\n| 3 | Artist | ARTIST |\n| 4 | Athlete | ATHLETE |\n| 5 | OfficeHolder | OFFICEHOLDER |\n| 6 | MeanOfTransportation | MEANOFTRANSPORTATION |\n| 7 | Building | BUILDING |\n| 8 | NaturalPlace | NATURALPLACE |\n| 9 | Village | VILLAGE |\n| 10 | Animal | ANIMAL |\n| 11 | Plant | PLANT |\n| 12 | Album | ALBUM |\n| 13 | Film | FILM |\n| 14 | WrittenWork | WRITTENWORK |\n\nFor the inference part - when you want your custom model to determine which label corresponds to a given text - the file format must conform with the following:\n\n- File must contain one text per line\n- No header\n- Format UTF-8, carriage return “\\n”.", "_____no_output_____" ] ], [ [ "labels_dict = {'Company':'COMPANY',\n 'EducationalInstitution':'EDUCATIONALINSTITUTION',\n 'Artist':'ARTIST',\n 'Athlete':'ATHLETE',\n 'OfficeHolder':'OFFICEHOLDER',\n 'MeanOfTransportation':'MEANOFTRANSPORTATION',\n 'Building':'BUILDING',\n 'NaturalPlace':'NATURALPLACE',\n 'Village':'VILLAGE',\n 'Animal':'ANIMAL',\n 'Plant':'PLANT',\n 'Album':'ALBUM',\n 'Film':'FILM',\n 
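 # NOTE (editor's assumption): these mappings mirror the DBpedia classes listed above; when bringing your own data, swap them for your own CATEGORY_NAME values\n 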
'WrittenWork':'WRITTENWORK'\n }", "_____no_output_____" ], [ "import re\n\ndef remove_between_square_brackets(text):\n return re.sub('\\[[^]]*\\]', '', text)\n\ndef denoise_text(text):\n text = remove_between_square_brackets(text)\n return text\n\ndef preprocess_text(document):\n document = denoise_text(document)\n # Remove all the special characters\n document = re.sub(r'\\W', ' ', str(document))\n\n # remove all single characters\n document = re.sub(r'\\s+[a-zA-Z]\\s+', ' ', document)\n\n # Remove single characters from the start\n document = re.sub(r'\\^[a-zA-Z]\\s+', ' ', document)\n\n # Substituting multiple spaces with single space\n document = re.sub(r'\\s+', ' ', document, flags=re.I)\n\n # Removing prefixed 'b'\n document = re.sub(r'^b\\s+', '', document)\n\n return document\n\ndef process_data(df):\n df['CATEGORY_NAME'] = df['CATEGORY_NAME'].apply(labels_dict.get)\n\n df['document'] = df[df.columns[1:]].progress_apply(\n lambda x: ' '.join(x.dropna().astype(str)),\n axis=1\n )\n\n df.drop(['CASE_SUBJECT_FULL' ,'CASE_DESCRIPTION_FULL'], axis=1, inplace=True)\n\n df.columns = ['class', 'text']\n \n df['text'] = df['text'].progress_apply(preprocess_text)\n \n return df", "_____no_output_____" ], [ "train_data_df = process_data(train_data_df)\ntest_data_df = process_data(test_data_df)", "_____no_output_____" ] ], [ [ "At this point we have all the data for the 2 needed files. \n\n### Building The Target Train and Test Files\n\nWith all of the above spelled out, the next thing to do is to build 2 distinct files:\n\n1. `comprehend-train.csv` - A CSV file containing 2 columns without header, first column class, second column text.\n1. `comprehend-test.csv` - A CSV file containing 1 column of text without header.", "_____no_output_____" ] ], [ [ "DSTTRAINFILE='nlp_data/comprehend-train.csv'\nDSTVALIDATIONFILE='nlp_data/comprehend-test.csv'\n\ntrain_data_df.to_csv(path_or_buf=DSTTRAINFILE,\n header=False,\n index=False,\n escapechar='\\\\',\n doublequote=False,\n quotechar='\"')\n\nvalidation_data_df = test_data_df.copy()\nvalidation_data_df.drop(['class'], axis=1, inplace=True)\nvalidation_data_df.to_csv(path_or_buf=DSTVALIDATIONFILE,\n header=False,\n index=False,\n escapechar='\\\\',\n doublequote=False,\n quotechar='\"')", "_____no_output_____" ] ], [ [ "## Getting Started With Amazon Comprehend\nNow that all of the required data exists, we can start working on the Comprehend Custom Classifier. \n\nThe custom classifier workload is built in two steps:\n\n1. Training the custom model – no particular machine learning or deep learning knowledge is necessary\n1. Classifying new data\n\nLet's follow the steps below for training the custom model:\n\n1. Create a bucket that will host training data\n1. Create a bucket that will host training data artifacts and production results. That can be the same bucket\n1. Configure an IAM role allowing Comprehend to [access newly created buckets](https://docs.aws.amazon.com/comprehend/latest/dg/access-control-managing-permissions.html#auth-role-permissions)\n1. Prepare data for training\n1. Upload the training data to the S3 bucket\n1. Launch a “Train Classifier” job from the console: “Amazon Comprehend” > “Custom Classification” > “Train Classifier”\n1. Prepare data for classification (one text per line, no header, same format as training data). 
Some more details [here](https://docs.aws.amazon.com/comprehend/latest/dg/how-class-run.html)\n", "_____no_output_____" ], [ "Now, using the metadata stored on this instance of a SageMaker Notebook, determine the region we are operating in. If you are using a Jupyter Notebook outside of SageMaker, simply define `region` as the string that indicates the region you would like to use for Comprehend and S3.", "_____no_output_____" ] ], [ [ "with open('/opt/ml/metadata/resource-metadata.json') as notebook_info:\n data = json.load(notebook_info)\n resource_arn = data['ResourceArn']\n region = resource_arn.split(':')[3]\nprint(region)", "_____no_output_____" ] ], [ [ "Configure your AWS APIs", "_____no_output_____" ] ], [ [ "session = boto3.Session(region_name=region) \ncomprehend = session.client(service_name='comprehend')", "_____no_output_____" ] ], [ [ "Let's create an S3 bucket that will host the training and test data.", "_____no_output_____" ] ], [ [ "# Create an S3 bucket with a randomized name for the data\nprint(region)\ns3 = boto3.client('s3')\nprefix = 'ComprehendBYODPediaClassification'\naccount_id = boto3.client('sts').get_caller_identity().get('Account')\nbucket_name = account_id + \"-comprehend-byod-classification-{}\".format(''.join(\n secrets.choice(string.ascii_lowercase + string.digits) for i in range(8)))\nprint(bucket_name)\nif region != \"us-east-1\":\n s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region})\nelse:\n s3.create_bucket(Bucket=bucket_name)", "_____no_output_____" ] ], [ [ "### Uploading the data", "_____no_output_____" ] ], [ [ "boto3.Session().resource('s3').Bucket(bucket_name).Object(prefix+'/'+DSTTRAINFILE).upload_file(DSTTRAINFILE)\nboto3.Session().resource('s3').Bucket(bucket_name).Object(prefix+'/'+DSTVALIDATIONFILE).upload_file(DSTVALIDATIONFILE)", "_____no_output_____" ] ], [ [ "### Configure an IAM role\n\nIn order to authorize Amazon Comprehend to perform bucket reads and writes during the training or during the inference, we must grant Amazon Comprehend access to the Amazon S3 bucket that we created.\n\nWe are going to create a data access role in our account to trust the Amazon Comprehend service principal.\n", "_____no_output_____" ] ], [ [ "iam = boto3.client(\"iam\")\n\nrole_name = \"ComprehendBucketAccessRole-{}\".format(''.join(\n secrets.choice(string.ascii_lowercase + string.digits) for i in range(8)))\nassume_role_policy_document = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Service\": \"comprehend.amazonaws.com\"\n },\n \"Action\": \"sts:AssumeRole\"\n }\n ]\n}\n\ncreate_role_response = iam.create_role(\n RoleName = role_name,\n AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)\n)\n\npolicy_arn = \"arn:aws:iam::aws:policy/ComprehendFullAccess\"\niam.attach_role_policy(\n RoleName = role_name,\n PolicyArn = policy_arn\n)\n\n# Now add S3 support\niam.attach_role_policy(\n PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess',\n RoleName=role_name\n)\ntime.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate\n\nrole_arn = create_role_response[\"Role\"][\"Arn\"]\nprint(role_arn)", "_____no_output_____" ] ], [ [ "## Building Custom Classification model <a class=\"anchor\" id=\"#build\"/>\n\nLaunch the classifier training:", "_____no_output_____" ] ], [ [ "s3_train_data = 's3://{}/{}/{}'.format(bucket_name, prefix, DSTTRAINFILE)\ns3_output_job = 's3://{}/{}/{}'.format(bucket_name, prefix, 'output/train_job')\nprint('training data location: 
',s3_train_data, \"output location:\", s3_output_job)", "_____no_output_____" ], [ "id = str(datetime.datetime.now().strftime(\"%s\"))\ntraining_job = comprehend.create_document_classifier(\n DocumentClassifierName='BYOD-Custom-Classifier-'+ id,\n DataAccessRoleArn=role_arn,\n InputDataConfig={\n 'S3Uri': s3_train_data\n },\n OutputDataConfig={\n 'S3Uri': s3_output_job\n },\n LanguageCode='en'\n)", "_____no_output_____" ], [ "jobArn = training_job['DocumentClassifierArn']\n\nmax_time = time.time() + 3*60*60 # 3 hours\nwhile time.time() < max_time:\n describe_custom_classifier = comprehend.describe_document_classifier(\n DocumentClassifierArn = jobArn\n )\n status = describe_custom_classifier[\"DocumentClassifierProperties\"][\"Status\"]\n print(\"Custom classifier: {}\".format(status))\n \n if status == \"TRAINED\" or status == \"IN_ERROR\":\n break\n \n time.sleep(60)", "_____no_output_____" ] ], [ [ "## Trained model confusion matrix\n\nWhen a custom classifier model is trained, Amazon Comprehend creates a confusion matrix that provides metrics on how well the model performed in training. This enables you to assess how well the classifier will perform when run. This matrix shows a matrix of labels as predicted by the model compared to actual labels and is created using 10 to 20 percent of the documents submitted to test the trained model.", "_____no_output_____" ] ], [ [ "#Retrieve the S3URI from the model output and create jobkey variable.\njob_output = describe_custom_classifier[\"DocumentClassifierProperties\"][\"OutputDataConfig\"][\"S3Uri\"]\npath_prefix = 's3://{}/'.format(bucket_name)\njob_key = os.path.relpath(job_output, path_prefix)", "_____no_output_____" ], [ "#Download the model metrics\nboto3.Session().resource('s3').Bucket(bucket_name).download_file(job_key, './output.tar.gz')", "_____no_output_____" ], [ "!ls -ltr", "_____no_output_____" ], [ "#Unpack the gzip file\n!tar xvzf ./output.tar.gz", "_____no_output_____" ], [ "import json\n\nwith open('output/confusion_matrix.json') as f:\n comprehend_cm = json.load(f)\n\ncm_array = comprehend_cm['confusion_matrix']\n\n\ndef plot_confusion_matrix(cm_array, labels):\n df_cm = pd.DataFrame(cm_array, index = [i for i in labels],\n columns = [i for i in labels])\n\n #sn.set(font_scale=1.4) # for label size\n plt.figure(figsize = (15,13))\n sn.heatmap(df_cm, annot=True) # font size\n\n plt.show()\n\nplot_confusion_matrix(cm_array, labels = comprehend_cm['labels'])", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\nimport numpy as np\n\ncm = np.array(comprehend_cm['confusion_matrix'])\n\ncols = ['label','precision', 'recall','f1_score','type']\nmodels_report = pd.DataFrame(columns = cols)\n\ndef precision(label, confusion_matrix):\n col = confusion_matrix[:, label]\n return confusion_matrix[label, label] / col.sum()\n \ndef recall(label, confusion_matrix):\n row = confusion_matrix[label, :]\n return confusion_matrix[label, label] / row.sum()\n\ndef precision_macro_average(confusion_matrix):\n rows, columns = confusion_matrix.shape\n sum_of_precisions = 0\n for label in range(rows):\n sum_of_precisions += precision(label, confusion_matrix)\n return sum_of_precisions / rows\n\ndef recall_macro_average(confusion_matrix):\n rows, columns = confusion_matrix.shape\n sum_of_recalls = 0\n for label in range(columns):\n sum_of_recalls += recall(label, confusion_matrix)\n return sum_of_recalls / columns\n\ndef f1_score(precision, recall):\n return (2 * (precision * recall) / (precision + recall))\n\ndef 
accuracy(confusion_matrix):\n diagonal_sum = confusion_matrix.trace()\n sum_of_all_elements = confusion_matrix.sum()\n return diagonal_sum / sum_of_all_elements \n\ndef display_confusion_matrix(cm, labels, matrix_type, models_report):\n #print(\"label precision recall f1score\")\n count = 0\n for label in labels:\n p = precision(count, cm)\n r = recall(count, cm)\n f1 = f1_score(p, r)\n #print(f\"{labels_dict.get(label)} {p:2.4f} {r:2.4f} {f1:2.4f}\")\n tmp = pd.Series({'label': label,\\\n 'precision' : p,\\\n 'recall': r,\\\n 'f1_score': f1,\\\n 'type': matrix_type\n })\n models_report = models_report.append(tmp, ignore_index = True)\n count += 1\n #print(models_report) \n\n p_total = precision_macro_average(cm)\n print(f\"precision total: {p_total:2.4f}\")\n\n r_total = recall_macro_average(cm)\n print(f\"recall total: {r_total:2.4f}\")\n\n\n\n a_total = accuracy(cm)\n print(f\"accuracy total: {a_total:2.4f}\")\n\n f1_total = f1_score(p_total, r_total)\n print(f\"f1 total: {f1_total:2.4f}\")\n \n return models_report\n\ntraining_model_report = display_confusion_matrix(cm, comprehend_cm['labels'], 'training_matrix', models_report)\ntraining_model_report.sort_values(by=['f1_score'], inplace=True, ascending=False)\nprint(training_model_report.to_string(index=False))", "_____no_output_____" ] ], [ [ "## Evaluate Custom Classification model <a class=\"anchor\" id=\"evaluate\"/>\n\nWe will use a custom classification job to evaluate the model on the test data we have.", "_____no_output_____" ] ], [ [ "model_arn = describe_custom_classifier[\"DocumentClassifierProperties\"][\"DocumentClassifierArn\"]\nprint(model_arn)", "_____no_output_____" ], [ "s3_test_data = 's3://{}/{}/{}'.format(bucket_name, prefix, DSTVALIDATIONFILE)\nprint(s3_test_data)", "_____no_output_____" ], [ "id = str(datetime.datetime.now().strftime(\"%s\"))\n\nstart_response = comprehend.start_document_classification_job(\n JobName = 'BYOD-Custom-Classifier-Inference'+ id,\n InputDataConfig={\n 'S3Uri': s3_test_data,\n 'InputFormat': 'ONE_DOC_PER_LINE'\n },\n OutputDataConfig={\n 'S3Uri': s3_output_job\n },\n DataAccessRoleArn=role_arn,\n DocumentClassifierArn=model_arn\n)\n\nprint(\"Start response: %s\\n\" % start_response)\n\n# Check the status of the job\ndescribe_response = comprehend.describe_document_classification_job(JobId=start_response['JobId'])\nprint(\"Describe response: %s\\n\" % describe_response)\n\n# List all classification jobs in account\nlist_response = comprehend.list_document_classification_jobs()\nprint(\"List response: %s\\n\" % list_response)", "_____no_output_____" ], [ "max_time = time.time() + 3*60*60 # 3 hours\nwhile time.time() < max_time:\n describe_response = comprehend.describe_document_classification_job(JobId=start_response['JobId'])\n status = describe_response[\"DocumentClassificationJobProperties\"][\"JobStatus\"]\n print(\"Custom classifier job status : {}\".format(status))\n \n if status == \"COMPLETED\" or status == \"FAILED\" or status == \"STOP_REQUESTED\" or status== \"STOPPED\":\n break\n \n time.sleep(30)", "_____no_output_____" ], [ "inference_s3uri = describe_response[\"DocumentClassificationJobProperties\"][\"OutputDataConfig\"][\"S3Uri\"]\npath_prefix = 's3://{}/'.format(bucket_name)\ninference_job_key = os.path.relpath(inference_s3uri, path_prefix)\nboto3.Session().resource('s3').Bucket(bucket_name).download_file(inference_job_key, './inference_output.tar.gz')", "_____no_output_____" ], [ "#Unpack the gzip file\n!tar xvzf ./inference_output.tar.gz", "_____no_output_____" ], [ "def 
load_jsonl(input_path) -> list:\n    \"\"\"\n    Read list of objects from a JSON lines file.\n    \"\"\"\n    data = []\n    with open(input_path, 'r', encoding='utf-8') as f:\n        for line in f:\n            data.append(json.loads(line.rstrip('\\n|\\r')))\n    print('Loaded {} records from {}'.format(len(data), input_path))\n    return data\n\ninference_data = load_jsonl('predictions.jsonl')", "_____no_output_____" ], [ "test_data_df.shape", "_____no_output_____" ], [ "inferred_class = []\nfor line in inference_data:\n    predicted_class = sorted(line['Classes'], key=lambda x: x['Score'], reverse=True)[0]['Name']\n    inferred_class.append(predicted_class)\n    ", "_____no_output_____" ], [ "test_data_df[\"predicted_class\"] = inferred_class\ntest_data_df.head()", "_____no_output_____" ] ], [ [ "Let's generate the confusion matrix and other evaluation metrics for the inferred results", "_____no_output_____" ] ], [ [ "import sklearn\nprint('The scikit-learn version is {}.'.format(sklearn.__version__))", "_____no_output_____" ], [ "from sklearn.metrics import confusion_matrix\n\ny_true = test_data_df['class']\ny_pred = test_data_df['predicted_class']\nlabels = comprehend_cm['labels']\ncm_inference = confusion_matrix(y_true, y_pred, labels=labels)", "_____no_output_____" ], [ "plot_confusion_matrix(cm_inference, labels = labels)", "_____no_output_____" ], [ "inference_model_report = display_confusion_matrix(cm_inference, labels, 'inference_matrix', models_report)\n\ninference_model_report.sort_values(by=['f1_score'], inplace=True, ascending=False)\nprint(inference_model_report.to_string(index=False))", "_____no_output_____" ], [ "%store bucket_name\n%store region\n%store jobArn\n%store role_arn", "_____no_output_____" ] ], [ [ "## Cleanup <a class=\"anchor\" id=\"cleanup\"/>\nRun [clean up notebook](./Cleanup.ipynb) to clean up all the resources", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0df3d94b63d6ac2f6383f2ae390ded464dfe8dd
4,837
ipynb
Jupyter Notebook
master.ipynb
hfwittmann/colab-mlflow-papermill
3fdbb51e5247b2367ff93320da12b6a3159b3812
[ "MIT" ]
11
2019-09-29T02:50:08.000Z
2021-09-20T14:00:01.000Z
master.ipynb
hfwittmann/colab-mlflow-papermill
3fdbb51e5247b2367ff93320da12b6a3159b3812
[ "MIT" ]
null
null
null
master.ipynb
hfwittmann/colab-mlflow-papermill
3fdbb51e5247b2367ff93320da12b6a3159b3812
[ "MIT" ]
2
2019-10-28T12:50:50.000Z
2022-03-20T04:07:01.000Z
4,837
4,837
0.678106
[ [ [ "# Set up google drive access", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/gdrive')", "_____no_output_____" ] ], [ [ "# Goto directory\n\n## Make sure this is the directory that corresponds to that on your local drive!!!", "_____no_output_____" ] ], [ [ "local_googledrive = '/Users/<username>/Google Drive/'\ncolab_googledrive_directory = \"/content/gdrive/My Drive/\"\nproject_directory = 'git/colab-mlflow-papermill/'", "_____no_output_____" ], [ "cd {colab_googledrive_directory}{project_directory}", "_____no_output_____" ] ], [ [ "# Install missing packages", "_____no_output_____" ] ], [ [ "import importlib\n\nif importlib.util.find_spec('mlflow') is None:\n !pip install mlflow\n\nif importlib.util.find_spec('papermill') is None:\n !pip install papermill\n# !pip install -r requirements.txt", "_____no_output_____" ] ], [ [ "# Start", "_____no_output_____" ] ], [ [ "# import packages\nimport os\nimport pandas as pd\n\nimport papermill as pm\nimport mlflow", "_____no_output_____" ] ], [ [ "# Load model names\n## ... and make directories", "_____no_output_____" ] ], [ [ "# Create directory for temporary storage of artefacts and \n# create mlflow experiment\n\nartefacts_temp_dir = 'artefacts_temporary_depot'\nif not os.path.exists(artefacts_temp_dir):\n os.makedirs(artefacts_temp_dir)", "_____no_output_____" ], [ "pwd", "_____no_output_____" ], [ "2\n# create mlflow experiment\nmlflow.create_experiment('Iris Classification')", "_____no_output_____" ], [ "model_list = ['decision_tree', 'logistic_regression', 'svm']\n\nfor model_name in model_list:\n print(f\"Running {model_name!r} model\")\n\n now = str(pd.to_datetime('now'))\n notebook_out = f'{artefacts_temp_dir}/{model_name}-{now}.ipynb'\n \n parameters = {'model_name': model_name,\n 'notebook_out': notebook_out,\n 'artefacts_temp_dir': artefacts_temp_dir\n }\n\n pm.execute_notebook(\n input_path = 'minion.ipynb',\n output_path = notebook_out,\n parameters = parameters\n )", "_____no_output_____" ] ], [ [ "# Make metafiles local", "_____no_output_____" ] ], [ [ "from util import make_metafiles_local\nmake_metafiles_local(colab_googledrive_directory, local_googledrive)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0df3e03fda04d3d2b0906a76338d4214f4667da
2,683
ipynb
Jupyter Notebook
fc_svd.ipynb
dummy-lee/ZTE_Challenge_2019
455bb9044e53a4006f75be20e54413cb617bf841
[ "MIT" ]
null
null
null
fc_svd.ipynb
dummy-lee/ZTE_Challenge_2019
455bb9044e53a4006f75be20e54413cb617bf841
[ "MIT" ]
null
null
null
fc_svd.ipynb
dummy-lee/ZTE_Challenge_2019
455bb9044e53a4006f75be20e54413cb617bf841
[ "MIT" ]
null
null
null
20.022388
113
0.487141
[ [ [ "import caffe\nimport numpy as np", "_____no_output_____" ], [ "r = 240", "_____no_output_____" ], [ "net = caffe.Net(\"./models/no_bn/TestModel.prototxt\", \"./models/no_bn/TestModel.caffemodel\", caffe.TEST)", "_____no_output_____" ], [ "weight, bias = net.params['fc5_']", "_____no_output_____" ], [ "U, sigma, VT = np.linalg.svd(weight.data, full_matrices=False)", "_____no_output_____" ], [ "sigma[:r].sum()/sigma.sum()", "_____no_output_____" ], [ "net2 = caffe.Net(\"./models/fc_svd/TestModel.prototxt\", caffe.TEST)", "_____no_output_____" ], [ "for key in net2.params.keys():\n if key == 'fc5_1':\n net2.params[key][0].data[...] = np.dot(np.eye(r) * sigma[:r], VT[:r])\n elif key =='fc5_':\n net2.params[key][0].data[...] = U[:,:r]\n net2.params[key][1].data[...] = bias.data\n else:\n net2.params[key][0].data[...] = net.params[key][0].data\n net2.params[key][1].data[...] = net.params[key][1].data", "_____no_output_____" ], [ "net2.save(\"./models/fc_svd/TestModel.caffemodel\")", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0df4001ef8f72e95b5d6bc20291b2e64c53fe45
9,718
ipynb
Jupyter Notebook
Trying my new module.ipynb
denilsonsa/rpi-led-display
a6001ce01431b29f0a3c32584d9d7bee1e81d870
[ "MIT" ]
null
null
null
Trying my new module.ipynb
denilsonsa/rpi-led-display
a6001ce01431b29f0a3c32584d9d7bee1e81d870
[ "MIT" ]
null
null
null
Trying my new module.ipynb
denilsonsa/rpi-led-display
a6001ce01431b29f0a3c32584d9d7bee1e81d870
[ "MIT" ]
null
null
null
21.172113
91
0.43723
[ [ [ "from time import sleep\nfrom tm1640 import TM1640", "_____no_output_____" ] ], [ [ "# Simple and elegant usage", "_____no_output_____" ] ], [ [ "with TM1640(clk_pin=24, din_pin=23) as d:\n d.brightness = 0\n d.write_text('HELLO')\n for i in [1,1,1,1,1, -1,-1,-1,-1,-1]:\n sleep(1)\n d.brightness += i", "_____no_output_____" ] ], [ [ "# Global object for the purposes of this notebook", "_____no_output_____" ] ], [ [ "disp = TM1640(clk_pin=24, din_pin=23)", "_____no_output_____" ], [ "disp.brightness = 1", "_____no_output_____" ] ], [ [ "# Showing some garbage", "_____no_output_____" ] ], [ [ "disp.write_bytes(b'0123456789abcdef')", "_____no_output_____" ], [ "disp.write_bytes([0xff, 0xef, 0b10000000, 0x63])", "_____no_output_____" ], [ "disp.write_bytes([0, 0x63, 0x5c, 0], 8)", "_____no_output_____" ], [ "import random\nfor i in range(64):\n disp.write_bytes(bytes(random.randrange(256) for b in range(16)))\n sleep(1 / (i+1))", "_____no_output_____" ] ], [ [ "# Testing different characters", "_____no_output_____" ] ], [ [ "disp.write_text('.01.02..03')", "_____no_output_____" ], [ "disp.write_text('0123456789 yYzZ')", "_____no_output_____" ], [ "disp.write_text('aAbBcCdDeEfFgGhH')", "_____no_output_____" ], [ "disp.write_text('iIjJkKlLmMnNoOpP')", "_____no_output_____" ], [ "disp.write_text('qQrRsStTuUvVwWxX')", "_____no_output_____" ], [ "disp.write_text('~!@#$%^&*()[]{}')", "_____no_output_____" ], [ "disp.write_text('-_¯\\'`\"+=,./\\\\:;')", "_____no_output_____" ], [ "disp.write_text('🯰🯱🯲🯳🯴🯵🯶🯷🯸🯹⁐ニ≡‾|‖')", "_____no_output_____" ], [ "disp.write_text('⌈⌉⌊⌋⎾⏋⎿⏌⌜⌝⌞⌟⌌⌍⌎⌏')", "_____no_output_____" ], [ "disp.write_text('⊦⊢⊣⎡⎢⎣⎤⎥⎦')", "_____no_output_____" ], [ "disp.write_text('⊏⊑⊐⊒⊓⊔⋂⋃Πμ')", "_____no_output_____" ] ], [ [ "# Some progress bar simulations", "_____no_output_____" ] ], [ [ "import textwrap\ndef animate(textarea):\n for i in textwrap.dedent(textarea).strip().splitlines():\n disp.write_text(i)\n sleep(1)", "_____no_output_____" ], [ "animate('''\n [ ]\n [⁐ ]\n [⁐⁐ ]\n [⁐⁐⁐]\n''')", "_____no_output_____" ], [ "animate('''\n [ ]\n [. ]\n [.. ]\n [... ]\n [....]\n''')", "_____no_output_____" ], [ "animate('''\n ⌈ ⌉\n [ ⌉\n [. ⌉\n [._ ⌉\n [._. ⌉\n [._._ ⌉\n [._._. ⌉\n [._._._⌉\n [._._._.⌉\n [._._._.]\n''')", "_____no_output_____" ], [ "animate('''\n [ ]\n E ]\n 8 ]\n 8| ]\n 8E ]\n 88 ]\n 88|]\n 88E]\n 888]\n 8880\n 8888\n''')", "_____no_output_____" ], [ "animate('''\n ⎢ ⎥\n ‖ ⎥\n ‖⎢ ⎥\n ‖‖ ⎥\n ‖‖⎢ ⎥\n ‖‖‖ ⎥\n ‖‖‖⎢⎥\n ‖‖‖‖⎥\n ‖‖‖‖‖\n''')", "_____no_output_____" ], [ "from collections import namedtuple\n\nPBS = namedtuple('PBS', 'left middle right half full') # PBS = Progress Bar Style\n\nprogress_bar_styles = [\n # ⌊ _ _ _ ⌋\n PBS(0b_0011000, 0b_0001000, 0b_0001100, 0b0100000, 0b0100010),\n # ⌊._._._.⌋\n PBS(0b10011000, 0b10001000, 0b_0001100, 0b0100000, 0b0100010),\n # ⌈ ¯ ¯ ¯ ⌉\n PBS(0b_0100001, 0b_0000001, 0b_0000011, 0b0010000, 0b0010100),\n # ‖.‖.‖. . 
.\n PBS(0b10000000, 0b10000000, 0b10000000, 0b0110000, 0b0110110),\n # ‖.‖.‖._._.\n PBS(0b10001000, 0b10001000, 0b10001000, 0b0110000, 0b0110110),\n]\n\n\ndef progress_bar(total, filled, theme):\n assert total >= 1\n assert isinstance(total, int)\n bits = total * 2\n marks = round(filled * 2)\n\n buffer = [0] * total\n buffer[0] |= theme.left\n buffer[-1] |= theme.right\n for i in range(total):\n if i > 0 and i + 1 < total:\n buffer[i] |= theme.middle\n if i * 2 + 1 < marks:\n buffer[i] |= theme.full\n elif i * 2 < marks:\n buffer[i] |= theme.half\n return bytes(buffer)", "_____no_output_____" ], [ "def byteanimate(iterable, delay=1.0):\n disp.write_text('')\n for i in iterable:\n disp.write_bytes(i)\n sleep(delay)", "_____no_output_____" ], [ "for total in [1, 2, 3, 4, 8]:\n for theme in progress_bar_styles:\n byteanimate(\n (progress_bar(total, i / 2, theme) for i in range(0, 2 * total + 1)),\n 1 / total\n )", "_____no_output_____" ], [ "# This looks like a stereo VU meter.\ndef double_progress_bar(total, top, bottom):\n assert total >= 1\n assert isinstance(total, int)\n bits = total * 2\n tops = round(top * 2)\n bots = round(bottom * 2)\n\n buffer = [0] * total\n for i in range(total):\n if i * 2 + 1 < tops:\n buffer[i] |= 0b0100010\n elif i * 2 < tops:\n buffer[i] |= 0b0100000\n if i * 2 + 1 < bots:\n buffer[i] |= 0b0010100\n elif i * 2 < bots:\n buffer[i] |= 0b0010000\n return bytes(buffer)", "_____no_output_____" ], [ "import math\nfor total in [1, 2, 3, 8]:\n for theme in progress_bar_styles:\n byteanimate(\n (double_progress_bar(\n total,\n (math.sin(2 * i * math.tau / 64) + 1) / 2 * total,\n (math.cos(2 * i * math.tau / 64) + 1) / 2 * total\n ) for i in range(0, 64)),\n 1 / 64\n )", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0df41768e59b173d773c3e0a8523c1496c35bc4
23,679
ipynb
Jupyter Notebook
decipher/decipher.ipynb
anoopsarkar/nlp-class-hw-archive
04c92930ae0be9c5cdf3c41234604af97335e943
[ "MIT" ]
null
null
null
decipher/decipher.ipynb
anoopsarkar/nlp-class-hw-archive
04c92930ae0be9c5cdf3c41234604af97335e943
[ "MIT" ]
null
null
null
decipher/decipher.ipynb
anoopsarkar/nlp-class-hw-archive
04c92930ae0be9c5cdf3c41234604af97335e943
[ "MIT" ]
1
2021-01-27T01:20:00.000Z
2021-01-27T01:20:00.000Z
38.628059
582
0.353985
[ [ [ "# Homework: Decipherment", "_____no_output_____" ] ], [ [ "from collections import defaultdict, Counter\nimport collections\nimport pprint\nimport math\nimport bz2\npp = pprint.PrettyPrinter(width=45, compact=True)", "_____no_output_____" ] ], [ [ "First let us read in the cipher text from the `data` directory:", "_____no_output_____" ] ], [ [ "def read_file(filename):\n if filename[-4:] == \".bz2\":\n with bz2.open(filename, 'rt') as f:\n content = f.read()\n f.close()\n else:\n with open(filename, 'r') as f:\n content = f.read()\n f.close()\n return content\n\ncipher = read_file(\"data/cipher.txt\")\nprint(cipher)", "º∫P/Z/uB∫ÀOR•–X•B\nWV+≈GyF∞ºHPπKÇ—y≈\nMJy^uIÀΩ—T‘NQyDµ£\nS¢/º∑BPORAu∫∆R×E\nÀ^LMZJƒ“\\–FHVW≈æy\nπ+—GDºKI£∞—Xæµ§S¢\nRN‘IyEÃOæ—GBTQS∑B\nLƒ/P∑BπX—EHMu^RRÀ\n√ZK—–I£W—ÇæµLM“º∑\nBPDR+j•∞\\N¢≈EuHÀF\nZ√–OVWIµ+‘L£Ã^R∞H\nIºDR∏Ty“\\ƒ≈/πXJQA\nPµMæRu‘∫L£NVEKH•G\n“IÇJÀµºæLMÃNA£Z¢P\n§u–ÀAº∑BVW\\+VT‘OP\n^•S“Ã∆u≈∞ΩD§G∫∫IM\nNÀ£S√E/º∫∫Z∆AP∑BV\n–≈X—W—∏F∑æ√+πºAºB\n∫OTµRu√+∏ƒy—∏^S—W\nVZ≈GyKE∏TyAº∫∑L‘∏\nHÇFBXº§XADƒ\\ΩLÇ•—\n∏≈ƒ∑∑∞≈µPORXQF∫G√\nZπJT‘—∏æJI+“BPQW∞\nVEX“ºWI∞—EHM£•uIÀ\n" ] ], [ [ "## Default Solution", "_____no_output_____" ], [ "For the default solution we need to compute statistics like length, number of symbols/letters, \nunique occurences, frequencies and relative frequencies of a given file. This is done in the function `get_statistics` below.\n\nWhile using `get_statistics`, make sure that `cipher=True` is set when the input is a ciphertext.", "_____no_output_____" ] ], [ [ "def get_statistics(content, cipher=True):\n stats = {}\n content = list(content)\n split_content = [x for x in content if x != '\\n' and x!=' ']\n length = len(split_content)\n symbols = set(split_content)\n uniq_sym = len(list(symbols))\n freq = collections.Counter(split_content)\n rel_freq = {}\n for sym, frequency in freq.items():\n rel_freq[sym] = (frequency/length)*100\n \n if cipher:\n stats = {'content':split_content, 'length':length, 'vocab':list(symbols), 'vocab_length':uniq_sym, 'frequencies':freq, 'relative_freq':rel_freq}\n else:\n stats = {'length':length, 'vocab':list(symbols), 'vocab_length':uniq_sym, 'frequencies':freq, 'relative_freq':rel_freq}\n return stats", "_____no_output_____" ], [ "cipher_desc = get_statistics(cipher, cipher=True)\npp.pprint(cipher_desc)", "{'content': ['º', '∫', 'P', '/', 'Z', '/',\n 'u', 'B', '∫', 'À', 'O', 'R',\n '•', '–', 'X', '•', 'B', 'W',\n 'V', '+', '≈', 'G', 'y', 'F',\n '∞', 'º', 'H', 'P', 'π', 'K',\n 'Ç', '—', 'y', '≈', 'M', 'J',\n 'y', '^', 'u', 'I', 'À', 'Ω',\n '—', 'T', '‘', 'N', 'Q', 'y',\n 'D', 'µ', '£', 'S', '¢', '/',\n 'º', '∑', 'B', 'P', 'O', 'R',\n 'A', 'u', '∫', '∆', 'R', 'Ã',\n '—', 'E', 'À', '^', 'L', 'M',\n 'Z', 'J', 'ƒ', '“', '\\\\', '–',\n 'F', 'H', 'V', 'W', '≈', 'æ',\n 'y', 'π', '+', '—', 'G', 'D',\n 'º', 'K', 'I', '£', '∞', '—',\n 'X', 'æ', 'µ', '§', 'S', '¢',\n 'R', 'N', '‘', 'I', 'y', 'E',\n 'Ã', 'O', 'æ', '—', 'G', 'B',\n 'T', 'Q', 'S', '∑', 'B', 'L',\n 'ƒ', '/', 'P', '∑', 'B', 'π',\n 'X', '—', 'E', 'H', 'M', 'u',\n '^', 'R', 'R', 'À', '√', 'Z',\n 'K', '—', '–', 'I', '£', 'W',\n '—', 'Ç', 'æ', 'µ', 'L', 'M',\n '“', 'º', '∑', 'B', 'P', 'D',\n 'R', '+', 'j', '•', '∞', '\\\\',\n 'N', '¢', '≈', 'E', 'u', 'H',\n 'À', 'F', 'Z', '√', '–', 'O',\n 'V', 'W', 'I', 'µ', '+', '‘',\n 'L', '£', 'Ã', '^', 'R', '∞',\n 'H', 'I', 'º', 'D', 'R', '∏',\n 'T', 'y', '“', '\\\\', 'ƒ', '≈',\n '/', 'π', 'X', 'J', 'Q', 'A',\n 'P', 'µ', 'M', 'æ', 'R', 'u',\n '‘', '∫', 'L', '£', 'N', 'V',\n 'E', 'K', 'H', '•', 'G', '“',\n 'I', 'Ç', 'J', 'À', 'µ', 'º',\n 'æ', 'L', 
'M', 'Ã', 'N', 'A',\n '£', 'Z', '¢', 'P', '§', 'u',\n '–', 'À', 'A', 'º', '∑', 'B',\n 'V', 'W', '\\\\', '+', 'V', 'T',\n '‘', 'O', 'P', '^', '•', 'S',\n '“', 'Ã', '∆', 'u', '≈', '∞',\n 'Ω', 'D', '§', 'G', '∫', '∫',\n 'I', 'M', 'N', 'À', '£', 'S',\n '√', 'E', '/', 'º', '∫', '∫',\n 'Z', '∆', 'A', 'P', '∑', 'B',\n 'V', '–', '≈', 'X', '—', 'W',\n '—', '∏', 'F', '∑', 'æ', '√',\n '+', 'π', 'º', 'A', 'º', 'B',\n '∫', 'O', 'T', 'µ', 'R', 'u',\n '√', '+', '∏', 'ƒ', 'y', '—',\n '∏', '^', 'S', '—', 'W', 'V',\n 'Z', '≈', 'G', 'y', 'K', 'E',\n '∏', 'T', 'y', 'A', 'º', '∫',\n '∑', 'L', '‘', '∏', 'H', 'Ç',\n 'F', 'B', 'X', 'º', '§', 'X',\n 'A', 'D', 'ƒ', '\\\\', 'Ω', 'L',\n 'Ç', '•', '—', '∏', '≈', 'ƒ',\n '∑', '∑', '∞', '≈', 'µ', 'P',\n 'O', 'R', 'X', 'Q', 'F', '∫',\n 'G', '√', 'Z', 'π', 'J', 'T',\n '‘', '—', '∏', 'æ', 'J', 'I',\n '+', '“', 'B', 'P', 'Q', 'W',\n '∞', 'V', 'E', 'X', '“', 'º',\n 'W', 'I', '∞', '—', 'E', 'H',\n 'M', '£', '•', 'u', 'I', 'À'],\n 'frequencies': Counter({'—': 16,\n 'º': 14,\n 'B': 12,\n 'R': 12,\n '∫': 11,\n 'P': 11,\n 'I': 11,\n 'u': 10,\n '≈': 10,\n 'y': 10,\n '∑': 10,\n 'À': 9,\n 'X': 9,\n 'W': 9,\n 'V': 9,\n 'E': 9,\n 'Z': 8,\n '+': 8,\n '∞': 8,\n 'H': 8,\n 'M': 8,\n 'µ': 8,\n '£': 8,\n 'A': 8,\n 'L': 8,\n 'æ': 8,\n '∏': 8,\n 'O': 7,\n '•': 7,\n 'G': 7,\n 'T': 7,\n '‘': 7,\n '“': 7,\n '/': 6,\n '–': 6,\n 'F': 6,\n 'π': 6,\n 'J': 6,\n '^': 6,\n 'N': 6,\n 'D': 6,\n 'S': 6,\n 'ƒ': 6,\n '√': 6,\n 'K': 5,\n 'Ç': 5,\n 'Q': 5,\n 'Ã': 5,\n '\\\\': 5,\n '¢': 4,\n '§': 4,\n 'Ω': 3,\n '∆': 3,\n 'j': 1}),\n 'length': 408,\n 'relative_freq': {'+': 1.9607843137254901,\n '/': 1.4705882352941175,\n 'A': 1.9607843137254901,\n 'B': 2.941176470588235,\n 'D': 1.4705882352941175,\n 'E': 2.2058823529411766,\n 'F': 1.4705882352941175,\n 'G': 1.715686274509804,\n 'H': 1.9607843137254901,\n 'I': 2.696078431372549,\n 'J': 1.4705882352941175,\n 'K': 1.2254901960784315,\n 'L': 1.9607843137254901,\n 'M': 1.9607843137254901,\n 'N': 1.4705882352941175,\n 'O': 1.715686274509804,\n 'P': 2.696078431372549,\n 'Q': 1.2254901960784315,\n 'R': 2.941176470588235,\n 'S': 1.4705882352941175,\n 'T': 1.715686274509804,\n 'V': 2.2058823529411766,\n 'W': 2.2058823529411766,\n 'X': 2.2058823529411766,\n 'Z': 1.9607843137254901,\n '\\\\': 1.2254901960784315,\n '^': 1.4705882352941175,\n 'j': 0.24509803921568626,\n 'u': 2.450980392156863,\n 'y': 2.450980392156863,\n '¢': 0.9803921568627451,\n '£': 1.9607843137254901,\n '§': 0.9803921568627451,\n 'µ': 1.9607843137254901,\n 'º': 3.431372549019608,\n 'À': 2.2058823529411766,\n 'Ã': 1.2254901960784315,\n 'Ç': 1.2254901960784315,\n 'æ': 1.9607843137254901,\n 'ƒ': 1.4705882352941175,\n 'Ω': 0.7352941176470588,\n 'π': 1.4705882352941175,\n '–': 1.4705882352941175,\n '—': 3.9215686274509802,\n '‘': 1.715686274509804,\n '“': 1.715686274509804,\n '•': 1.715686274509804,\n '∆': 0.7352941176470588,\n '∏': 1.9607843137254901,\n '∑': 2.450980392156863,\n '√': 1.4705882352941175,\n '∞': 1.9607843137254901,\n '∫': 2.696078431372549,\n '≈': 2.450980392156863},\n 'vocab': ['∫', 'À', 'ƒ', 'N', 'W', '/', 'V',\n 'O', 'J', 'µ', '√', 'M', 'º', '“',\n 'π', '–', '\\\\', 'P', '∑', 'T',\n '§', 'L', '¢', '+', 'R', '£', 'Ç',\n 'u', '∞', '^', 'Q', '∆', 'S', 'Ω',\n 'E', 'A', 'Z', '∏', '≈', 'æ', 'G',\n '‘', 'Ã', '•', 'K', 'B', 'D', 'y',\n 'I', 'F', 'X', 'H', '—', 'j'],\n 'vocab_length': 54}\n" ] ], [ [ "The default solution matches the frequency of symbols in the cipher text with frequency of letters in the plaintext language (in this case, English). 
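For example, the most frequent cipher symbol '—' has a relative frequency of about 3.92%, and this heuristic compares such values against the relative frequencies of English letters such as 'e' (about 12.14%) and 't' (about 8.80%), taken from the statistics computed above and below. 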
Note that this is just some text in English used to compute letter frequencies. We do not have access to the real plaintext in this homework. \n\nIn order to compute plaintext frequencies, we use an English dataset that has no punctuation or spaces and in which all characters are lowercase.", "_____no_output_____" ] ], [ [ "# plaintext description\nplaintxt = read_file(\"data/default.wiki.txt.bz2\")\nplaintxt_desc = get_statistics(plaintxt, cipher=False)\npp.pprint(plaintxt_desc)", "{'frequencies': Counter({'e': 1001029,\n                         't': 725515,\n                         'a': 716871,\n                         'i': 609790,\n                         'n': 605384,\n                         'o': 595295,\n                         'r': 547660,\n                         's': 544866,\n                         'h': 404479,\n                         'l': 340389,\n                         'd': 339004,\n                         'c': 271811,\n                         'u': 215523,\n                         'm': 214359,\n                         'f': 184661,\n                         'g': 168439,\n                         'p': 166824,\n                         'w': 142745,\n                         'b': 130070,\n                         'y': 126667,\n                         'v': 86098,\n                         'k': 56452,\n                         'j': 18131,\n                         'x': 15796,\n                         'z': 9903,\n                         'q': 7356}),\n 'length': 8245117,\n 'relative_freq': {'a': 8.69449153965917,\n                   'b': 1.5775397729346958,\n                   'c': 3.296629993243273,\n                   'd': 4.111572946751393,\n                   'e': 12.140870772361387,\n                   'f': 2.2396407473659865,\n                   'g': 2.0428939941058446,\n                   'h': 4.905679325108425,\n                   'i': 7.395771339569833,\n                   'j': 0.21989985102697754,\n                   'k': 0.6846719094465245,\n                   'l': 4.12837076781324,\n                   'm': 2.5998296931383753,\n                   'n': 7.342333650329038,\n                   'o': 7.219970316976703,\n                   'p': 2.0233066431925706,\n                   'q': 0.08921644168299855,\n                   'r': 6.642234427965062,\n                   's': 6.608347704465564,\n                   't': 8.799329348510154,\n                   'u': 2.6139471398647225,\n                   'v': 1.0442301789046777,\n                   'w': 1.7312671245295852,\n                   'x': 0.19158005883967444,\n                   'y': 1.5362668595242495,\n                   'z': 0.12010745268987694},\n 'vocab': ['t', 'p', 'n', 'h', 'a', 'g', 'f',\n           'i', 'v', 'r', 'd', 'k', 'c', 's',\n           'b', 'w', 'z', 'e', 'u', 'x', 'q',\n           'o', 'm', 'l', 'y', 'j'],\n 'vocab_length': 26}\n" ] ], [ [ "We have all the tools we need to describe the default solution to this homework.\n\nWe use a simple frequency matching heuristic to map cipher symbols to English letters.\n\nWe match the frequency $f(\\cdot)$ of each cipher symbol $c$ against that of each English letter $e$:\n\n$$h_{c,e} = | \\log(\\frac{f(c)}{f(e)}) | $$\n\nFor each ciphertext symbol $c$ we then compute the most likely plaintext symbol $e$ by sorting based on the above score.", "_____no_output_____" ] ], [ [ "\"\"\"\ndefault : frequency matching heuristic\n\nNotice how the candidate mappings, a.k.a. hypotheses, are first scored with a measure of quality and, \nthen, the best scoring hypothesis is chosen as the winner. 
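(A small worked example of the score, using the relative frequencies printed above: for the cipher symbol '∆' at about 0.735 and the English letter 'v' at about 1.044, h = |log(0.735/1.044)| ≈ 0.35; smaller scores indicate a closer frequency match.)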
\n\nThe plaintext letters from the winner are then mapped to the respective ciphertext symbols.\n\"\"\"\n\ndef find_mappings(ciphertext, plaintext):\n mappings = defaultdict(dict)\n hypotheses = defaultdict(dict)\n # calculate alignment scores\n for symbol in ciphertext['vocab']:\n for letter in plaintext['vocab']:\n hypotheses[symbol][letter] = abs(math.log((ciphertext['relative_freq'][symbol]/plaintext['relative_freq'][letter])))\n \n # find winner\n for sym in hypotheses.keys():\n #mappings[sym] = min(lemma_alignment[sym], key=lemma_alignment[sym].get)\n winner = sorted(hypotheses[sym].items(), key=lambda kv: kv[1])\n mappings[sym] = winner[1][0]\n \n return mappings", "_____no_output_____" ] ], [ [ "Using this scoring function we map the cipher symbol `∆` to `v` in English", "_____no_output_____" ] ], [ [ "mapping = find_mappings(cipher_desc, plaintxt_desc)\nprint(\"∆ maps to {}\\n\".format(mapping['∆']))\nprint(mapping)", "∆ maps to v\n\ndefaultdict(<class 'dict'>, {'∫': 'm', 'À': 'g', 'ƒ': 'b', 'N': 'b', 'W': 'g', '/': 'b', 'V': 'g', 'O': 'b', 'J': 'b', 'µ': 'g', '√': 'b', 'M': 'g', 'º': 'd', '“': 'b', 'π': 'b', '–': 'b', '\\\\': 'y', 'P': 'm', '∑': 'u', 'T': 'b', '§': 'k', 'L': 'g', '¢': 'k', '+': 'g', 'R': 'u', '£': 'g', 'Ç': 'y', 'u': 'u', '∞': 'g', '^': 'b', 'Q': 'y', '∆': 'v', 'S': 'b', 'Ω': 'v', 'E': 'g', 'A': 'g', 'Z': 'g', '∏': 'g', '≈': 'u', 'æ': 'g', 'G': 'b', '‘': 'b', 'Ã': 'y', '•': 'b', 'K': 'y', 'B': 'u', 'D': 'b', 'y': 'u', 'I': 'm', 'F': 'b', 'X': 'g', 'H': 'g', '—': 'l', 'j': 'x'})\n" ] ], [ [ "The default solution to this decipherment problem is to take each cipher symbol and map it to the most likely English letter as provided by the `find_mappings` function above.", "_____no_output_____" ] ], [ [ "english_text = []\nfor symbol in cipher_desc['content']:\n english_text.append(mapping[symbol])\ndecipherment = ('').join(english_text)\nprint(decipherment)", "dmmbgbuumgbubbgbugggububgdgmbyyluugbubumgvlbbbyubggbkbduumbugumvuylggbgggbbbybbgggugubglbbdymgglgggkbkubbmugybglbubybuugbbmuubglgggubuugbgylbmgglyggggbduumbugxbgybkuguggbgbbbggmggbggybuggmdbugbubybubbgbygmggguubmggbggygbbbmybggdgggybgggkmkubggduuggyggbbbmbbbbyvuugvbkbmmmgbggbbgbdmmgvgmuugbuglglgbugbgbdgdumbbguubggbulgbblgggubuyggbugdmugbggybugdkggbbyvgyblgubuugugmbugybmbbgbbbblggbmgbumygggggbdgmglggggbumg\n" ] ], [ [ "Notice that the default solution provides a very bad decipherment. Your job is to make it better!", "_____no_output_____" ], [ "## Grading", "_____no_output_____" ], [ "Ignore the following cells. They are for grading against the reference decipherment. Based on the clues provided in the decipherment homework description, you can easily find a reasonable reference text online for this cipher text.", "_____no_output_____" ] ], [ [ "\"\"\"\nATTENTION!\nFor grading purposes only. Don't bundle with the assignment. \nMake sure '_ref.txt' is removed from the 'data' directory before publishing.\n\"\"\"\n\ndef read_gold(gold_file):\n with open(gold_file) as f:\n gold = f.read()\n f.close()\n gold = list(gold.strip())\n return gold\n\ndef symbol_error_rate(dec, _gold):\n gold = read_gold(_gold)\n correct = 0\n if len(gold) == len(dec):\n for (d,g) in zip(dec, gold):\n if d==g:\n correct += 1\n wrong = len(gold)-correct\n error = wrong/len(gold)\n \n return error\n \n# gold decipherment\ngold_file = \"data/_ref.txt\"\nser = symbol_error_rate(decipherment, gold_file)\nprint('Error: ', ser*100, 'Accuracy: ', (1-ser)*100)", "Error: 97.30392156862744 Accuracy: 2.6960784313725505\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ] ]
d0df4784391275d3e84ba7fcfbecbeb9433ba784
428,655
ipynb
Jupyter Notebook
Google_Smart_Compose.ipynb
RavenColEvol/gmail-smart-compose
0e36d6fade80bef99b09028e0c2eb39319ab9602
[ "MIT" ]
20
2020-06-02T00:13:23.000Z
2021-11-17T23:11:26.000Z
Google_Smart_Compose.ipynb
RavenColEvol/gmail-smart-compose
0e36d6fade80bef99b09028e0c2eb39319ab9602
[ "MIT" ]
8
2020-10-09T18:50:25.000Z
2022-02-27T05:39:59.000Z
Google_Smart_Compose.ipynb
RavenColEvol/gmail-smart-compose
0e36d6fade80bef99b09028e0c2eb39319ab9602
[ "MIT" ]
8
2020-06-02T00:00:16.000Z
2021-06-18T11:37:45.000Z
246.636939
112,874
0.634391
[ [ [ "The first thing we need to do is to download the dataset from Kaggle. We use the [Enron dataset](https://www.kaggle.com/wcukierski/enron-email-dataset), which is the biggest public email dataset available.\nTo do so we will use GDrive and download the dataset within a Drive folder to be used by Colab.", "_____no_output_____" ] ], [ [ "from google.colab import drive\ndrive.mount('/content/gdrive')", "Drive already mounted at /content/gdrive; to attempt to forcibly remount, call drive.mount(\"/content/gdrive\", force_remount=True).\n" ], [ "import os\nos.environ['KAGGLE_CONFIG_DIR'] = \"/content/gdrive/My Drive/Kaggle\"\n%cd /content/gdrive/My Drive/Kaggle", "/content/gdrive/My Drive/Kaggle\n" ] ], [ [ "We can download the dataset from Kaggle and save it in the GDrive folder. This needs to be done only the first time.", "_____no_output_____" ] ], [ [ "# !kaggle datasets download -d wcukierski/enron-email-dataset\n\n# unzipping the zip files\n# !unzip \\*.zip", "_____no_output_____" ] ], [ [ "Now we are finally ready to start working with the dataset, accessible as a CSV file called `emails.csv`.", "_____no_output_____" ] ], [ [ "import numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport email\nimport re", "_____no_output_____" ], [ "if not 'emails_df' in locals():\n emails_df = pd.read_csv('./emails.csv')", "_____no_output_____" ], [ "# Use only a subpart of the whole dataset to avoid exceeding RAM\n# emails_df = emails_df[:10000]", "_____no_output_____" ], [ "print(emails_df.shape)\nemails_df.head()", "(517401, 2)\n" ], [ "print(emails_df['message'][0])", "Message-ID: <18782981.1075855378110.JavaMail.evans@thyme>\nDate: Mon, 14 May 2001 16:39:00 -0700 (PDT)\nFrom: [email protected]\nTo: [email protected]\nSubject: \nMime-Version: 1.0\nContent-Type: text/plain; charset=us-ascii\nContent-Transfer-Encoding: 7bit\nX-From: Phillip K Allen\nX-To: Tim Belden <Tim Belden/Enron@EnronXGate>\nX-cc: \nX-bcc: \nX-Folder: \\Phillip_Allen_Jan2002_1\\Allen, Phillip K.\\'Sent Mail\nX-Origin: Allen-P\nX-FileName: pallen (Non-Privileged).pst\n\nHere is our forecast\n\n \n" ], [ "# Convert to message objects from the message strings\nmessages = list(map(email.message_from_string, emails_df['message']))", "_____no_output_____" ], [ "def get_text_from_email(msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append( part.get_payload() )\n text = ''.join(parts)\n return text\n\nemails = pd.DataFrame()\n\n# Parse content from emails\nemails['content'] = list(map(get_text_from_email, messages))", "_____no_output_____" ], [ "import gc\n\n# Remove variables from memory\ndel messages\ndel emails_df\ngc.collect()", "_____no_output_____" ], [ "def normalize_text(text):\n text = text.lower()\n \n # creating a space between a word and the punctuation following it to separate words\n # and compact repetition of punctuation\n # eg: \"he is a boy..\" => \"he is a boy .\"\n text = re.sub(r'([.,!?]+)', r\" \\1 \", text)\n\n # replacing everything with space except (a-z, A-Z, \".\", \"?\", \"!\", \",\", \"'\")\n text = re.sub(r\"[^a-zA-Z?.!,']+\", \" \", text)\n\n # Compact spaces\n text = re.sub(r'[\" \"]+', \" \", text)\n\n # Remove forwarded messages\n text = text.split('forwarded by')[0]\n\n text = text.strip()\n\n return text\n\nemails['content'] = list(map(normalize_text, emails['content']))", "_____no_output_____" ], [ "# Drop samples with empty content text after normalization\nemails['content'].replace('', np.nan, 
inplace=True)\nemails.dropna(subset=['content'], inplace=True)", "_____no_output_____" ], [ "pd.set_option('max_colwidth', -1)\nemails.head(50)", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: FutureWarning: Passing a negative integer is deprecated in version 1.0 and will not be supported in future version. Instead, use None to not limit the column width.\n  \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "In the original paper, the dataset is built from 8 billion emails where the context is provided by the email date, subject and previous message if the user is replying. Unfortunately, in the Enron dataset it's not possible to build the reply relationship between emails. Thus, in order to generate the context of a sentence, we train the sequence-to-sequence model to predict the sentence completion from pairs of split sentences.\n\nFor instance, the sentence `here is our forecast` is split into the following pairs within the dataset:\n\n```\n[\n ('<start> here is <end>', '<start> our forecast <end>'),\n ('<start> here is our <end>', '<start> forecast <end>')\n]\n```", "_____no_output_____" ] ], [ [ "# Skip long sentences, which increase maximum length a lot when padding\n# and make the number of parameters to train explode\nSENTENCE_MAX_WORDS = 20\n\ndef generate_dataset (emails):\n    contents = emails['content']\n    output = []\n    vocabulary_sentences = []\n\n    for content in contents:\n        # Skip emails longer than one sentence\n        if (len(content) > SENTENCE_MAX_WORDS * 5):\n            continue\n\n        sentences = content.split(' . ')\n        for sentence in sentences:\n            # Remove user names from start or end of sentence. This is just a heuristic\n            # but it's more efficient than compiling the list of names and removing all of them\n            sentence = re.sub(\"(^\\w+\\s,\\s)|(\\s,\\s\\w+$)\", \"\", sentence)\n            words = sentence.split(' ')\n\n            if ((len(words) > SENTENCE_MAX_WORDS) or (len(words) < 2)):\n                continue\n\n            vocabulary_sentences.append('<start> ' + sentence + ' <end>')\n\n            for i in range(1, len(words) - 1):\n                input_data = '<start> ' + ' '.join(words[:i+1]) + ' <end>'\n                output_data = '<start> ' + ' '.join(words[i+1:]) + ' <end>'\n                data = (input_data, output_data)\n                output.append(data)\n\n    return output, vocabulary_sentences\n\npairs, vocabulary_sentences = generate_dataset(emails)", "_____no_output_____" ], [ "print(len(pairs))\nprint(len(vocabulary_sentences))\nprint(*pairs[:10], sep='\\n')\nprint(*vocabulary_sentences[:10], sep='\\n')", "509918\n104151\n('<start> here is <end>', '<start> our forecast <end>')\n('<start> here is our <end>', '<start> forecast <end>')\n('<start> way to <end>', '<start> go !!! <end>')\n('<start> way to go <end>', '<start> !!! <end>')\n(\"<start> let's shoot <end>\", '<start> for tuesday at . <end>')\n(\"<start> let's shoot for <end>\", '<start> tuesday at . <end>')\n(\"<start> let's shoot for tuesday <end>\", '<start> at . <end>')\n(\"<start> let's shoot for tuesday at <end>\", '<start> . <end>')\n('<start> how about <end>', '<start> either next tuesday or thursday ? phillip <end>')\n('<start> how about either <end>', '<start> next tuesday or thursday ? phillip <end>')\n<start> here is our forecast <end>\n<start> test successful <end>\n<start> way to go !!! <end>\n<start> let's shoot for tuesday at . <end>\n<start> how about either next tuesday or thursday ? phillip <end>\n<start> any morning between and <end>\n<start> million is fine phillip <end>\n<start> i think fletch has a good cpa <end>\n<start> i am still doing my own . 
<end>\n<start> nymex expiration is during this time frame <end>\n" ] ], [ [ "This is where the fun begins. The dataset is finally available and we start working on the analysis by using [Keras](https://keras.io/) and [TensorFlow](https://www.tensorflow.org/).", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow import keras\n\nnp.random.seed(42)", "_____no_output_____" ] ], [ [ "We need to transform the text corpora into sequences of integers (each integer being the index of a token in a dictionary) by using keras `Tokenizer`. We also limit to the 10k most frequent words, deleting uncommon words from sentences.\n\nNormally we would use two tokenizers, one for the input strings and a different one for the output text, but in this case we are predicting the same vocabulary in both cases. All the words in the output texts are available also in the input texts because of how dataset pairs are generated.\n\nAlso since we will apply the \"teacher forcing\" technique during training, we need both the target data and the (target + 1 timestep) data.", "_____no_output_____" ] ], [ [ "vocab_max_size = 10000\n\ndef tokenize(text):\n tokenizer = keras.preprocessing.text.Tokenizer(filters='', num_words=vocab_max_size)\n tokenizer.fit_on_texts(text)\n\n return tokenizer\n\ninput = [pair[0] for pair in pairs]\noutput = [pair[1] for pair in pairs]\ntokenizer = tokenize(vocabulary_sentences)\n\nencoder_input = tokenizer.texts_to_sequences(input)\ndecoder_input = tokenizer.texts_to_sequences(output)\n\ndecoder_target = [\n [decoder_input[seqN][tokenI + 1]\n for tokenI in range(len(decoder_input[seqN]) - 1)]\n for seqN in range(len(decoder_input))]\n\n# Convert to np.array\nencoder_input = np.array(encoder_input)\ndecoder_input = np.array(decoder_input)\ndecoder_target = np.array(decoder_target)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\n\nencoder_input_train, encoder_input_test, decoder_input_train, decoder_input_test, decoder_target_train, decoder_target_test = train_test_split(encoder_input, decoder_input, decoder_target, test_size=0.2)", "_____no_output_____" ], [ "print(encoder_input_train.shape, encoder_input_test.shape)\nprint(decoder_input_train.shape, decoder_input_test.shape)\nprint(decoder_target_train.shape, decoder_target_test.shape)", "(407934,) (101984,)\n(407934,) (101984,)\n(407934,) (101984,)\n" ], [ "def max_length(t):\n return max(len(i) for i in t)\n\nmax_length_in = max_length(encoder_input)\nmax_length_out = max_length(decoder_input)\n\nencoder_input_train = keras.preprocessing.sequence.pad_sequences(encoder_input_train, maxlen=max_length_in, padding=\"post\")\ndecoder_input_train = keras.preprocessing.sequence.pad_sequences(decoder_input_train, maxlen=max_length_out, padding=\"post\")\ndecoder_target_train = keras.preprocessing.sequence.pad_sequences(decoder_target_train, maxlen=max_length_out, padding=\"post\")\n\nencoder_input_test = keras.preprocessing.sequence.pad_sequences(encoder_input_test, maxlen=max_length_in, padding=\"post\")\ndecoder_input_test = keras.preprocessing.sequence.pad_sequences(decoder_input_test, maxlen=max_length_out, padding=\"post\")\ndecoder_target_test = keras.preprocessing.sequence.pad_sequences(decoder_target_test, maxlen=max_length_out, padding=\"post\")", "_____no_output_____" ], [ "print(max_length_in, max_length_out)", "21 20\n" ], [ "# Shuffle the data in unison\np = np.random.permutation(len(encoder_input_train))\nencoder_input_train = encoder_input_train[p]\ndecoder_input_train = 
decoder_input_train[p]\ndecoder_target_train = decoder_target_train[p]\n\nq = np.random.permutation(len(encoder_input_test))\nencoder_input_test = encoder_input_test[q]\ndecoder_input_test = decoder_input_test[q]\ndecoder_target_test = decoder_target_test[q]", "_____no_output_____" ], [ "import math\n\nbatch_size = 128\nvocab_size = vocab_max_size if len(tokenizer.word_index) > vocab_max_size else len(tokenizer.word_index)\n# Rule of thumb of embedding size: vocab_size ** 0.25\n# https://stackoverflow.com/questions/48479915/what-is-the-preferred-ratio-between-the-vocabulary-size-and-embedding-dimension\nembedding_dim = math.ceil(vocab_size ** 0.25)\nlatent_dim = 192 # Latent dimensionality of the encoding space.", "_____no_output_____" ], [ "print(vocab_size, embedding_dim)", "10000 10\n" ] ], [ [ "Here we define the RNN models. We start with the Encoder-Decoder model used in training which leverages the \"teacher forcing technique\". Therefore, it will receive as input `encoder_input` and `decoder_input` datasets.\n\nThen the second model is represented by the inference Decoder which will receive as input the encoded states of the input sequence and the predicted token of the previous time step.\n\nBoth models use GRU units to preserve the context state, which have been shown to be more accurate than LSTM units and simpler to use since they have only one state.", "_____no_output_____" ] ], [ [ "# GRU Encoder\nencoder_in_layer = keras.layers.Input(shape=(max_length_in,))\nencoder_embedding = keras.layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim)\nencoder_bi_gru = keras.layers.Bidirectional(keras.layers.GRU(units=latent_dim, return_sequences=True, return_state=True))\n\n# Discard the encoder output and use hidden states (h) and memory cells states (c)\n# for forward (f) and backward (b) layer\nencoder_out, fstate_h, bstate_h = encoder_bi_gru(encoder_embedding(encoder_in_layer))\nstate_h = keras.layers.Concatenate()([fstate_h, bstate_h])\n\n# GRUDecoder\ndecoder_in_layer = keras.layers.Input(shape=(None,))\ndecoder_embedding = keras.layers.Embedding(input_dim=vocab_size, output_dim=embedding_dim)\ndecoder_gru = keras.layers.GRU(units=latent_dim * 2, return_sequences=True, return_state=True)\n# Discard internal states in training, keep only the output sequence\ndecoder_gru_out, _ = decoder_gru(decoder_embedding(decoder_in_layer), initial_state=state_h)\ndecoder_dense_1 = keras.layers.Dense(128, activation=\"relu\")\ndecoder_dense = keras.layers.Dense(vocab_size, activation=\"softmax\")\ndecoder_out_layer = decoder_dense(keras.layers.Dropout(rate=0.2)(decoder_dense_1(keras.layers.Dropout(rate=0.2)(decoder_gru_out))))\n\n# Define the model that uses the Encoder and the Decoder\nmodel = keras.models.Model([encoder_in_layer, decoder_in_layer], decoder_out_layer)\n\ndef perplexity(y_true, y_pred):\n return keras.backend.exp(keras.backend.mean(keras.backend.sparse_categorical_crossentropy(y_true, y_pred)))\n\nmodel.compile(optimizer='adam', loss=\"sparse_categorical_crossentropy\", metrics=[perplexity])\nmodel.summary()", "Model: \"model_13\"\n__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_24 (InputLayer) [(None, 21)] 0 \n__________________________________________________________________________________________________\nembedding_22 (Embedding) (None, 21, 10) 100000 
input_24[0][0] \n__________________________________________________________________________________________________\ninput_25 (InputLayer) [(None, None)] 0 \n__________________________________________________________________________________________________\nbidirectional_11 (Bidirectional [(None, 21, 384), (N 235008 embedding_22[0][0] \n__________________________________________________________________________________________________\nembedding_23 (Embedding) (None, None, 10) 100000 input_25[0][0] \n__________________________________________________________________________________________________\nconcatenate_11 (Concatenate) (None, 384) 0 bidirectional_11[0][1] \n bidirectional_11[0][2] \n__________________________________________________________________________________________________\ngru_23 (GRU) [(None, None, 384), 456192 embedding_23[0][0] \n concatenate_11[0][0] \n__________________________________________________________________________________________________\ndropout_23 (Dropout) (None, None, 384) 0 gru_23[0][0] \n__________________________________________________________________________________________________\ndense_22 (Dense) (None, None, 128) 49280 dropout_23[0][0] \n__________________________________________________________________________________________________\ndropout_22 (Dropout) (None, None, 128) 0 dense_22[0][0] \n__________________________________________________________________________________________________\ndense_23 (Dense) (None, None, 10000) 1290000 dropout_22[0][0] \n==================================================================================================\nTotal params: 2,230,480\nTrainable params: 2,230,480\nNon-trainable params: 0\n__________________________________________________________________________________________________\n" ], [ "keras.utils.plot_model(model, \"encoder-decoder.png\", show_shapes=True)", "_____no_output_____" ], [ "epochs = 10\nhistory = model.fit([encoder_input_train, decoder_input_train], decoder_target_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_split=0.2)\n\ndef plot_history(history):\n plt.plot(history.history['loss'], label=\"Training loss\")\n plt.plot(history.history['val_loss'], label=\"Validation loss\")\n plt.legend()\n\nplot_history(history)", "Epoch 1/10\n2550/2550 [==============================] - 305s 120ms/step - loss: 1.5884 - perplexity: 42.0100 - val_loss: 1.2990 - val_perplexity: 3.6789\nEpoch 2/10\n2550/2550 [==============================] - 304s 119ms/step - loss: 1.2131 - perplexity: 3.3808 - val_loss: 1.0714 - val_perplexity: 2.9271\nEpoch 3/10\n2550/2550 [==============================] - 304s 119ms/step - loss: 1.0341 - perplexity: 2.8213 - val_loss: 0.9081 - val_perplexity: 2.4844\nEpoch 4/10\n2550/2550 [==============================] - 303s 119ms/step - loss: 0.9191 - perplexity: 2.5131 - val_loss: 0.8118 - val_perplexity: 2.2556\nEpoch 5/10\n2550/2550 [==============================] - 302s 118ms/step - loss: 0.8453 - perplexity: 2.3329 - val_loss: 0.7469 - val_perplexity: 2.1135\nEpoch 6/10\n2550/2550 [==============================] - 302s 119ms/step - loss: 0.7948 - perplexity: 2.2177 - val_loss: 0.7001 - val_perplexity: 2.0164\nEpoch 7/10\n2550/2550 [==============================] - 303s 119ms/step - loss: 0.7584 - perplexity: 2.1383 - val_loss: 0.6694 - val_perplexity: 1.9553\nEpoch 8/10\n2550/2550 [==============================] - 304s 119ms/step - loss: 0.7317 - perplexity: 2.0816 - val_loss: 0.6417 - val_perplexity: 1.9018\nEpoch 9/10\n2550/2550 
[==============================] - 303s 119ms/step - loss: 0.7103 - perplexity: 2.0376 - val_loss: 0.6240 - val_perplexity: 1.8684\nEpoch 10/10\n2550/2550 [==============================] - 303s 119ms/step - loss: 0.6931 - perplexity: 2.0025 - val_loss: 0.6122 - val_perplexity: 1.8463\n" ], [ "scores = model.evaluate([encoder_input_test[:1000], decoder_input_test[:1000]], decoder_target_test[:1000])\nprint(\"%s: %.2f\" % (model.metrics_names[1], scores[1]))", "32/32 [==============================] - 1s 20ms/step - loss: 0.6070 - perplexity: 1.8352\nperplexity: 1.84\n" ], [ "# Inference Decoder\nencoder_model = keras.models.Model(encoder_in_layer, state_h)\n\nstate_input_h = keras.layers.Input(shape=(latent_dim * 2,))\ninf_decoder_out, decoder_h = decoder_gru(decoder_embedding(decoder_in_layer), initial_state=state_input_h)\ninf_decoder_out = decoder_dense(decoder_dense_1(inf_decoder_out))\ninf_model = keras.models.Model(inputs=[decoder_in_layer, state_input_h], \n                               outputs=[inf_decoder_out, decoder_h])", "_____no_output_____" ], [ "keras.utils.plot_model(encoder_model, \"encoder-model.png\", show_shapes=True)", "_____no_output_____" ], [ "keras.utils.plot_model(inf_model, \"inference-model.png\", show_shapes=True)", "_____no_output_____" ], [ "def tokenize_text(text):\n    text = '<start> ' + text.lower() + ' <end>'\n    text_tensor = tokenizer.texts_to_sequences([text])\n    text_tensor = keras.preprocessing.sequence.pad_sequences(text_tensor, maxlen=max_length_in, padding=\"post\")\n    return text_tensor\n\n# Reversed map from a tokenizer index to a word\nindex_to_word = dict(map(reversed, tokenizer.word_index.items()))\n\n# Given an input tensor, greedily decode an output sequence using encoder_model and inf_model\ndef decode_sequence(input_tensor):\n    # Encode the input as state vectors.\n    state = encoder_model.predict(input_tensor)\n\n    target_seq = np.zeros((1, 1))\n    target_seq[0, 0] = tokenizer.word_index['<start>']\n    curr_word = \"<start>\"\n    decoded_sentence = ''\n\n    i = 0\n    while curr_word != \"<end>\" and i < (max_length_out - 1):\n        output_tokens, h = inf_model.predict([target_seq, state])\n\n        curr_token = np.argmax(output_tokens[0, 0])\n\n        if (curr_token == 0):\n            break\n\n        curr_word = index_to_word[curr_token]\n\n        decoded_sentence += ' ' + curr_word\n        target_seq[0, 0] = curr_token\n        state = h\n        i += 1\n\n    return decoded_sentence", "_____no_output_____" ], [ "def tokens_to_seq(tokens):\n    words = list(map(lambda token: index_to_word[token] if token != 0 else '', tokens))\n    return ' '.join(words)", "_____no_output_____" ] ], [ [ "Let's test the inference model with some inputs.", "_____no_output_____" ] ], [ [ "texts = [\n    'here is',\n    'have a',\n    'please review',\n    'please call me',\n    'thanks for',\n    'let me',\n    'Let me know',\n    'Let me know if you',\n    'this sounds',\n    'is this call going to',\n    'can you get',\n    'is it okay',\n    'it should',\n    'call if there\\'s',\n    'gave her a',\n    'i will let',\n    'i will be',\n    'may i get a copy of all the',\n    'how is our trade',\n    'this looks like a',\n    'i am fine with the changes',\n    'please be sure this'\n]\n\noutput = list(map(lambda text: (text, decode_sequence(tokenize_text(text))), texts))\noutput_df = pd.DataFrame(output, columns=[\"input\", \"output\"])\noutput_df.head(len(output))", "_____no_output_____" ] ], [ [ "The predicted outputs are actually quite good. The grammar is correct and the completions make logical sense. 
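This matches the quantitative picture: the test-set perplexity of about 1.84 reported above is essentially the same as the final validation perplexity, which suggests the model generalizes to unseen sentence pairs. 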
Some outputs also show that the predictions are personalized based on the Enron dataset, for instance in the case of `here is - the latest version of the presentation`.\nThe `please review - the attached outage report` also shows a personalized prediction. This is consistent with the goal of the task.", "_____no_output_____" ], [ "Save the Tokenizer and the Keras model for use within the browser.", "_____no_output_____" ] ], [ [ "import json \n\nwith open( 'word_dict-final.json' , 'w' ) as file: \n    json.dump( tokenizer.word_index , file)", "_____no_output_____" ], [ "encoder_model.save('./encoder-model-final.h5')\ninf_model.save('./inf-model-final.h5')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ] ]
d0df4867a25d92685d9814c81f4c17e24d0fd7d9
273,209
ipynb
Jupyter Notebook
notebooks/plot_convolutional_barycenter.ipynb
vfdev-5/POT
e757b75976ece1e6e53e655852b9f8863e7b6f5a
[ "MIT" ]
2
2019-06-18T14:22:11.000Z
2019-07-01T08:43:43.000Z
notebooks/plot_convolutional_barycenter.ipynb
vfdev-5/POT
e757b75976ece1e6e53e655852b9f8863e7b6f5a
[ "MIT" ]
null
null
null
notebooks/plot_convolutional_barycenter.ipynb
vfdev-5/POT
e757b75976ece1e6e53e655852b9f8863e7b6f5a
[ "MIT" ]
2
2020-03-12T03:08:31.000Z
2021-08-20T14:08:09.000Z
1,543.553672
268,884
0.961901
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Convolutional Wasserstein Barycenter example\n\n\nThis example is designed to illustrate how the Convolutional Wasserstein Barycenter\nfunction of POT works.\n\n", "_____no_output_____" ] ], [ [ "# Author: Nicolas Courty <[email protected]>\n#\n# License: MIT License\n\n\nimport numpy as np\nimport pylab as pl\nimport ot", "_____no_output_____" ] ], [ [ "Data preparation\n----------------\n\nThe four distributions are constructed from 4 simple images\n\n", "_____no_output_____" ] ], [ [ "f1 = 1 - pl.imread('../data/redcross.png')[:, :, 2]\nf2 = 1 - pl.imread('../data/duck.png')[:, :, 2]\nf3 = 1 - pl.imread('../data/heart.png')[:, :, 2]\nf4 = 1 - pl.imread('../data/tooth.png')[:, :, 2]\n\nA = []\nf1 = f1 / np.sum(f1)\nf2 = f2 / np.sum(f2)\nf3 = f3 / np.sum(f3)\nf4 = f4 / np.sum(f4)\nA.append(f1)\nA.append(f2)\nA.append(f3)\nA.append(f4)\nA = np.array(A)\n\nnb_images = 5\n\n# those are the four corners coordinates that will be interpolated by bilinear\n# interpolation\nv1 = np.array((1, 0, 0, 0))\nv2 = np.array((0, 1, 0, 0))\nv3 = np.array((0, 0, 1, 0))\nv4 = np.array((0, 0, 0, 1))", "_____no_output_____" ] ], [ [ "Barycenter computation and visualization\n----------------------------------------\n\n\n", "_____no_output_____" ] ], [ [ "pl.figure(figsize=(10, 10))\npl.title('Convolutional Wasserstein Barycenters in POT')\ncm = 'Blues'\n# regularization parameter\nreg = 0.004\nfor i in range(nb_images):\n for j in range(nb_images):\n pl.subplot(nb_images, nb_images, i * nb_images + j + 1)\n tx = float(i) / (nb_images - 1)\n ty = float(j) / (nb_images - 1)\n\n # weights are constructed by bilinear interpolation\n tmp1 = (1 - tx) * v1 + tx * v2\n tmp2 = (1 - tx) * v3 + tx * v4\n weights = (1 - ty) * tmp1 + ty * tmp2\n\n if i == 0 and j == 0:\n pl.imshow(f1, cmap=cm)\n pl.axis('off')\n elif i == 0 and j == (nb_images - 1):\n pl.imshow(f3, cmap=cm)\n pl.axis('off')\n elif i == (nb_images - 1) and j == 0:\n pl.imshow(f2, cmap=cm)\n pl.axis('off')\n elif i == (nb_images - 1) and j == (nb_images - 1):\n pl.imshow(f4, cmap=cm)\n pl.axis('off')\n else:\n # call to barycenter computation\n pl.imshow(ot.bregman.convolutional_barycenter2d(A, reg, weights), cmap=cm)\n pl.axis('off')\npl.show()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0df53f789f95facbfd3446c97bb2cf68a91052f
14,216
ipynb
Jupyter Notebook
projects_py/Generating Keywords for Google AdWords/notebook.ipynb
AndreasFerox/DataCamp
41525d7252f574111f4929158da1498ee1e73a84
[ "MIT" ]
1
2021-01-31T20:51:10.000Z
2021-01-31T20:51:10.000Z
projects_py/Generating Keywords for Google AdWords/notebook.ipynb
AndreasFerox/DataCamp
41525d7252f574111f4929158da1498ee1e73a84
[ "MIT" ]
null
null
null
projects_py/Generating Keywords for Google AdWords/notebook.ipynb
AndreasFerox/DataCamp
41525d7252f574111f4929158da1498ee1e73a84
[ "MIT" ]
null
null
null
14,216
14,216
0.693303
[ [ [ "## 1. The brief\n<p>Imagine working for a digital marketing agency, and the agency is approached by a massive online retailer of furniture. They want to test our skills at creating large campaigns for all of their website. We are tasked with creating a prototype set of keywords for search campaigns for their sofas section. The client says that they want us to generate keywords for the following products: </p>\n<ul>\n<li>sofas</li>\n<li>convertible sofas</li>\n<li>love seats</li>\n<li>recliners</li>\n<li>sofa beds</li>\n</ul>\n<p><strong>The brief</strong>: The client is generally a low-cost retailer, offering many promotions and discounts. We will need to focus on such keywords. We will also need to move away from luxury keywords and topics, as we are targeting price-sensitive customers. Because we are going to be tight on budget, it would be good to focus on a tightly targeted set of keywords and make sure they are all set to exact and phrase match.</p>\n<p>Based on the brief above we will first need to generate a list of words, that together with the products given above would make for good keywords. Here are some examples:</p>\n<ul>\n<li>Products: sofas, recliners</li>\n<li>Words: buy, prices</li>\n</ul>\n<p>The resulting keywords: 'buy sofas', 'sofas buy', 'buy recliners', 'recliners buy',\n 'prices sofas', 'sofas prices', 'prices recliners', 'recliners prices'.</p>\n<p>As a final result, we want to have a DataFrame that looks like this: </p>\n<table>\n<thead>\n<tr>\n<th>Campaign</th>\n<th>Ad Group</th>\n<th>Keyword</th>\n<th>Criterion Type</th>\n</tr>\n</thead>\n<tbody>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_1</td>\n<td>keyword 1a</td>\n<td>Exact</td>\n</tr>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_1</td>\n<td>keyword 1a</td>\n<td>Phrase</td>\n</tr>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_1</td>\n<td>keyword 1b</td>\n<td>Exact</td>\n</tr>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_1</td>\n<td>keyword 1b</td>\n<td>Phrase</td>\n</tr>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_2</td>\n<td>keyword 2a</td>\n<td>Exact</td>\n</tr>\n<tr>\n<td>Campaign1</td>\n<td>AdGroup_2</td>\n<td>keyword 2a</td>\n<td>Phrase</td>\n</tr>\n</tbody>\n</table>\n<p>The first step is to come up with a list of words that users might use to express their desire in buying low-cost sofas.</p>", "_____no_output_____" ] ], [ [ "# List of words to pair with products\nwords = ['buy', 'discount', 'promotion', 'cheap', 'offer', 'purchase', 'sale']\n\n# Print list of words\nprint(words)", "['buy', 'discount', 'promotion', 'cheap', 'offer', 'purchase', 'sale']\n" ] ], [ [ "## 2. Combine the words with the product names\n<p>Imagining all the possible combinations of keywords can be stressful! But not for us, because we are keyword ninjas! We know how to translate campaign briefs into Python data structures and can imagine the resulting DataFrames that we need to create.</p>\n<p>Now that we have brainstormed the words that work well with the brief that we received, it is now time to combine them with the product names to generate meaningful search keywords. We want to combine every word with every product once before, and once after, as seen in the example above.</p>\n<p>As a quick reminder, for the product 'recliners' and the words 'buy' and 'price' for example, we would want to generate the following combinations: </p>\n<p>buy recliners<br>\nrecliners buy<br>\nprice recliners<br>\nrecliners price<br>\n... 
</p>\n<p>and so on for all the words and products that we have.</p>", "_____no_output_____" ] ], [ [ "products = ['sofas', 'convertible sofas', 'love seats', 'recliners', 'sofa beds']\n\n# Create an empty list\nkeywords_list = []\n\n# Loop through products\nfor product in products:\n # Loop through words\n for word in words:\n # Append combinations\n keywords_list.append([product, product + ' ' + word])\n keywords_list.append([product, word + ' ' + product])\n \n# Inspect keyword list\nprint(keywords_list)", "[['sofas', 'sofas buy'], ['sofas', 'buy sofas'], ['sofas', 'sofas discount'], ['sofas', 'discount sofas'], ['sofas', 'sofas promotion'], ['sofas', 'promotion sofas'], ['sofas', 'sofas cheap'], ['sofas', 'cheap sofas'], ['sofas', 'sofas offer'], ['sofas', 'offer sofas'], ['sofas', 'sofas purchase'], ['sofas', 'purchase sofas'], ['sofas', 'sofas sale'], ['sofas', 'sale sofas'], ['convertible sofas', 'convertible sofas buy'], ['convertible sofas', 'buy convertible sofas'], ['convertible sofas', 'convertible sofas discount'], ['convertible sofas', 'discount convertible sofas'], ['convertible sofas', 'convertible sofas promotion'], ['convertible sofas', 'promotion convertible sofas'], ['convertible sofas', 'convertible sofas cheap'], ['convertible sofas', 'cheap convertible sofas'], ['convertible sofas', 'convertible sofas offer'], ['convertible sofas', 'offer convertible sofas'], ['convertible sofas', 'convertible sofas purchase'], ['convertible sofas', 'purchase convertible sofas'], ['convertible sofas', 'convertible sofas sale'], ['convertible sofas', 'sale convertible sofas'], ['love seats', 'love seats buy'], ['love seats', 'buy love seats'], ['love seats', 'love seats discount'], ['love seats', 'discount love seats'], ['love seats', 'love seats promotion'], ['love seats', 'promotion love seats'], ['love seats', 'love seats cheap'], ['love seats', 'cheap love seats'], ['love seats', 'love seats offer'], ['love seats', 'offer love seats'], ['love seats', 'love seats purchase'], ['love seats', 'purchase love seats'], ['love seats', 'love seats sale'], ['love seats', 'sale love seats'], ['recliners', 'recliners buy'], ['recliners', 'buy recliners'], ['recliners', 'recliners discount'], ['recliners', 'discount recliners'], ['recliners', 'recliners promotion'], ['recliners', 'promotion recliners'], ['recliners', 'recliners cheap'], ['recliners', 'cheap recliners'], ['recliners', 'recliners offer'], ['recliners', 'offer recliners'], ['recliners', 'recliners purchase'], ['recliners', 'purchase recliners'], ['recliners', 'recliners sale'], ['recliners', 'sale recliners'], ['sofa beds', 'sofa beds buy'], ['sofa beds', 'buy sofa beds'], ['sofa beds', 'sofa beds discount'], ['sofa beds', 'discount sofa beds'], ['sofa beds', 'sofa beds promotion'], ['sofa beds', 'promotion sofa beds'], ['sofa beds', 'sofa beds cheap'], ['sofa beds', 'cheap sofa beds'], ['sofa beds', 'sofa beds offer'], ['sofa beds', 'offer sofa beds'], ['sofa beds', 'sofa beds purchase'], ['sofa beds', 'purchase sofa beds'], ['sofa beds', 'sofa beds sale'], ['sofa beds', 'sale sofa beds']]\n" ] ], [ [ "## 3. Convert the list of lists into a DataFrame\n<p>Now we want to convert this list of lists into a DataFrame so we can easily manipulate it and manage the final output.</p>", "_____no_output_____" ] ], [ [ "# Load library\n# ... YOUR CODE FOR TASK 3 ...\nimport pandas as pd\n\n# Create a DataFrame from list\nkeywords_df = pd.DataFrame.from_records(keywords_list)\n\n# Print the keywords DataFrame to explore it\n# ... 
YOUR CODE FOR TASK 3 ...\nprint(keywords_df.head())", " 0 1\n0 sofas sofas buy\n1 sofas buy sofas\n2 sofas sofas discount\n3 sofas discount sofas\n4 sofas sofas promotion\n" ] ], [ [ "## 4. Rename the columns of the DataFrame\n<p>Before we can upload this table of keywords, we will need to give the columns meaningful names. If we inspect the DataFrame we just created above, we can see that the columns are currently named <code>0</code> and <code>1</code>. <code>Ad Group</code> (example: \"sofas\") and <code>Keyword</code> (example: \"sofas buy\") are much more appropriate names.</p>", "_____no_output_____" ] ], [ [ "# Rename the columns of the DataFrame\nkeywords_df.columns = ['Ad Group', 'Keyword']", "_____no_output_____" ] ], [ [ "## 5. Add a campaign column\n<p>Now we need to add some additional information to our DataFrame. \nWe need a new column called <code>Campaign</code> for the campaign name. We want campaign names to be descriptive of our group of keywords and products, so let's call this campaign 'SEM_Sofas'.</p>", "_____no_output_____" ] ], [ [ "# Add a campaign column\n# ... YOUR CODE FOR TASK 5 ...\nkeywords_df['Campaign'] = 'SEM_Sofas'", "_____no_output_____" ] ], [ [ "## 6. Create the match type column\n<p>There are different keyword match types. One is exact match, which is for matching the exact term or are close variations of that exact term. Another match type is broad match, which means ads may show on searches that include misspellings, synonyms, related searches, and other relevant variations.</p>\n<p>Straight from Google's AdWords <a href=\"https://support.google.com/google-ads/answer/2497836?hl=en\">documentation</a>:</p>\n<blockquote>\n <p>In general, the broader the match type, the more traffic potential that keyword will have, since your ads may be triggered more often. Conversely, a narrower match type means that your ads may show less often—but when they do, they’re likely to be more related to someone’s search.</p>\n</blockquote>\n<p>Since the client is tight on budget, we want to make sure all the keywords are in exact match at the beginning.</p>", "_____no_output_____" ] ], [ [ "# Add a criterion type column\n# ... YOUR CODE FOR TASK 6 ...\nkeywords_df['Criterion Type'] = 'Exact'", "_____no_output_____" ] ], [ [ "## 7. Duplicate all the keywords into 'phrase' match\n<p>The great thing about exact match is that it is very specific, and we can control the process very well. The tradeoff, however, is that: </p>\n<ol>\n<li>The search volume for exact match is lower than other match types</li>\n<li>We can't possibly think of all the ways in which people search, and so, we are probably missing out on some high-quality keywords.</li>\n</ol>\n<p>So it's good to use another match called <em>phrase match</em> as a discovery mechanism to allow our ads to be triggered by keywords that include our exact match keywords, together with anything before (or after) them.</p>\n<p>Later on, when we launch the campaign, we can explore with modified broad match, broad match, and negative match types, for better visibility and control of our campaigns.</p>", "_____no_output_____" ] ], [ [ "# Make a copy of the keywords DataFrame\nkeywords_phrase = keywords_df.copy()\n\n# Change criterion type match to phrase\n# ... YOUR CODE FOR TASK 7 ...\nkeywords_phrase['Criterion Type'] = 'Phrase'\n# Append the DataFrames\nkeywords_df_final = keywords_df.append(keywords_phrase)", "_____no_output_____" ] ], [ [ "## 8. 
Save and summarize!\n<p>To upload our campaign, we need to save it as a CSV file. Then we will be able to import it to AdWords editor or BingAds editor. There is also the option of pasting the data into the editor if we want, but having easy access to the saved data is great so let's save to a CSV file!</p>\n<p>To wrap up our campaign work, it is good to look at a summary of our campaign structure. We can do that by grouping by ad group and criterion type and counting by keyword.</p>", "_____no_output_____" ] ], [ [ "# Save the final keywords to a CSV file\n# ... YOUR CODE FOR TASK 8 ...\nkeywords_df_final.to_csv('keywords.csv', index=False)\n# View a summary of our campaign work\nsummary = keywords_df_final.groupby(['Ad Group', 'Criterion Type'])['Keyword'].count()\nprint(summary)", "Ad Group Criterion Type\nconvertible sofas Exact 14\n Phrase 14\nlove seats Exact 14\n Phrase 14\nrecliners Exact 14\n Phrase 14\nsofa beds Exact 14\n Phrase 14\nsofas Exact 14\n Phrase 14\nName: Keyword, dtype: int64\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0df5b43eced3c6d9e0cfe90fc8ee37f6fcdb6b5
996,331
ipynb
Jupyter Notebook
.ipynb_checkpoints/02_modeling_model_5-checkpoint.ipynb
leemjm92/dsi15_capstone_steering_wheel_prediction
386fab5ad71fa9d26269d146137be19a2042b694
[ "BSD-3-Clause" ]
null
null
null
.ipynb_checkpoints/02_modeling_model_5-checkpoint.ipynb
leemjm92/dsi15_capstone_steering_wheel_prediction
386fab5ad71fa9d26269d146137be19a2042b694
[ "BSD-3-Clause" ]
null
null
null
.ipynb_checkpoints/02_modeling_model_5-checkpoint.ipynb
leemjm92/dsi15_capstone_steering_wheel_prediction
386fab5ad71fa9d26269d146137be19a2042b694
[ "BSD-3-Clause" ]
null
null
null
125.942485
69,604
0.811495
[ [ [ "from google.colab import drive\ndrive.mount('/content/drive')", "Go to this URL in a browser: https://accounts.google.com/o/oauth2/auth?client_id=947318989803-6bn6qk8qdgf4n4g3pfee6491hc0brc4i.apps.googleusercontent.com&redirect_uri=urn%3aietf%3awg%3aoauth%3a2.0%3aoob&response_type=code&scope=email%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdocs.test%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive%20https%3a%2f%2fwww.googleapis.com%2fauth%2fdrive.photos.readonly%20https%3a%2f%2fwww.googleapis.com%2fauth%2fpeopleapi.readonly\n\nEnter your authorization code:\n··········\nMounted at /content/drive\n" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy import load\nfrom numpy import asarray\nfrom numpy import savez_compressed\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.optimizers import Adam\nfrom keras.metrics import RootMeanSquaredError\nfrom keras.models import load_model\nfrom keras.callbacks import *\n\n%matplotlib inline", "_____no_output_____" ], [ "###################################### loading new data", "_____no_output_____" ], [ "camera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/nedlog2_clea.csv')", "_____no_output_____" ], [ "def camera_processing(camera_file, file_name):\n\n # camera file\n camera = camera_file.f.arr_0\n camera = camera.astype('float32')\n camera = camera/255\n camera = camera.reshape(camera.shape[0], camera.shape[1], camera.shape[2], 1)\n savez_compressed(f'/content/drive/My Drive/datasets/{file_name}_train', camera)\n \n return print('Done')", "_____no_output_____" ], [ "def log_processing(log_file, file_name):\n log_file['steering_avg_radian'] = log_file['steering_avg'] * np.pi / 180\n log_file.to_csv(f'/content/drive/My Drive/datasets/{file_name}_train.csv')\n\n return print('Done')", "_____no_output_____" ], [ "camera_processing(camera2, 'camera2')", "_____no_output_____" ], [ "log_processing(log2, 'log2')", "Done\n" ], [ "def train_split(camera_file_name, log_file_name):\n # load camera file\n X = load(f'/content/drive/My Drive/datasets/{camera_file_name}_train.npz')\n X = X.f.arr_0\n # load log file\n log = pd.read_csv(f'/content/drive/My Drive/datasets/{log_file_name}_train.csv')\n y = log['steering_avg_radian']\n y = y.to_numpy()\n y = y.reshape(y.shape[0], 1)\n # train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)\n # save them into individual file doing so due to ram management\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_X_train', X_train)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_X_test', X_test)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_y_train', y_train)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_y_test', y_test)\n\n return print('Done')", "_____no_output_____" ], [ "train_split('camera2', 'log2')", "Done\n" ], [ "# # log file\n# log_file['steering_avg_radian'] = log_file['steering_avg'] * np.pi / 180\n# y = log_file['steering_avg_radian']\n# y = y.to_numpy", "_____no_output_____" ], [ "############################# end of loading new data", "_____no_output_____" ], [ "X = X.f.arr_0", "_____no_output_____" ], [ "X.shape", "_____no_output_____" ], [ "log1 = pd.read_csv('/content/drive/My 
Drive/log1_full.csv')", "_____no_output_____" ], [ "log1.head()", "_____no_output_____" ], [ "# convert the angle from degree to radian\nlog1['steering_avg_radian'] = log1['steering_avg'] * np.pi / 180", "_____no_output_____" ], [ "log1.head()", "_____no_output_____" ], [ "log1.to_csv('/content/drive/My Drive/log1_train.csv')", "_____no_output_____" ], [ "log1 = pd.read_csv('/content/drive/My Drive/log1_train.csv')", "_____no_output_____" ], [ "y = log1['steering_avg_radian']", "_____no_output_____" ], [ "y = y.to_numpy()", "_____no_output_____" ], [ "y.shape", "_____no_output_____" ], [ "y = y.reshape(y.shape[0], 1)", "_____no_output_____" ], [ "y.shape", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "# split it so that the validation set is the last 20% of the dataset as I want sequential data \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)", "_____no_output_____" ], [ "########################### start of train with camera8 data for model 5 epochs = 30", "_____no_output_____" ], [ "camera8 = load('/content/drive/My Drive/datasets/camera8_cleaned.npz')\nlog8 = pd.read_csv('/content/drive/My Drive/datasets/log8_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera8, 'camera8')", "Done\n" ], [ "log_processing(log1, 'log1')", "Done\n" ], [ "train_split('camera1', 'log1')", "Done\n" ], [ "X_train, X_test, y_train, y_test = train_load('camera8')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "model = Sequential()\n\nmodel.add(Conv2D(16, (8, 8), strides=(4, 4), activation='elu', padding=\"same\"))\n\nmodel.add(Conv2D(32, (5, 5), strides=(2, 2), activation='elu', padding=\"same\"))\n\nmodel.add(Conv2D(64, (5, 5), strides=(2, 2), padding=\"same\"))\n\nmodel.add(Flatten())\nmodel.add(Dropout(.2))\n\nmodel.add(Dense(512, activation='elu'))\nmodel.add(Dropout(.5))\n\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer=Adam(lr=1e-04), metrics=[RootMeanSquaredError()])", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_1_camera8.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n347/347 [==============================] - ETA: 0s - loss: 0.3885 - root_mean_squared_error: 0.6233\nEpoch 00001: val_loss improved from inf to 0.35014, saving model to /content/drive/My Drive/epochs/model_5_1_camera8.0001-0.3501.h5\n347/347 [==============================] - 2s 7ms/step - loss: 0.3885 - root_mean_squared_error: 0.6233 - val_loss: 0.3501 - val_root_mean_squared_error: 0.5917\nEpoch 2/30\n344/347 [============================>.] - ETA: 0s - loss: 0.3511 - root_mean_squared_error: 0.5925\nEpoch 00002: val_loss improved from 0.35014 to 0.33945, saving model to /content/drive/My Drive/epochs/model_5_1_camera8.0002-0.3395.h5\n347/347 [==============================] - 2s 6ms/step - loss: 0.3530 - root_mean_squared_error: 0.5942 - val_loss: 0.3395 - val_root_mean_squared_error: 0.5826\nEpoch 3/30\n345/347 [============================>.] 
- ETA: 0s - loss: 0.3234 - root_mean_squared_error: 0.5687\nEpoch 00003: val_loss improved from 0.33945 to 0.33724, saving model to /content/drive/My Drive/epochs/model_5_1_camera8.0003-0.3372.h5\n347/347 [==============================] - 2s 6ms/step - loss: 0.3250 - root_mean_squared_error: 0.5701 - val_loss: 0.3372 - val_root_mean_squared_error: 0.5807\nEpoch 4/30\n338/347 [============================>.] - ETA: 0s - loss: 0.3078 - root_mean_squared_error: 0.5548\nEpoch 00004: val_loss improved from 0.33724 to 0.33642, saving model to /content/drive/My Drive/epochs/model_5_1_camera8.0004-0.3364.h5\n347/347 [==============================] - 2s 7ms/step - loss: 0.3104 - root_mean_squared_error: 0.5572 - val_loss: 0.3364 - val_root_mean_squared_error: 0.5800\nEpoch 5/30\n340/347 [============================>.] - ETA: 0s - loss: 0.2960 - root_mean_squared_error: 0.5440\nEpoch 00005: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2966 - root_mean_squared_error: 0.5446 - val_loss: 0.3365 - val_root_mean_squared_error: 0.5801\nEpoch 6/30\n340/347 [============================>.] - ETA: 0s - loss: 0.2852 - root_mean_squared_error: 0.5340\nEpoch 00006: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2837 - root_mean_squared_error: 0.5326 - val_loss: 0.3387 - val_root_mean_squared_error: 0.5820\nEpoch 7/30\n343/347 [============================>.] - ETA: 0s - loss: 0.2760 - root_mean_squared_error: 0.5254\nEpoch 00007: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2765 - root_mean_squared_error: 0.5258 - val_loss: 0.3412 - val_root_mean_squared_error: 0.5842\nEpoch 8/30\n341/347 [============================>.] - ETA: 0s - loss: 0.2689 - root_mean_squared_error: 0.5185\nEpoch 00008: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2699 - root_mean_squared_error: 0.5195 - val_loss: 0.3443 - val_root_mean_squared_error: 0.5868\nEpoch 9/30\n344/347 [============================>.] - ETA: 0s - loss: 0.2649 - root_mean_squared_error: 0.5147\nEpoch 00009: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2642 - root_mean_squared_error: 0.5140 - val_loss: 0.3437 - val_root_mean_squared_error: 0.5862\nEpoch 10/30\n344/347 [============================>.] - ETA: 0s - loss: 0.2584 - root_mean_squared_error: 0.5083\nEpoch 00010: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2577 - root_mean_squared_error: 0.5076 - val_loss: 0.3449 - val_root_mean_squared_error: 0.5872\nEpoch 11/30\n344/347 [============================>.] - ETA: 0s - loss: 0.2544 - root_mean_squared_error: 0.5043\nEpoch 00011: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2539 - root_mean_squared_error: 0.5039 - val_loss: 0.3474 - val_root_mean_squared_error: 0.5894\nEpoch 12/30\n347/347 [==============================] - ETA: 0s - loss: 0.2496 - root_mean_squared_error: 0.4996\nEpoch 00012: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2496 - root_mean_squared_error: 0.4996 - val_loss: 0.3487 - val_root_mean_squared_error: 0.5905\nEpoch 13/30\n343/347 [============================>.] 
- ETA: 0s - loss: 0.2446 - root_mean_squared_error: 0.4945\nEpoch 00013: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2449 - root_mean_squared_error: 0.4949 - val_loss: 0.3463 - val_root_mean_squared_error: 0.5884\nEpoch 14/30\n344/347 [============================>.] - ETA: 0s - loss: 0.2423 - root_mean_squared_error: 0.4923\nEpoch 00014: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2418 - root_mean_squared_error: 0.4917 - val_loss: 0.3542 - val_root_mean_squared_error: 0.5951\nEpoch 15/30\n340/347 [============================>.] - ETA: 0s - loss: 0.2410 - root_mean_squared_error: 0.4909\nEpoch 00015: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2398 - root_mean_squared_error: 0.4897 - val_loss: 0.3555 - val_root_mean_squared_error: 0.5962\nEpoch 16/30\n340/347 [============================>.] - ETA: 0s - loss: 0.2349 - root_mean_squared_error: 0.4847\nEpoch 00016: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2346 - root_mean_squared_error: 0.4843 - val_loss: 0.3555 - val_root_mean_squared_error: 0.5962\nEpoch 17/30\n346/347 [============================>.] - ETA: 0s - loss: 0.2315 - root_mean_squared_error: 0.4812\nEpoch 00017: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2314 - root_mean_squared_error: 0.4811 - val_loss: 0.3629 - val_root_mean_squared_error: 0.6024\nEpoch 18/30\n342/347 [============================>.] - ETA: 0s - loss: 0.2290 - root_mean_squared_error: 0.4785\nEpoch 00018: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2279 - root_mean_squared_error: 0.4773 - val_loss: 0.3644 - val_root_mean_squared_error: 0.6036\nEpoch 19/30\n342/347 [============================>.] - ETA: 0s - loss: 0.2244 - root_mean_squared_error: 0.4737\nEpoch 00019: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2241 - root_mean_squared_error: 0.4734 - val_loss: 0.3732 - val_root_mean_squared_error: 0.6109\nEpoch 20/30\n343/347 [============================>.] - ETA: 0s - loss: 0.2229 - root_mean_squared_error: 0.4721\nEpoch 00020: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2230 - root_mean_squared_error: 0.4723 - val_loss: 0.3757 - val_root_mean_squared_error: 0.6129\nEpoch 21/30\n346/347 [============================>.] - ETA: 0s - loss: 0.2226 - root_mean_squared_error: 0.4718\nEpoch 00021: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2223 - root_mean_squared_error: 0.4715 - val_loss: 0.3742 - val_root_mean_squared_error: 0.6118\nEpoch 22/30\n343/347 [============================>.] - ETA: 0s - loss: 0.2182 - root_mean_squared_error: 0.4672\nEpoch 00022: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2178 - root_mean_squared_error: 0.4666 - val_loss: 0.3794 - val_root_mean_squared_error: 0.6159\nEpoch 23/30\n343/347 [============================>.] 
- ETA: 0s - loss: 0.2170 - root_mean_squared_error: 0.4658\nEpoch 00023: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2175 - root_mean_squared_error: 0.4664 - val_loss: 0.3792 - val_root_mean_squared_error: 0.6158\nEpoch 24/30\n340/347 [============================>.] - ETA: 0s - loss: 0.2145 - root_mean_squared_error: 0.4631\nEpoch 00024: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2135 - root_mean_squared_error: 0.4620 - val_loss: 0.3804 - val_root_mean_squared_error: 0.6168\nEpoch 25/30\n341/347 [============================>.] - ETA: 0s - loss: 0.2097 - root_mean_squared_error: 0.4579\nEpoch 00025: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2102 - root_mean_squared_error: 0.4585 - val_loss: 0.3917 - val_root_mean_squared_error: 0.6258\nEpoch 26/30\n343/347 [============================>.] - ETA: 0s - loss: 0.2090 - root_mean_squared_error: 0.4571\nEpoch 00026: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 6ms/step - loss: 0.2085 - root_mean_squared_error: 0.4567 - val_loss: 0.3987 - val_root_mean_squared_error: 0.6314\nEpoch 27/30\n338/347 [============================>.] - ETA: 0s - loss: 0.2043 - root_mean_squared_error: 0.4519\nEpoch 00027: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 7ms/step - loss: 0.2067 - root_mean_squared_error: 0.4546 - val_loss: 0.3909 - val_root_mean_squared_error: 0.6252\nEpoch 28/30\n341/347 [============================>.] - ETA: 0s - loss: 0.2044 - root_mean_squared_error: 0.4521\nEpoch 00028: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 7ms/step - loss: 0.2048 - root_mean_squared_error: 0.4525 - val_loss: 0.3991 - val_root_mean_squared_error: 0.6317\nEpoch 29/30\n342/347 [============================>.] - ETA: 0s - loss: 0.2035 - root_mean_squared_error: 0.4512\nEpoch 00029: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 7ms/step - loss: 0.2038 - root_mean_squared_error: 0.4514 - val_loss: 0.4025 - val_root_mean_squared_error: 0.6345\nEpoch 30/30\n345/347 [============================>.] 
- ETA: 0s - loss: 0.2001 - root_mean_squared_error: 0.4473\nEpoch 00030: val_loss did not improve from 0.33642\n347/347 [==============================] - 2s 7ms/step - loss: 0.2002 - root_mean_squared_error: 0.4474 - val_loss: 0.3944 - val_root_mean_squared_error: 0.6280\n" ], [ "model_5_camera8 = model_history('model_5_1_camera8')", "_____no_output_____" ], [ "model_5_camera8.head()", "_____no_output_____" ], [ "#################### end of training camera8 data for model 5", "_____no_output_____" ], [ "########################### continue training with camera1 data for model 5", "_____no_output_____" ], [ "from keras.models import load_model", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_1_camera8.0004-0.3364.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera1')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "from keras.callbacks import *\nfilepath = \"/content/drive/My Drive/epochs/model_5_2_camera1.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n641/641 [==============================] - ETA: 0s - loss: 0.2590 - root_mean_squared_error: 0.5089\nEpoch 00001: val_loss improved from inf to 0.22856, saving model to /content/drive/My Drive/epochs/model_5_2_camera1.0001-0.2286.h5\n641/641 [==============================] - 5s 8ms/step - loss: 0.2590 - root_mean_squared_error: 0.5089 - val_loss: 0.2286 - val_root_mean_squared_error: 0.4781\nEpoch 2/30\n640/641 [============================>.] - ETA: 0s - loss: 0.2440 - root_mean_squared_error: 0.4940\nEpoch 00002: val_loss improved from 0.22856 to 0.22818, saving model to /content/drive/My Drive/epochs/model_5_2_camera1.0002-0.2282.h5\n641/641 [==============================] - 4s 6ms/step - loss: 0.2438 - root_mean_squared_error: 0.4938 - val_loss: 0.2282 - val_root_mean_squared_error: 0.4777\nEpoch 3/30\n639/641 [============================>.] - ETA: 0s - loss: 0.2380 - root_mean_squared_error: 0.4879\nEpoch 00003: val_loss improved from 0.22818 to 0.22266, saving model to /content/drive/My Drive/epochs/model_5_2_camera1.0003-0.2227.h5\n641/641 [==============================] - 4s 6ms/step - loss: 0.2378 - root_mean_squared_error: 0.4876 - val_loss: 0.2227 - val_root_mean_squared_error: 0.4719\nEpoch 4/30\n639/641 [============================>.] 
- ETA: 0s - loss: 0.2328 - root_mean_squared_error: 0.4825\nEpoch 00004: val_loss improved from 0.22266 to 0.22231, saving model to /content/drive/My Drive/epochs/model_5_2_camera1.0004-0.2223.h5\n641/641 [==============================] - 4s 6ms/step - loss: 0.2323 - root_mean_squared_error: 0.4819 - val_loss: 0.2223 - val_root_mean_squared_error: 0.4715\nEpoch 5/30\n636/641 [============================>.] - ETA: 0s - loss: 0.2293 - root_mean_squared_error: 0.4789\nEpoch 00005: val_loss improved from 0.22231 to 0.22180, saving model to /content/drive/My Drive/epochs/model_5_2_camera1.0005-0.2218.h5\n641/641 [==============================] - 4s 6ms/step - loss: 0.2291 - root_mean_squared_error: 0.4787 - val_loss: 0.2218 - val_root_mean_squared_error: 0.4710\nEpoch 6/30\n640/641 [============================>.] - ETA: 0s - loss: 0.2259 - root_mean_squared_error: 0.4753\nEpoch 00006: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2257 - root_mean_squared_error: 0.4751 - val_loss: 0.2239 - val_root_mean_squared_error: 0.4732\nEpoch 7/30\n636/641 [============================>.] - ETA: 0s - loss: 0.2243 - root_mean_squared_error: 0.4736\nEpoch 00007: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2231 - root_mean_squared_error: 0.4723 - val_loss: 0.2221 - val_root_mean_squared_error: 0.4713\nEpoch 8/30\n639/641 [============================>.] - ETA: 0s - loss: 0.2193 - root_mean_squared_error: 0.4683\nEpoch 00008: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2193 - root_mean_squared_error: 0.4683 - val_loss: 0.2240 - val_root_mean_squared_error: 0.4732\nEpoch 9/30\n640/641 [============================>.] - ETA: 0s - loss: 0.2181 - root_mean_squared_error: 0.4671\nEpoch 00009: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2179 - root_mean_squared_error: 0.4668 - val_loss: 0.2257 - val_root_mean_squared_error: 0.4751\nEpoch 10/30\n634/641 [============================>.] - ETA: 0s - loss: 0.2147 - root_mean_squared_error: 0.4633\nEpoch 00010: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2152 - root_mean_squared_error: 0.4639 - val_loss: 0.2244 - val_root_mean_squared_error: 0.4737\nEpoch 11/30\n637/641 [============================>.] - ETA: 0s - loss: 0.2133 - root_mean_squared_error: 0.4619\nEpoch 00011: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2132 - root_mean_squared_error: 0.4617 - val_loss: 0.2264 - val_root_mean_squared_error: 0.4758\nEpoch 12/30\n635/641 [============================>.] - ETA: 0s - loss: 0.2100 - root_mean_squared_error: 0.4583\nEpoch 00012: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2112 - root_mean_squared_error: 0.4596 - val_loss: 0.2243 - val_root_mean_squared_error: 0.4736\nEpoch 13/30\n637/641 [============================>.] 
- ETA: 0s - loss: 0.2086 - root_mean_squared_error: 0.4568\nEpoch 00013: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 7ms/step - loss: 0.2098 - root_mean_squared_error: 0.4580 - val_loss: 0.2318 - val_root_mean_squared_error: 0.4815\nEpoch 14/30\n641/641 [==============================] - ETA: 0s - loss: 0.2076 - root_mean_squared_error: 0.4557\nEpoch 00014: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2076 - root_mean_squared_error: 0.4557 - val_loss: 0.2272 - val_root_mean_squared_error: 0.4766\nEpoch 15/30\n632/641 [============================>.] - ETA: 0s - loss: 0.2059 - root_mean_squared_error: 0.4537\nEpoch 00015: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2053 - root_mean_squared_error: 0.4531 - val_loss: 0.2304 - val_root_mean_squared_error: 0.4800\nEpoch 16/30\n636/641 [============================>.] - ETA: 0s - loss: 0.2047 - root_mean_squared_error: 0.4525\nEpoch 00016: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2046 - root_mean_squared_error: 0.4523 - val_loss: 0.2326 - val_root_mean_squared_error: 0.4823\nEpoch 17/30\n640/641 [============================>.] - ETA: 0s - loss: 0.2035 - root_mean_squared_error: 0.4511\nEpoch 00017: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2035 - root_mean_squared_error: 0.4511 - val_loss: 0.2286 - val_root_mean_squared_error: 0.4781\nEpoch 18/30\n632/641 [============================>.] - ETA: 0s - loss: 0.2024 - root_mean_squared_error: 0.4498\nEpoch 00018: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2023 - root_mean_squared_error: 0.4498 - val_loss: 0.2285 - val_root_mean_squared_error: 0.4780\nEpoch 19/30\n632/641 [============================>.] - ETA: 0s - loss: 0.2009 - root_mean_squared_error: 0.4482\nEpoch 00019: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.2008 - root_mean_squared_error: 0.4481 - val_loss: 0.2319 - val_root_mean_squared_error: 0.4816\nEpoch 20/30\n640/641 [============================>.] - ETA: 0s - loss: 0.1993 - root_mean_squared_error: 0.4465\nEpoch 00020: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1995 - root_mean_squared_error: 0.4467 - val_loss: 0.2303 - val_root_mean_squared_error: 0.4799\nEpoch 21/30\n634/641 [============================>.] - ETA: 0s - loss: 0.1970 - root_mean_squared_error: 0.4438\nEpoch 00021: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1979 - root_mean_squared_error: 0.4448 - val_loss: 0.2317 - val_root_mean_squared_error: 0.4814\nEpoch 22/30\n636/641 [============================>.] - ETA: 0s - loss: 0.1982 - root_mean_squared_error: 0.4452\nEpoch 00022: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1974 - root_mean_squared_error: 0.4443 - val_loss: 0.2306 - val_root_mean_squared_error: 0.4803\nEpoch 23/30\n639/641 [============================>.] 
- ETA: 0s - loss: 0.1953 - root_mean_squared_error: 0.4420\nEpoch 00023: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1953 - root_mean_squared_error: 0.4419 - val_loss: 0.2341 - val_root_mean_squared_error: 0.4838\nEpoch 24/30\n633/641 [============================>.] - ETA: 0s - loss: 0.1937 - root_mean_squared_error: 0.4401\nEpoch 00024: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1944 - root_mean_squared_error: 0.4409 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\nEpoch 25/30\n637/641 [============================>.] - ETA: 0s - loss: 0.1934 - root_mean_squared_error: 0.4398\nEpoch 00025: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1935 - root_mean_squared_error: 0.4399 - val_loss: 0.2346 - val_root_mean_squared_error: 0.4844\nEpoch 26/30\n634/641 [============================>.] - ETA: 0s - loss: 0.1925 - root_mean_squared_error: 0.4387\nEpoch 00026: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1920 - root_mean_squared_error: 0.4382 - val_loss: 0.2371 - val_root_mean_squared_error: 0.4869\nEpoch 27/30\n637/641 [============================>.] - ETA: 0s - loss: 0.1928 - root_mean_squared_error: 0.4391\nEpoch 00027: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1921 - root_mean_squared_error: 0.4383 - val_loss: 0.2362 - val_root_mean_squared_error: 0.4860\nEpoch 28/30\n633/641 [============================>.] - ETA: 0s - loss: 0.1905 - root_mean_squared_error: 0.4364\nEpoch 00028: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1904 - root_mean_squared_error: 0.4363 - val_loss: 0.2396 - val_root_mean_squared_error: 0.4895\nEpoch 29/30\n640/641 [============================>.] - ETA: 0s - loss: 0.1897 - root_mean_squared_error: 0.4355\nEpoch 00029: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1896 - root_mean_squared_error: 0.4354 - val_loss: 0.2357 - val_root_mean_squared_error: 0.4855\nEpoch 30/30\n637/641 [============================>.] 
- ETA: 0s - loss: 0.1899 - root_mean_squared_error: 0.4358\nEpoch 00030: val_loss did not improve from 0.22180\n641/641 [==============================] - 4s 6ms/step - loss: 0.1893 - root_mean_squared_error: 0.4350 - val_loss: 0.2410 - val_root_mean_squared_error: 0.4909\n" ], [ "ticks = [i for i in range(0, 31, 5)]\nlabels = [i for i in range(0, 31, 5)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera1', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_2_camera1.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_5_camera1 = model_history('model_5_2_camera1')", "_____no_output_____" ], [ "########################### end of train with camera1 data for model 5", "_____no_output_____" ], [ "########################### start of train with camera9 data for model 5", "_____no_output_____" ], [ "camera3 = load('/content/drive/My Drive/datasets/camera3_cleaned.npz')\nlog3 = pd.read_csv('/content/drive/My Drive/datasets/log3_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera3, 'camera3')", "Done\n" ], [ "log_processing(log3, 'log3')", "Done\n" ], [ "def train_split(camera_file_name, log_file_name):\n # load camera file\n X = load(f'/content/drive/My Drive/datasets/{camera_file_name}_train.npz')\n X = X.f.arr_0\n # load log file\n log = pd.read_csv(f'/content/drive/My Drive/datasets/{log_file_name}_train.csv')\n y = log['steering_avg_radian']\n y = y.to_numpy()\n y = y.reshape(y.shape[0], 1)\n # train test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)\n # save them into individual file doing so due to ram management\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_X_train', X_train)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_X_test', X_test)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_y_train', y_train)\n savez_compressed(f'/content/drive/My Drive/datasets/{camera_file_name}_y_test', y_test)\n\n return print('Done')", "_____no_output_____" ], [ "train_split('camera3', 'log3')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "from keras.models import load_model", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_2_camera1.0006-0.2219.h5')", 
"_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera9')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "from keras.callbacks import *\nfilepath = \"/content/drive/My Drive/epochs/model_5_3_camera9.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n599/599 [==============================] - ETA: 0s - loss: 0.0626 - root_mean_squared_error: 0.2503\nEpoch 00001: val_loss improved from inf to 0.05617, saving model to /content/drive/My Drive/epochs/model_5_3_camera9.0001-0.0562.h5\n599/599 [==============================] - 4s 7ms/step - loss: 0.0626 - root_mean_squared_error: 0.2503 - val_loss: 0.0562 - val_root_mean_squared_error: 0.2370\nEpoch 2/30\n598/599 [============================>.] - ETA: 0s - loss: 0.0539 - root_mean_squared_error: 0.2321\nEpoch 00002: val_loss improved from 0.05617 to 0.05341, saving model to /content/drive/My Drive/epochs/model_5_3_camera9.0002-0.0534.h5\n599/599 [==============================] - 4s 6ms/step - loss: 0.0538 - root_mean_squared_error: 0.2320 - val_loss: 0.0534 - val_root_mean_squared_error: 0.2311\nEpoch 3/30\n597/599 [============================>.] - ETA: 0s - loss: 0.0510 - root_mean_squared_error: 0.2258\nEpoch 00003: val_loss improved from 0.05341 to 0.05258, saving model to /content/drive/My Drive/epochs/model_5_3_camera9.0003-0.0526.h5\n599/599 [==============================] - 4s 7ms/step - loss: 0.0513 - root_mean_squared_error: 0.2265 - val_loss: 0.0526 - val_root_mean_squared_error: 0.2293\nEpoch 4/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0497 - root_mean_squared_error: 0.2229\nEpoch 00004: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0498 - root_mean_squared_error: 0.2231 - val_loss: 0.0528 - val_root_mean_squared_error: 0.2298\nEpoch 5/30\n591/599 [============================>.] - ETA: 0s - loss: 0.0480 - root_mean_squared_error: 0.2191\nEpoch 00005: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0485 - root_mean_squared_error: 0.2202 - val_loss: 0.0530 - val_root_mean_squared_error: 0.2303\nEpoch 6/30\n598/599 [============================>.] 
- ETA: 0s - loss: 0.0475 - root_mean_squared_error: 0.2179\nEpoch 00006: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0475 - root_mean_squared_error: 0.2178 - val_loss: 0.0534 - val_root_mean_squared_error: 0.2311\nEpoch 7/30\n593/599 [============================>.] - ETA: 0s - loss: 0.0461 - root_mean_squared_error: 0.2147\nEpoch 00007: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0462 - root_mean_squared_error: 0.2149 - val_loss: 0.0529 - val_root_mean_squared_error: 0.2299\nEpoch 8/30\n597/599 [============================>.] - ETA: 0s - loss: 0.0456 - root_mean_squared_error: 0.2134\nEpoch 00008: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0455 - root_mean_squared_error: 0.2133 - val_loss: 0.0531 - val_root_mean_squared_error: 0.2304\nEpoch 9/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0451 - root_mean_squared_error: 0.2125\nEpoch 00009: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0450 - root_mean_squared_error: 0.2121 - val_loss: 0.0543 - val_root_mean_squared_error: 0.2330\nEpoch 10/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0446 - root_mean_squared_error: 0.2112\nEpoch 00010: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0444 - root_mean_squared_error: 0.2106 - val_loss: 0.0538 - val_root_mean_squared_error: 0.2320\nEpoch 11/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0433 - root_mean_squared_error: 0.2081\nEpoch 00011: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0435 - root_mean_squared_error: 0.2085 - val_loss: 0.0551 - val_root_mean_squared_error: 0.2347\nEpoch 12/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0428 - root_mean_squared_error: 0.2070\nEpoch 00012: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0431 - root_mean_squared_error: 0.2076 - val_loss: 0.0546 - val_root_mean_squared_error: 0.2337\nEpoch 13/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0425 - root_mean_squared_error: 0.2062\nEpoch 00013: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0424 - root_mean_squared_error: 0.2060 - val_loss: 0.0548 - val_root_mean_squared_error: 0.2341\nEpoch 14/30\n595/599 [============================>.] - ETA: 0s - loss: 0.0413 - root_mean_squared_error: 0.2032\nEpoch 00014: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0417 - root_mean_squared_error: 0.2042 - val_loss: 0.0558 - val_root_mean_squared_error: 0.2363\nEpoch 15/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0416 - root_mean_squared_error: 0.2039\nEpoch 00015: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0415 - root_mean_squared_error: 0.2037 - val_loss: 0.0554 - val_root_mean_squared_error: 0.2353\nEpoch 16/30\n594/599 [============================>.] 
- ETA: 0s - loss: 0.0408 - root_mean_squared_error: 0.2021\nEpoch 00016: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0408 - root_mean_squared_error: 0.2019 - val_loss: 0.0560 - val_root_mean_squared_error: 0.2367\nEpoch 17/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0409 - root_mean_squared_error: 0.2021\nEpoch 00017: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0408 - root_mean_squared_error: 0.2020 - val_loss: 0.0548 - val_root_mean_squared_error: 0.2341\nEpoch 18/30\n597/599 [============================>.] - ETA: 0s - loss: 0.0398 - root_mean_squared_error: 0.1994\nEpoch 00018: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0398 - root_mean_squared_error: 0.1994 - val_loss: 0.0561 - val_root_mean_squared_error: 0.2369\nEpoch 19/30\n593/599 [============================>.] - ETA: 0s - loss: 0.0394 - root_mean_squared_error: 0.1985\nEpoch 00019: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0395 - root_mean_squared_error: 0.1988 - val_loss: 0.0574 - val_root_mean_squared_error: 0.2395\nEpoch 20/30\n599/599 [==============================] - ETA: 0s - loss: 0.0392 - root_mean_squared_error: 0.1980\nEpoch 00020: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0392 - root_mean_squared_error: 0.1980 - val_loss: 0.0578 - val_root_mean_squared_error: 0.2405\nEpoch 21/30\n595/599 [============================>.] - ETA: 0s - loss: 0.0393 - root_mean_squared_error: 0.1982\nEpoch 00021: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0393 - root_mean_squared_error: 0.1984 - val_loss: 0.0577 - val_root_mean_squared_error: 0.2403\nEpoch 22/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0390 - root_mean_squared_error: 0.1974\nEpoch 00022: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0388 - root_mean_squared_error: 0.1971 - val_loss: 0.0571 - val_root_mean_squared_error: 0.2390\nEpoch 23/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0387 - root_mean_squared_error: 0.1966\nEpoch 00023: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0385 - root_mean_squared_error: 0.1962 - val_loss: 0.0570 - val_root_mean_squared_error: 0.2388\nEpoch 24/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0382 - root_mean_squared_error: 0.1954\nEpoch 00024: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0381 - root_mean_squared_error: 0.1952 - val_loss: 0.0588 - val_root_mean_squared_error: 0.2425\nEpoch 25/30\n594/599 [============================>.] - ETA: 0s - loss: 0.0377 - root_mean_squared_error: 0.1941\nEpoch 00025: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0377 - root_mean_squared_error: 0.1941 - val_loss: 0.0590 - val_root_mean_squared_error: 0.2430\nEpoch 26/30\n594/599 [============================>.] 
- ETA: 0s - loss: 0.0374 - root_mean_squared_error: 0.1935\nEpoch 00026: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0374 - root_mean_squared_error: 0.1934 - val_loss: 0.0590 - val_root_mean_squared_error: 0.2428\nEpoch 27/30\n596/599 [============================>.] - ETA: 0s - loss: 0.0371 - root_mean_squared_error: 0.1926\nEpoch 00027: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0372 - root_mean_squared_error: 0.1929 - val_loss: 0.0596 - val_root_mean_squared_error: 0.2442\nEpoch 28/30\n590/599 [============================>.] - ETA: 0s - loss: 0.0369 - root_mean_squared_error: 0.1920\nEpoch 00028: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0372 - root_mean_squared_error: 0.1928 - val_loss: 0.0594 - val_root_mean_squared_error: 0.2437\nEpoch 29/30\n591/599 [============================>.] - ETA: 0s - loss: 0.0368 - root_mean_squared_error: 0.1918\nEpoch 00029: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0366 - root_mean_squared_error: 0.1913 - val_loss: 0.0595 - val_root_mean_squared_error: 0.2439\nEpoch 30/30\n593/599 [============================>.] - ETA: 0s - loss: 0.0364 - root_mean_squared_error: 0.1908\nEpoch 00030: val_loss did not improve from 0.05258\n599/599 [==============================] - 4s 6ms/step - loss: 0.0364 - root_mean_squared_error: 0.1907 - val_loss: 0.0600 - val_root_mean_squared_error: 0.2450\n" ], [ "ticks = [i for i in range(0, 31, 5)]\nlabels = [i for i in range(0, 31, 5)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera9', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_3_camera9.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_5_camera9 = model_history('model_5_3_camera9')", "_____no_output_____" ], [ "model_5_camera9.head()", "_____no_output_____" ], [ "#################### end of training camera9 data for model 5", "_____no_output_____" ], [ "########################### start of train with camera2 data for model 5", "_____no_output_____" ], [ "camera4 = load('/content/drive/My Drive/datasets/camera4_cleaned.npz')\nlog4 = pd.read_csv('/content/drive/My Drive/datasets/log4_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera4, 'camera4')", "Done\n" ], [ "log_processing(log4, 'log4')", "Done\n" ], [ "train_split('camera4', 'log4')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = 
load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_3_camera9.0003-0.0526.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera2')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_4_camera2.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n702/703 [============================>.] - ETA: 0s - loss: 0.0459 - root_mean_squared_error: 0.2142\nEpoch 00001: val_loss improved from inf to 0.03861, saving model to /content/drive/My Drive/epochs/model_5_4_camera2.0001-0.0386.h5\n703/703 [==============================] - 5s 7ms/step - loss: 0.0459 - root_mean_squared_error: 0.2143 - val_loss: 0.0386 - val_root_mean_squared_error: 0.1965\nEpoch 2/30\n694/703 [============================>.] - ETA: 0s - loss: 0.0429 - root_mean_squared_error: 0.2072\nEpoch 00002: val_loss improved from 0.03861 to 0.03833, saving model to /content/drive/My Drive/epochs/model_5_4_camera2.0002-0.0383.h5\n703/703 [==============================] - 4s 6ms/step - loss: 0.0429 - root_mean_squared_error: 0.2070 - val_loss: 0.0383 - val_root_mean_squared_error: 0.1958\nEpoch 3/30\n696/703 [============================>.] - ETA: 0s - loss: 0.0416 - root_mean_squared_error: 0.2039\nEpoch 00003: val_loss improved from 0.03833 to 0.03827, saving model to /content/drive/My Drive/epochs/model_5_4_camera2.0003-0.0383.h5\n703/703 [==============================] - 4s 6ms/step - loss: 0.0413 - root_mean_squared_error: 0.2033 - val_loss: 0.0383 - val_root_mean_squared_error: 0.1956\nEpoch 4/30\n699/703 [============================>.] - ETA: 0s - loss: 0.0405 - root_mean_squared_error: 0.2012\nEpoch 00004: val_loss improved from 0.03827 to 0.03820, saving model to /content/drive/My Drive/epochs/model_5_4_camera2.0004-0.0382.h5\n703/703 [==============================] - 5s 7ms/step - loss: 0.0405 - root_mean_squared_error: 0.2012 - val_loss: 0.0382 - val_root_mean_squared_error: 0.1955\nEpoch 5/30\n694/703 [============================>.] 
- ETA: 0s - loss: 0.0396 - root_mean_squared_error: 0.1990\nEpoch 00005: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0395 - root_mean_squared_error: 0.1987 - val_loss: 0.0391 - val_root_mean_squared_error: 0.1976\nEpoch 6/30\n700/703 [============================>.] - ETA: 0s - loss: 0.0388 - root_mean_squared_error: 0.1970\nEpoch 00006: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0387 - root_mean_squared_error: 0.1968 - val_loss: 0.0396 - val_root_mean_squared_error: 0.1991\nEpoch 7/30\n698/703 [============================>.] - ETA: 0s - loss: 0.0380 - root_mean_squared_error: 0.1951\nEpoch 00007: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0382 - root_mean_squared_error: 0.1954 - val_loss: 0.0400 - val_root_mean_squared_error: 0.2000\nEpoch 8/30\n694/703 [============================>.] - ETA: 0s - loss: 0.0372 - root_mean_squared_error: 0.1929\nEpoch 00008: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0377 - root_mean_squared_error: 0.1941 - val_loss: 0.0396 - val_root_mean_squared_error: 0.1990\nEpoch 9/30\n701/703 [============================>.] - ETA: 0s - loss: 0.0366 - root_mean_squared_error: 0.1913\nEpoch 00009: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0370 - root_mean_squared_error: 0.1924 - val_loss: 0.0401 - val_root_mean_squared_error: 0.2003\nEpoch 10/30\n701/703 [============================>.] - ETA: 0s - loss: 0.0361 - root_mean_squared_error: 0.1900\nEpoch 00010: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0363 - root_mean_squared_error: 0.1905 - val_loss: 0.0404 - val_root_mean_squared_error: 0.2009\nEpoch 11/30\n695/703 [============================>.] - ETA: 0s - loss: 0.0358 - root_mean_squared_error: 0.1891\nEpoch 00011: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0358 - root_mean_squared_error: 0.1892 - val_loss: 0.0417 - val_root_mean_squared_error: 0.2041\nEpoch 12/30\n700/703 [============================>.] - ETA: 0s - loss: 0.0356 - root_mean_squared_error: 0.1886\nEpoch 00012: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0355 - root_mean_squared_error: 0.1885 - val_loss: 0.0414 - val_root_mean_squared_error: 0.2034\nEpoch 13/30\n695/703 [============================>.] - ETA: 0s - loss: 0.0345 - root_mean_squared_error: 0.1858\nEpoch 00013: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0349 - root_mean_squared_error: 0.1869 - val_loss: 0.0425 - val_root_mean_squared_error: 0.2062\nEpoch 14/30\n699/703 [============================>.] - ETA: 0s - loss: 0.0347 - root_mean_squared_error: 0.1862\nEpoch 00014: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0346 - root_mean_squared_error: 0.1860 - val_loss: 0.0434 - val_root_mean_squared_error: 0.2084\nEpoch 15/30\n698/703 [============================>.] 
- ETA: 0s - loss: 0.0343 - root_mean_squared_error: 0.1853\nEpoch 00015: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0342 - root_mean_squared_error: 0.1849 - val_loss: 0.0431 - val_root_mean_squared_error: 0.2076\nEpoch 16/30\n696/703 [============================>.] - ETA: 0s - loss: 0.0339 - root_mean_squared_error: 0.1841\nEpoch 00016: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0338 - root_mean_squared_error: 0.1839 - val_loss: 0.0438 - val_root_mean_squared_error: 0.2092\nEpoch 17/30\n698/703 [============================>.] - ETA: 0s - loss: 0.0334 - root_mean_squared_error: 0.1827\nEpoch 00017: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0335 - root_mean_squared_error: 0.1829 - val_loss: 0.0448 - val_root_mean_squared_error: 0.2117\nEpoch 18/30\n699/703 [============================>.] - ETA: 0s - loss: 0.0330 - root_mean_squared_error: 0.1816\nEpoch 00018: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0330 - root_mean_squared_error: 0.1817 - val_loss: 0.0444 - val_root_mean_squared_error: 0.2106\nEpoch 19/30\n699/703 [============================>.] - ETA: 0s - loss: 0.0327 - root_mean_squared_error: 0.1809\nEpoch 00019: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0328 - root_mean_squared_error: 0.1811 - val_loss: 0.0442 - val_root_mean_squared_error: 0.2102\nEpoch 20/30\n700/703 [============================>.] - ETA: 0s - loss: 0.0325 - root_mean_squared_error: 0.1802\nEpoch 00020: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0324 - root_mean_squared_error: 0.1801 - val_loss: 0.0452 - val_root_mean_squared_error: 0.2126\nEpoch 21/30\n697/703 [============================>.] - ETA: 0s - loss: 0.0321 - root_mean_squared_error: 0.1790\nEpoch 00021: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0320 - root_mean_squared_error: 0.1788 - val_loss: 0.0442 - val_root_mean_squared_error: 0.2102\nEpoch 22/30\n698/703 [============================>.] - ETA: 0s - loss: 0.0318 - root_mean_squared_error: 0.1782\nEpoch 00022: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0318 - root_mean_squared_error: 0.1783 - val_loss: 0.0456 - val_root_mean_squared_error: 0.2134\nEpoch 23/30\n697/703 [============================>.] - ETA: 0s - loss: 0.0315 - root_mean_squared_error: 0.1773\nEpoch 00023: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0316 - root_mean_squared_error: 0.1777 - val_loss: 0.0448 - val_root_mean_squared_error: 0.2116\nEpoch 24/30\n699/703 [============================>.] - ETA: 0s - loss: 0.0316 - root_mean_squared_error: 0.1777\nEpoch 00024: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0315 - root_mean_squared_error: 0.1775 - val_loss: 0.0458 - val_root_mean_squared_error: 0.2141\nEpoch 25/30\n697/703 [============================>.] 
- ETA: 0s - loss: 0.0308 - root_mean_squared_error: 0.1756\nEpoch 00025: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0307 - root_mean_squared_error: 0.1753 - val_loss: 0.0461 - val_root_mean_squared_error: 0.2148\nEpoch 26/30\n697/703 [============================>.] - ETA: 0s - loss: 0.0310 - root_mean_squared_error: 0.1760\nEpoch 00026: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0308 - root_mean_squared_error: 0.1755 - val_loss: 0.0470 - val_root_mean_squared_error: 0.2168\nEpoch 27/30\n701/703 [============================>.] - ETA: 0s - loss: 0.0305 - root_mean_squared_error: 0.1747\nEpoch 00027: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0306 - root_mean_squared_error: 0.1749 - val_loss: 0.0463 - val_root_mean_squared_error: 0.2153\nEpoch 28/30\n703/703 [==============================] - ETA: 0s - loss: 0.0302 - root_mean_squared_error: 0.1738\nEpoch 00028: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0302 - root_mean_squared_error: 0.1738 - val_loss: 0.0465 - val_root_mean_squared_error: 0.2156\nEpoch 29/30\n695/703 [============================>.] - ETA: 0s - loss: 0.0297 - root_mean_squared_error: 0.1725\nEpoch 00029: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0300 - root_mean_squared_error: 0.1731 - val_loss: 0.0469 - val_root_mean_squared_error: 0.2166\nEpoch 30/30\n702/703 [============================>.] - ETA: 0s - loss: 0.0296 - root_mean_squared_error: 0.1721\nEpoch 00030: val_loss did not improve from 0.03820\n703/703 [==============================] - 4s 6ms/step - loss: 0.0297 - root_mean_squared_error: 0.1722 - val_loss: 0.0467 - val_root_mean_squared_error: 0.2161\n" ], [ "ticks = [i for i in range(10)]\nlabels = [i for i in range(1, 11)]", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera2', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_4_camera2.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera2 = model_history('model_5_4_camera2')", "_____no_output_____" ], [ "model_3_camera2.head()", "_____no_output_____" ], [ "#################### end of training camera2 data for model 3", "_____no_output_____"
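], [ "# A minimal sketch, not part of the original run: in every fine-tuning stage above,\n# val_loss stops improving within the first few epochs while training still runs all 30,\n# so an EarlyStopping callback next to the existing ModelCheckpoint could cut most of\n# that time. The patience value is an assumption, not a setting used in this notebook.\nfrom tensorflow.keras.callbacks import EarlyStopping\n\nearly_stop = EarlyStopping(monitor='val_loss',\n                           patience=5,\n                           restore_best_weights=True,\n                           verbose=1)\n\n# callbacks_list = [checkpoint, early_stop]  # would extend the checkpoint-only list", "_____no_output_____" ], [ "########################### start of train with camera3 data for model 3", "_____no_output_____" ], [ "camera5 = load('/content/drive/My 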
Drive/datasets/camera5_cleaned.npz')\nlog5 = pd.read_csv('/content/drive/My Drive/datasets/log5_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera5, 'camera5')", "Done\n" ], [ "log_processing(log5, 'log5')", "Done\n" ], [ "train_split('camera5', 'log5')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_4_camera2.0004-0.0382.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera3')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_5_camera3.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n212/212 [==============================] - ETA: 0s - loss: 0.0727 - root_mean_squared_error: 0.2696\nEpoch 00001: val_loss improved from inf to 0.05052, saving model to /content/drive/My Drive/epochs/model_5_5_camera3.0001-0.0505.h5\n212/212 [==============================] - 2s 8ms/step - loss: 0.0727 - root_mean_squared_error: 0.2696 - val_loss: 0.0505 - val_root_mean_squared_error: 0.2248\nEpoch 2/30\n207/212 [============================>.] - ETA: 0s - loss: 0.0642 - root_mean_squared_error: 0.2534\nEpoch 00002: val_loss improved from 0.05052 to 0.04959, saving model to /content/drive/My Drive/epochs/model_5_5_camera3.0002-0.0496.h5\n212/212 [==============================] - 1s 7ms/step - loss: 0.0632 - root_mean_squared_error: 0.2514 - val_loss: 0.0496 - val_root_mean_squared_error: 0.2227\nEpoch 3/30\n207/212 [============================>.] - ETA: 0s - loss: 0.0597 - root_mean_squared_error: 0.2444\nEpoch 00003: val_loss improved from 0.04959 to 0.04791, saving model to /content/drive/My Drive/epochs/model_5_5_camera3.0003-0.0479.h5\n212/212 [==============================] - 1s 7ms/step - loss: 0.0594 - root_mean_squared_error: 0.2437 - val_loss: 0.0479 - val_root_mean_squared_error: 0.2189\nEpoch 4/30\n203/212 [===========================>..] 
- ETA: 0s - loss: 0.0571 - root_mean_squared_error: 0.2389\nEpoch 00004: val_loss did not improve from 0.04791\n212/212 [==============================] - 1s 6ms/step - loss: 0.0573 - root_mean_squared_error: 0.2394 - val_loss: 0.0480 - val_root_mean_squared_error: 0.2191\nEpoch 5/30\n207/212 [============================>.] - ETA: 0s - loss: 0.0563 - root_mean_squared_error: 0.2373\nEpoch 00005: val_loss improved from 0.04791 to 0.04643, saving model to /content/drive/My Drive/epochs/model_5_5_camera3.0005-0.0464.h5\n212/212 [==============================] - 1s 7ms/step - loss: 0.0558 - root_mean_squared_error: 0.2363 - val_loss: 0.0464 - val_root_mean_squared_error: 0.2155\nEpoch 6/30\n209/212 [============================>.] - ETA: 0s - loss: 0.0547 - root_mean_squared_error: 0.2339\nEpoch 00006: val_loss did not improve from 0.04643\n212/212 [==============================] - 1s 6ms/step - loss: 0.0543 - root_mean_squared_error: 0.2330 - val_loss: 0.0468 - val_root_mean_squared_error: 0.2163\nEpoch 7/30\n211/212 [============================>.] - ETA: 0s - loss: 0.0533 - root_mean_squared_error: 0.2309\nEpoch 00007: val_loss did not improve from 0.04643\n212/212 [==============================] - 1s 7ms/step - loss: 0.0532 - root_mean_squared_error: 0.2306 - val_loss: 0.0466 - val_root_mean_squared_error: 0.2159\nEpoch 8/30\n208/212 [============================>.] - ETA: 0s - loss: 0.0516 - root_mean_squared_error: 0.2273\nEpoch 00008: val_loss improved from 0.04643 to 0.04639, saving model to /content/drive/My Drive/epochs/model_5_5_camera3.0008-0.0464.h5\n212/212 [==============================] - 2s 7ms/step - loss: 0.0519 - root_mean_squared_error: 0.2279 - val_loss: 0.0464 - val_root_mean_squared_error: 0.2154\nEpoch 9/30\n207/212 [============================>.] - ETA: 0s - loss: 0.0512 - root_mean_squared_error: 0.2263\nEpoch 00009: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 7ms/step - loss: 0.0509 - root_mean_squared_error: 0.2256 - val_loss: 0.0469 - val_root_mean_squared_error: 0.2165\nEpoch 10/30\n205/212 [============================>.] - ETA: 0s - loss: 0.0492 - root_mean_squared_error: 0.2219\nEpoch 00010: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0496 - root_mean_squared_error: 0.2226 - val_loss: 0.0468 - val_root_mean_squared_error: 0.2163\nEpoch 11/30\n211/212 [============================>.] - ETA: 0s - loss: 0.0494 - root_mean_squared_error: 0.2223\nEpoch 00011: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0493 - root_mean_squared_error: 0.2220 - val_loss: 0.0477 - val_root_mean_squared_error: 0.2185\nEpoch 12/30\n211/212 [============================>.] - ETA: 0s - loss: 0.0489 - root_mean_squared_error: 0.2212\nEpoch 00012: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0489 - root_mean_squared_error: 0.2212 - val_loss: 0.0466 - val_root_mean_squared_error: 0.2158\nEpoch 13/30\n206/212 [============================>.] - ETA: 0s - loss: 0.0483 - root_mean_squared_error: 0.2199\nEpoch 00013: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0486 - root_mean_squared_error: 0.2204 - val_loss: 0.0486 - val_root_mean_squared_error: 0.2204\nEpoch 14/30\n206/212 [============================>.] 
- ETA: 0s - loss: 0.0476 - root_mean_squared_error: 0.2182\nEpoch 00014: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0473 - root_mean_squared_error: 0.2175 - val_loss: 0.0499 - val_root_mean_squared_error: 0.2235\nEpoch 15/30\n204/212 [===========================>..] - ETA: 0s - loss: 0.0469 - root_mean_squared_error: 0.2165\nEpoch 00015: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0475 - root_mean_squared_error: 0.2178 - val_loss: 0.0476 - val_root_mean_squared_error: 0.2182\nEpoch 16/30\n205/212 [============================>.] - ETA: 0s - loss: 0.0469 - root_mean_squared_error: 0.2166\nEpoch 00016: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 7ms/step - loss: 0.0469 - root_mean_squared_error: 0.2166 - val_loss: 0.0496 - val_root_mean_squared_error: 0.2228\nEpoch 17/30\n212/212 [==============================] - ETA: 0s - loss: 0.0457 - root_mean_squared_error: 0.2139\nEpoch 00017: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0457 - root_mean_squared_error: 0.2139 - val_loss: 0.0487 - val_root_mean_squared_error: 0.2207\nEpoch 18/30\n209/212 [============================>.] - ETA: 0s - loss: 0.0460 - root_mean_squared_error: 0.2145\nEpoch 00018: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0460 - root_mean_squared_error: 0.2144 - val_loss: 0.0487 - val_root_mean_squared_error: 0.2207\nEpoch 19/30\n212/212 [==============================] - ETA: 0s - loss: 0.0453 - root_mean_squared_error: 0.2128\nEpoch 00019: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0453 - root_mean_squared_error: 0.2128 - val_loss: 0.0480 - val_root_mean_squared_error: 0.2191\nEpoch 20/30\n212/212 [==============================] - ETA: 0s - loss: 0.0450 - root_mean_squared_error: 0.2121\nEpoch 00020: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0450 - root_mean_squared_error: 0.2121 - val_loss: 0.0488 - val_root_mean_squared_error: 0.2210\nEpoch 21/30\n205/212 [============================>.] - ETA: 0s - loss: 0.0434 - root_mean_squared_error: 0.2082\nEpoch 00021: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0440 - root_mean_squared_error: 0.2097 - val_loss: 0.0495 - val_root_mean_squared_error: 0.2226\nEpoch 22/30\n203/212 [===========================>..] - ETA: 0s - loss: 0.0448 - root_mean_squared_error: 0.2116\nEpoch 00022: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0440 - root_mean_squared_error: 0.2098 - val_loss: 0.0493 - val_root_mean_squared_error: 0.2220\nEpoch 23/30\n206/212 [============================>.] - ETA: 0s - loss: 0.0447 - root_mean_squared_error: 0.2113\nEpoch 00023: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 7ms/step - loss: 0.0442 - root_mean_squared_error: 0.2102 - val_loss: 0.0486 - val_root_mean_squared_error: 0.2204\nEpoch 24/30\n204/212 [===========================>..] 
- ETA: 0s - loss: 0.0437 - root_mean_squared_error: 0.2090\nEpoch 00024: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0440 - root_mean_squared_error: 0.2098 - val_loss: 0.0498 - val_root_mean_squared_error: 0.2231\nEpoch 25/30\n211/212 [============================>.] - ETA: 0s - loss: 0.0432 - root_mean_squared_error: 0.2078\nEpoch 00025: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0431 - root_mean_squared_error: 0.2075 - val_loss: 0.0494 - val_root_mean_squared_error: 0.2222\nEpoch 26/30\n204/212 [===========================>..] - ETA: 0s - loss: 0.0421 - root_mean_squared_error: 0.2052\nEpoch 00026: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0426 - root_mean_squared_error: 0.2063 - val_loss: 0.0497 - val_root_mean_squared_error: 0.2230\nEpoch 27/30\n206/212 [============================>.] - ETA: 0s - loss: 0.0430 - root_mean_squared_error: 0.2074\nEpoch 00027: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0426 - root_mean_squared_error: 0.2064 - val_loss: 0.0495 - val_root_mean_squared_error: 0.2224\nEpoch 28/30\n203/212 [===========================>..] - ETA: 0s - loss: 0.0424 - root_mean_squared_error: 0.2058\nEpoch 00028: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0422 - root_mean_squared_error: 0.2054 - val_loss: 0.0522 - val_root_mean_squared_error: 0.2284\nEpoch 29/30\n203/212 [===========================>..] - ETA: 0s - loss: 0.0400 - root_mean_squared_error: 0.2000\nEpoch 00029: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0416 - root_mean_squared_error: 0.2041 - val_loss: 0.0507 - val_root_mean_squared_error: 0.2251\nEpoch 30/30\n204/212 [===========================>..] 
- ETA: 0s - loss: 0.0412 - root_mean_squared_error: 0.2030\nEpoch 00030: val_loss did not improve from 0.04639\n212/212 [==============================] - 1s 6ms/step - loss: 0.0415 - root_mean_squared_error: 0.2037 - val_loss: 0.0501 - val_root_mean_squared_error: 0.2238\n" ], [ "ticks = [i for i in range(0, 31, 5)]\nlabels = [i for i in range(0, 31, 5)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera3', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_5_camera3.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera3 = model_history('model_5_5_camera3')", "_____no_output_____" ], [ "model_3_camera3.head()", "_____no_output_____" ], [ "#################### end of training camera3 data for model 3", "_____no_output_____"
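], [ "# A minimal sketch of an assumed refactor (not run here): each cycle above repeats the\n# same steps -- reload the best checkpoint, load one camera's splits, fine-tune with a\n# fresh ModelCheckpoint. A helper reusing load_model, train_load and ModelCheckpoint as\n# already defined in this notebook would keep the stages consistent.\ndef fine_tune_on_camera(camera_name, checkpoint_path, model_tag):\n    model = load_model(checkpoint_path)\n    X_train, X_test, y_train, y_test = train_load(camera_name)\n    filepath = f'/content/drive/My Drive/epochs/{model_tag}.{{epoch:04d}}-{{val_loss:.4f}}.h5'\n    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,\n                                 save_best_only=True, mode='min')\n    return model.fit(X_train, y_train, batch_size=64,\n                     validation_data=(X_test, y_test),\n                     epochs=30, verbose=1, callbacks=[checkpoint])", "_____no_output_____" ], [ "########################### start of train with camera4 data for model 3", "_____no_output_____" ], [ "camera6 = load('/content/drive/My Drive/datasets/camera6_cleaned.npz')\nlog6 = pd.read_csv('/content/drive/My Drive/datasets/log6_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera6, 'camera6')", "Done\n" ], [ "log_processing(log6, 'log6')", "Done\n" ], [ "train_split('camera6', 'log6')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_5_camera3.0008-0.0464.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera4')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = 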
\"/content/drive/My Drive/epochs/model_5_6_camera4.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n668/668 [==============================] - ETA: 0s - loss: 0.1216 - root_mean_squared_error: 0.3487\nEpoch 00001: val_loss improved from inf to 0.13672, saving model to /content/drive/My Drive/epochs/model_5_6_camera4.0001-0.1367.h5\n668/668 [==============================] - 5s 7ms/step - loss: 0.1216 - root_mean_squared_error: 0.3487 - val_loss: 0.1367 - val_root_mean_squared_error: 0.3698\nEpoch 2/30\n667/668 [============================>.] - ETA: 0s - loss: 0.1131 - root_mean_squared_error: 0.3363\nEpoch 00002: val_loss improved from 0.13672 to 0.13305, saving model to /content/drive/My Drive/epochs/model_5_6_camera4.0002-0.1331.h5\n668/668 [==============================] - 4s 7ms/step - loss: 0.1131 - root_mean_squared_error: 0.3363 - val_loss: 0.1331 - val_root_mean_squared_error: 0.3648\nEpoch 3/30\n665/668 [============================>.] - ETA: 0s - loss: 0.1085 - root_mean_squared_error: 0.3294\nEpoch 00003: val_loss improved from 0.13305 to 0.13265, saving model to /content/drive/My Drive/epochs/model_5_6_camera4.0003-0.1327.h5\n668/668 [==============================] - 4s 7ms/step - loss: 0.1082 - root_mean_squared_error: 0.3290 - val_loss: 0.1327 - val_root_mean_squared_error: 0.3642\nEpoch 4/30\n661/668 [============================>.] - ETA: 0s - loss: 0.1023 - root_mean_squared_error: 0.3198\nEpoch 00004: val_loss improved from 0.13265 to 0.13179, saving model to /content/drive/My Drive/epochs/model_5_6_camera4.0004-0.1318.h5\n668/668 [==============================] - 5s 7ms/step - loss: 0.1044 - root_mean_squared_error: 0.3231 - val_loss: 0.1318 - val_root_mean_squared_error: 0.3630\nEpoch 5/30\n664/668 [============================>.] - ETA: 0s - loss: 0.1015 - root_mean_squared_error: 0.3187\nEpoch 00005: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.1014 - root_mean_squared_error: 0.3184 - val_loss: 0.1332 - val_root_mean_squared_error: 0.3649\nEpoch 6/30\n667/668 [============================>.] - ETA: 0s - loss: 0.0982 - root_mean_squared_error: 0.3134\nEpoch 00006: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0982 - root_mean_squared_error: 0.3133 - val_loss: 0.1346 - val_root_mean_squared_error: 0.3669\nEpoch 7/30\n667/668 [============================>.] - ETA: 0s - loss: 0.0961 - root_mean_squared_error: 0.3100\nEpoch 00007: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0961 - root_mean_squared_error: 0.3100 - val_loss: 0.1355 - val_root_mean_squared_error: 0.3681\nEpoch 8/30\n665/668 [============================>.] - ETA: 0s - loss: 0.0941 - root_mean_squared_error: 0.3067\nEpoch 00008: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0939 - root_mean_squared_error: 0.3064 - val_loss: 0.1363 - val_root_mean_squared_error: 0.3692\nEpoch 9/30\n662/668 [============================>.] 
- ETA: 0s - loss: 0.0920 - root_mean_squared_error: 0.3033\nEpoch 00009: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0919 - root_mean_squared_error: 0.3032 - val_loss: 0.1378 - val_root_mean_squared_error: 0.3712\nEpoch 10/30\n665/668 [============================>.] - ETA: 0s - loss: 0.0908 - root_mean_squared_error: 0.3012\nEpoch 00010: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0906 - root_mean_squared_error: 0.3010 - val_loss: 0.1406 - val_root_mean_squared_error: 0.3750\nEpoch 11/30\n668/668 [==============================] - ETA: 0s - loss: 0.0888 - root_mean_squared_error: 0.2980\nEpoch 00011: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0888 - root_mean_squared_error: 0.2980 - val_loss: 0.1411 - val_root_mean_squared_error: 0.3757\nEpoch 12/30\n660/668 [============================>.] - ETA: 0s - loss: 0.0879 - root_mean_squared_error: 0.2964\nEpoch 00012: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0876 - root_mean_squared_error: 0.2960 - val_loss: 0.1413 - val_root_mean_squared_error: 0.3759\nEpoch 13/30\n660/668 [============================>.] - ETA: 0s - loss: 0.0861 - root_mean_squared_error: 0.2935\nEpoch 00013: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0861 - root_mean_squared_error: 0.2934 - val_loss: 0.1446 - val_root_mean_squared_error: 0.3802\nEpoch 14/30\n660/668 [============================>.] - ETA: 0s - loss: 0.0848 - root_mean_squared_error: 0.2913\nEpoch 00014: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0849 - root_mean_squared_error: 0.2914 - val_loss: 0.1449 - val_root_mean_squared_error: 0.3807\nEpoch 15/30\n668/668 [==============================] - ETA: 0s - loss: 0.0834 - root_mean_squared_error: 0.2888\nEpoch 00015: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0834 - root_mean_squared_error: 0.2888 - val_loss: 0.1452 - val_root_mean_squared_error: 0.3811\nEpoch 16/30\n668/668 [==============================] - ETA: 0s - loss: 0.0829 - root_mean_squared_error: 0.2880\nEpoch 00016: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0829 - root_mean_squared_error: 0.2880 - val_loss: 0.1464 - val_root_mean_squared_error: 0.3826\nEpoch 17/30\n664/668 [============================>.] - ETA: 0s - loss: 0.0820 - root_mean_squared_error: 0.2864\nEpoch 00017: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0819 - root_mean_squared_error: 0.2861 - val_loss: 0.1481 - val_root_mean_squared_error: 0.3848\nEpoch 18/30\n666/668 [============================>.] - ETA: 0s - loss: 0.0800 - root_mean_squared_error: 0.2828\nEpoch 00018: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0801 - root_mean_squared_error: 0.2830 - val_loss: 0.1502 - val_root_mean_squared_error: 0.3875\nEpoch 19/30\n660/668 [============================>.] 
- ETA: 0s - loss: 0.0798 - root_mean_squared_error: 0.2825\nEpoch 00019: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0800 - root_mean_squared_error: 0.2829 - val_loss: 0.1497 - val_root_mean_squared_error: 0.3869\nEpoch 20/30\n666/668 [============================>.] - ETA: 0s - loss: 0.0786 - root_mean_squared_error: 0.2803\nEpoch 00020: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0786 - root_mean_squared_error: 0.2804 - val_loss: 0.1495 - val_root_mean_squared_error: 0.3866\nEpoch 21/30\n663/668 [============================>.] - ETA: 0s - loss: 0.0768 - root_mean_squared_error: 0.2771\nEpoch 00021: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0773 - root_mean_squared_error: 0.2780 - val_loss: 0.1508 - val_root_mean_squared_error: 0.3884\nEpoch 22/30\n660/668 [============================>.] - ETA: 0s - loss: 0.0769 - root_mean_squared_error: 0.2773\nEpoch 00022: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 7ms/step - loss: 0.0770 - root_mean_squared_error: 0.2775 - val_loss: 0.1520 - val_root_mean_squared_error: 0.3898\nEpoch 23/30\n661/668 [============================>.] - ETA: 0s - loss: 0.0762 - root_mean_squared_error: 0.2761\nEpoch 00023: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0763 - root_mean_squared_error: 0.2761 - val_loss: 0.1524 - val_root_mean_squared_error: 0.3904\nEpoch 24/30\n668/668 [==============================] - ETA: 0s - loss: 0.0756 - root_mean_squared_error: 0.2750\nEpoch 00024: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0756 - root_mean_squared_error: 0.2750 - val_loss: 0.1531 - val_root_mean_squared_error: 0.3913\nEpoch 25/30\n664/668 [============================>.] - ETA: 0s - loss: 0.0747 - root_mean_squared_error: 0.2733\nEpoch 00025: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0750 - root_mean_squared_error: 0.2738 - val_loss: 0.1560 - val_root_mean_squared_error: 0.3949\nEpoch 26/30\n666/668 [============================>.] - ETA: 0s - loss: 0.0744 - root_mean_squared_error: 0.2728\nEpoch 00026: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0744 - root_mean_squared_error: 0.2727 - val_loss: 0.1556 - val_root_mean_squared_error: 0.3944\nEpoch 27/30\n667/668 [============================>.] - ETA: 0s - loss: 0.0742 - root_mean_squared_error: 0.2725\nEpoch 00027: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0742 - root_mean_squared_error: 0.2724 - val_loss: 0.1541 - val_root_mean_squared_error: 0.3925\nEpoch 28/30\n662/668 [============================>.] - ETA: 0s - loss: 0.0728 - root_mean_squared_error: 0.2698\nEpoch 00028: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0727 - root_mean_squared_error: 0.2696 - val_loss: 0.1557 - val_root_mean_squared_error: 0.3946\nEpoch 29/30\n660/668 [============================>.] 
- ETA: 0s - loss: 0.0722 - root_mean_squared_error: 0.2687\nEpoch 00029: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0720 - root_mean_squared_error: 0.2683 - val_loss: 0.1586 - val_root_mean_squared_error: 0.3982\nEpoch 30/30\n666/668 [============================>.] - ETA: 0s - loss: 0.0718 - root_mean_squared_error: 0.2680\nEpoch 00030: val_loss did not improve from 0.13179\n668/668 [==============================] - 4s 6ms/step - loss: 0.0718 - root_mean_squared_error: 0.2679 - val_loss: 0.1565 - val_root_mean_squared_error: 0.3956\n" ], [ "ticks = [i for i in range(10)]\nlabels = [i for i in range(1, 11)]", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera4', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_6_camera4.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera4 = model_history('model_5_6_camera4')", "_____no_output_____" ], [ "model_3_camera4.head()", "_____no_output_____" ], [ "#################### end of training camera4 data for model 3", "_____no_output_____"
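], [ "# A minimal sketch, assuming the per-stage history CSVs written by model_history() above\n# are still in the datasets folder: overlaying each stage's validation loss puts the\n# per-camera results on one axis for comparison. Not part of the original run.\nstages = ['model_5_4_camera2', 'model_5_5_camera3', 'model_5_6_camera4']\n\nplt.figure(figsize=(20, 8))\nfor name in stages:\n    hist = pd.read_csv(f'/content/drive/My Drive/datasets/{name}.csv')\n    plt.plot(hist['val_loss'], label=name)\nplt.title('Validation Loss by Fine-Tuning Stage', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.legend(fontsize = 18);", "_____no_output_____" ], [ "########################### start of train with camera5 data for model 3", "_____no_output_____" ], [ "camera7 = load('/content/drive/My Drive/datasets/camera7_cleaned.npz')\nlog7 = pd.read_csv('/content/drive/My Drive/datasets/log7_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera7, 'camera7')", "Done\n" ], [ "log_processing(log7, 'log7')", "Done\n" ], [ "train_split('camera7', 'log7')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_6_camera4.0004-0.1318.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = 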
load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera5')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_7_camera5.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n284/289 [============================>.] - ETA: 0s - loss: 0.1137 - root_mean_squared_error: 0.3372\nEpoch 00001: val_loss improved from inf to 0.14154, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0001-0.1415.h5\n289/289 [==============================] - 3s 9ms/step - loss: 0.1138 - root_mean_squared_error: 0.3373 - val_loss: 0.1415 - val_root_mean_squared_error: 0.3762\nEpoch 2/30\n280/289 [============================>.] - ETA: 0s - loss: 0.1007 - root_mean_squared_error: 0.3174\nEpoch 00002: val_loss improved from 0.14154 to 0.13828, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0002-0.1383.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0989 - root_mean_squared_error: 0.3144 - val_loss: 0.1383 - val_root_mean_squared_error: 0.3719\nEpoch 3/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0961 - root_mean_squared_error: 0.3100\nEpoch 00003: val_loss improved from 0.13828 to 0.13581, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0003-0.1358.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0947 - root_mean_squared_error: 0.3078 - val_loss: 0.1358 - val_root_mean_squared_error: 0.3685\nEpoch 4/30\n286/289 [============================>.] - ETA: 0s - loss: 0.0917 - root_mean_squared_error: 0.3028\nEpoch 00004: val_loss improved from 0.13581 to 0.13488, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0004-0.1349.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0913 - root_mean_squared_error: 0.3022 - val_loss: 0.1349 - val_root_mean_squared_error: 0.3673\nEpoch 5/30\n285/289 [============================>.] - ETA: 0s - loss: 0.0865 - root_mean_squared_error: 0.2941\nEpoch 00005: val_loss improved from 0.13488 to 0.13354, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0005-0.1335.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0880 - root_mean_squared_error: 0.2967 - val_loss: 0.1335 - val_root_mean_squared_error: 0.3654\nEpoch 6/30\n287/289 [============================>.] - ETA: 0s - loss: 0.0855 - root_mean_squared_error: 0.2924\nEpoch 00006: val_loss improved from 0.13354 to 0.13233, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0006-0.1323.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0852 - root_mean_squared_error: 0.2920 - val_loss: 0.1323 - val_root_mean_squared_error: 0.3638\nEpoch 7/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0833 - root_mean_squared_error: 0.2887\nEpoch 00007: val_loss did not improve from 0.13233\n289/289 [==============================] - 2s 7ms/step - loss: 0.0824 - root_mean_squared_error: 0.2871 - val_loss: 0.1324 - val_root_mean_squared_error: 0.3638\nEpoch 8/30\n288/289 [============================>.] 
- ETA: 0s - loss: 0.0803 - root_mean_squared_error: 0.2834\nEpoch 00008: val_loss improved from 0.13233 to 0.13203, saving model to /content/drive/My Drive/epochs/model_5_7_camera5.0008-0.1320.h5\n289/289 [==============================] - 2s 7ms/step - loss: 0.0802 - root_mean_squared_error: 0.2833 - val_loss: 0.1320 - val_root_mean_squared_error: 0.3634\nEpoch 9/30\n285/289 [============================>.] - ETA: 0s - loss: 0.0789 - root_mean_squared_error: 0.2810\nEpoch 00009: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0783 - root_mean_squared_error: 0.2798 - val_loss: 0.1333 - val_root_mean_squared_error: 0.3651\nEpoch 10/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0761 - root_mean_squared_error: 0.2759\nEpoch 00010: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0766 - root_mean_squared_error: 0.2767 - val_loss: 0.1328 - val_root_mean_squared_error: 0.3645\nEpoch 11/30\n283/289 [============================>.] - ETA: 0s - loss: 0.0749 - root_mean_squared_error: 0.2737\nEpoch 00011: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0745 - root_mean_squared_error: 0.2730 - val_loss: 0.1334 - val_root_mean_squared_error: 0.3652\nEpoch 12/30\n281/289 [============================>.] - ETA: 0s - loss: 0.0741 - root_mean_squared_error: 0.2722\nEpoch 00012: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0733 - root_mean_squared_error: 0.2707 - val_loss: 0.1348 - val_root_mean_squared_error: 0.3671\nEpoch 13/30\n281/289 [============================>.] - ETA: 0s - loss: 0.0726 - root_mean_squared_error: 0.2694\nEpoch 00013: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0717 - root_mean_squared_error: 0.2678 - val_loss: 0.1362 - val_root_mean_squared_error: 0.3691\nEpoch 14/30\n289/289 [==============================] - ETA: 0s - loss: 0.0705 - root_mean_squared_error: 0.2656\nEpoch 00014: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0705 - root_mean_squared_error: 0.2656 - val_loss: 0.1346 - val_root_mean_squared_error: 0.3669\nEpoch 15/30\n286/289 [============================>.] - ETA: 0s - loss: 0.0688 - root_mean_squared_error: 0.2624\nEpoch 00015: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0693 - root_mean_squared_error: 0.2632 - val_loss: 0.1353 - val_root_mean_squared_error: 0.3679\nEpoch 16/30\n287/289 [============================>.] - ETA: 0s - loss: 0.0683 - root_mean_squared_error: 0.2613\nEpoch 00016: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0681 - root_mean_squared_error: 0.2609 - val_loss: 0.1347 - val_root_mean_squared_error: 0.3670\nEpoch 17/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0675 - root_mean_squared_error: 0.2597\nEpoch 00017: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0675 - root_mean_squared_error: 0.2599 - val_loss: 0.1348 - val_root_mean_squared_error: 0.3672\nEpoch 18/30\n288/289 [============================>.] 
- ETA: 0s - loss: 0.0660 - root_mean_squared_error: 0.2570\nEpoch 00018: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0660 - root_mean_squared_error: 0.2569 - val_loss: 0.1341 - val_root_mean_squared_error: 0.3661\nEpoch 19/30\n288/289 [============================>.] - ETA: 0s - loss: 0.0648 - root_mean_squared_error: 0.2546\nEpoch 00019: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0648 - root_mean_squared_error: 0.2545 - val_loss: 0.1352 - val_root_mean_squared_error: 0.3677\nEpoch 20/30\n283/289 [============================>.] - ETA: 0s - loss: 0.0644 - root_mean_squared_error: 0.2538\nEpoch 00020: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0642 - root_mean_squared_error: 0.2534 - val_loss: 0.1348 - val_root_mean_squared_error: 0.3671\nEpoch 21/30\n284/289 [============================>.] - ETA: 0s - loss: 0.0626 - root_mean_squared_error: 0.2501\nEpoch 00021: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0638 - root_mean_squared_error: 0.2527 - val_loss: 0.1362 - val_root_mean_squared_error: 0.3690\nEpoch 22/30\n280/289 [============================>.] - ETA: 0s - loss: 0.0634 - root_mean_squared_error: 0.2517\nEpoch 00022: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0630 - root_mean_squared_error: 0.2509 - val_loss: 0.1380 - val_root_mean_squared_error: 0.3715\nEpoch 23/30\n289/289 [==============================] - ETA: 0s - loss: 0.0617 - root_mean_squared_error: 0.2484\nEpoch 00023: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0617 - root_mean_squared_error: 0.2484 - val_loss: 0.1360 - val_root_mean_squared_error: 0.3688\nEpoch 24/30\n287/289 [============================>.] - ETA: 0s - loss: 0.0619 - root_mean_squared_error: 0.2487\nEpoch 00024: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0619 - root_mean_squared_error: 0.2489 - val_loss: 0.1379 - val_root_mean_squared_error: 0.3713\nEpoch 25/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0601 - root_mean_squared_error: 0.2452\nEpoch 00025: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0609 - root_mean_squared_error: 0.2468 - val_loss: 0.1372 - val_root_mean_squared_error: 0.3705\nEpoch 26/30\n287/289 [============================>.] - ETA: 0s - loss: 0.0600 - root_mean_squared_error: 0.2449\nEpoch 00026: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0602 - root_mean_squared_error: 0.2453 - val_loss: 0.1382 - val_root_mean_squared_error: 0.3718\nEpoch 27/30\n282/289 [============================>.] - ETA: 0s - loss: 0.0591 - root_mean_squared_error: 0.2430\nEpoch 00027: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 7ms/step - loss: 0.0591 - root_mean_squared_error: 0.2432 - val_loss: 0.1388 - val_root_mean_squared_error: 0.3725\nEpoch 28/30\n280/289 [============================>.] 
- ETA: 0s - loss: 0.0599 - root_mean_squared_error: 0.2447\nEpoch 00028: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0599 - root_mean_squared_error: 0.2448 - val_loss: 0.1400 - val_root_mean_squared_error: 0.3742\nEpoch 29/30\n285/289 [============================>.] - ETA: 0s - loss: 0.0583 - root_mean_squared_error: 0.2414\nEpoch 00029: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0581 - root_mean_squared_error: 0.2410 - val_loss: 0.1395 - val_root_mean_squared_error: 0.3735\nEpoch 30/30\n289/289 [==============================] - ETA: 0s - loss: 0.0577 - root_mean_squared_error: 0.2402\nEpoch 00030: val_loss did not improve from 0.13203\n289/289 [==============================] - 2s 6ms/step - loss: 0.0577 - root_mean_squared_error: 0.2402 - val_loss: 0.1403 - val_root_mean_squared_error: 0.3745\n" ], [ "ticks = [i for i in range(10)]\nlabels = [i for i in range(1, 11)]", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera5', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_7_camera5.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera5 = model_history('model_5_7_camera5')", "_____no_output_____" ], [ "model_3_camera5.head()", "_____no_output_____" ], [ "#################### end of training camera5 data for model 3", "_____no_output_____"
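], [ "# A minimal sketch, not used in the original run: the best checkpoint is currently copied\n# into load_model() by hand (e.g. model_5_7_camera5.0008-0.1320.h5). Since the\n# ModelCheckpoint filename pattern above encodes val_loss, the lowest-val_loss file can\n# be selected automatically instead.\nimport glob\n\ndef best_checkpoint(model_tag):\n    paths = glob.glob(f'/content/drive/My Drive/epochs/{model_tag}.*.h5')\n    # 'model_tag.0008-0.1320.h5' -> take the val_loss after the last '-', drop '.h5'\n    return min(paths, key=lambda p: float(p.rsplit('-', 1)[1][:-3]))\n\n# model = load_model(best_checkpoint('model_5_7_camera5'))", "_____no_output_____" ], [ "########################### start of train with camera6 data for model 3", "_____no_output_____" ], [ "camera8 = load('/content/drive/My Drive/datasets/camera8_cleaned.npz')\nlog8 = pd.read_csv('/content/drive/My Drive/datasets/log8_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera8, 'camera8')", "Done\n" ], [ "log_processing(log8, 'log8')", "Done\n" ], [ "train_split('camera8', 'log8')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ], [ "model = load_model('/content/drive/My Drive/epochs/model_5_7_camera5.0008-0.1320.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My 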
Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera6')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_8_camera6.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n1026/1032 [============================>.] - ETA: 0s - loss: 0.0761 - root_mean_squared_error: 0.2759\nEpoch 00001: val_loss improved from inf to 0.07317, saving model to /content/drive/My Drive/epochs/model_5_8_camera6.0001-0.0732.h5\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0763 - root_mean_squared_error: 0.2762 - val_loss: 0.0732 - val_root_mean_squared_error: 0.2705\nEpoch 2/30\n1030/1032 [============================>.] - ETA: 0s - loss: 0.0690 - root_mean_squared_error: 0.2626\nEpoch 00002: val_loss improved from 0.07317 to 0.07173, saving model to /content/drive/My Drive/epochs/model_5_8_camera6.0002-0.0717.h5\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0689 - root_mean_squared_error: 0.2624 - val_loss: 0.0717 - val_root_mean_squared_error: 0.2678\nEpoch 3/30\n1029/1032 [============================>.] - ETA: 0s - loss: 0.0656 - root_mean_squared_error: 0.2561\nEpoch 00003: val_loss improved from 0.07173 to 0.07048, saving model to /content/drive/My Drive/epochs/model_5_8_camera6.0003-0.0705.h5\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0656 - root_mean_squared_error: 0.2561 - val_loss: 0.0705 - val_root_mean_squared_error: 0.2655\nEpoch 4/30\n1032/1032 [==============================] - ETA: 0s - loss: 0.0640 - root_mean_squared_error: 0.2531\nEpoch 00004: val_loss improved from 0.07048 to 0.07029, saving model to /content/drive/My Drive/epochs/model_5_8_camera6.0004-0.0703.h5\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0640 - root_mean_squared_error: 0.2531 - val_loss: 0.0703 - val_root_mean_squared_error: 0.2651\nEpoch 5/30\n1030/1032 [============================>.] - ETA: 0s - loss: 0.0626 - root_mean_squared_error: 0.2502\nEpoch 00005: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0627 - root_mean_squared_error: 0.2504 - val_loss: 0.0705 - val_root_mean_squared_error: 0.2655\nEpoch 6/30\n1023/1032 [============================>.] - ETA: 0s - loss: 0.0617 - root_mean_squared_error: 0.2484\nEpoch 00006: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0619 - root_mean_squared_error: 0.2487 - val_loss: 0.0717 - val_root_mean_squared_error: 0.2678\nEpoch 7/30\n1025/1032 [============================>.] 
- ETA: 0s - loss: 0.0613 - root_mean_squared_error: 0.2475\nEpoch 00007: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0611 - root_mean_squared_error: 0.2472 - val_loss: 0.0716 - val_root_mean_squared_error: 0.2675\nEpoch 8/30\n1028/1032 [============================>.] - ETA: 0s - loss: 0.0608 - root_mean_squared_error: 0.2466\nEpoch 00008: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0607 - root_mean_squared_error: 0.2464 - val_loss: 0.0712 - val_root_mean_squared_error: 0.2668\nEpoch 9/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0601 - root_mean_squared_error: 0.2451\nEpoch 00009: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0600 - root_mean_squared_error: 0.2449 - val_loss: 0.0711 - val_root_mean_squared_error: 0.2666\nEpoch 10/30\n1032/1032 [==============================] - ETA: 0s - loss: 0.0597 - root_mean_squared_error: 0.2444\nEpoch 00010: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0597 - root_mean_squared_error: 0.2444 - val_loss: 0.0716 - val_root_mean_squared_error: 0.2676\nEpoch 11/30\n1030/1032 [============================>.] - ETA: 0s - loss: 0.0592 - root_mean_squared_error: 0.2434\nEpoch 00011: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0592 - root_mean_squared_error: 0.2433 - val_loss: 0.0736 - val_root_mean_squared_error: 0.2713\nEpoch 12/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0592 - root_mean_squared_error: 0.2432\nEpoch 00012: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0590 - root_mean_squared_error: 0.2429 - val_loss: 0.0718 - val_root_mean_squared_error: 0.2680\nEpoch 13/30\n1031/1032 [============================>.] - ETA: 0s - loss: 0.0589 - root_mean_squared_error: 0.2426\nEpoch 00013: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0589 - root_mean_squared_error: 0.2426 - val_loss: 0.0727 - val_root_mean_squared_error: 0.2696\nEpoch 14/30\n1029/1032 [============================>.] - ETA: 0s - loss: 0.0587 - root_mean_squared_error: 0.2422\nEpoch 00014: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0586 - root_mean_squared_error: 0.2420 - val_loss: 0.0732 - val_root_mean_squared_error: 0.2706\nEpoch 15/30\n1025/1032 [============================>.] - ETA: 0s - loss: 0.0581 - root_mean_squared_error: 0.2410\nEpoch 00015: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0579 - root_mean_squared_error: 0.2407 - val_loss: 0.0736 - val_root_mean_squared_error: 0.2713\nEpoch 16/30\n1024/1032 [============================>.] - ETA: 0s - loss: 0.0580 - root_mean_squared_error: 0.2409\nEpoch 00016: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0579 - root_mean_squared_error: 0.2407 - val_loss: 0.0742 - val_root_mean_squared_error: 0.2723\nEpoch 17/30\n1027/1032 [============================>.] 
- ETA: 0s - loss: 0.0578 - root_mean_squared_error: 0.2404\nEpoch 00017: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0577 - root_mean_squared_error: 0.2403 - val_loss: 0.0730 - val_root_mean_squared_error: 0.2701\nEpoch 18/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0578 - root_mean_squared_error: 0.2404\nEpoch 00018: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0577 - root_mean_squared_error: 0.2402 - val_loss: 0.0731 - val_root_mean_squared_error: 0.2703\nEpoch 19/30\n1028/1032 [============================>.] - ETA: 0s - loss: 0.0574 - root_mean_squared_error: 0.2395\nEpoch 00019: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0573 - root_mean_squared_error: 0.2393 - val_loss: 0.0734 - val_root_mean_squared_error: 0.2710\nEpoch 20/30\n1025/1032 [============================>.] - ETA: 0s - loss: 0.0572 - root_mean_squared_error: 0.2391\nEpoch 00020: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0574 - root_mean_squared_error: 0.2395 - val_loss: 0.0752 - val_root_mean_squared_error: 0.2742\nEpoch 21/30\n1031/1032 [============================>.] - ETA: 0s - loss: 0.0570 - root_mean_squared_error: 0.2387\nEpoch 00021: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0570 - root_mean_squared_error: 0.2387 - val_loss: 0.0740 - val_root_mean_squared_error: 0.2721\nEpoch 22/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0569 - root_mean_squared_error: 0.2385\nEpoch 00022: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0568 - root_mean_squared_error: 0.2384 - val_loss: 0.0739 - val_root_mean_squared_error: 0.2719\nEpoch 23/30\n1030/1032 [============================>.] - ETA: 0s - loss: 0.0566 - root_mean_squared_error: 0.2379\nEpoch 00023: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0568 - root_mean_squared_error: 0.2384 - val_loss: 0.0745 - val_root_mean_squared_error: 0.2729\nEpoch 24/30\n1025/1032 [============================>.] - ETA: 0s - loss: 0.0562 - root_mean_squared_error: 0.2371\nEpoch 00024: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0566 - root_mean_squared_error: 0.2379 - val_loss: 0.0748 - val_root_mean_squared_error: 0.2736\nEpoch 25/30\n1031/1032 [============================>.] - ETA: 0s - loss: 0.0564 - root_mean_squared_error: 0.2375\nEpoch 00025: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0564 - root_mean_squared_error: 0.2374 - val_loss: 0.0766 - val_root_mean_squared_error: 0.2768\nEpoch 26/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0558 - root_mean_squared_error: 0.2362\nEpoch 00026: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0562 - root_mean_squared_error: 0.2370 - val_loss: 0.0744 - val_root_mean_squared_error: 0.2728\nEpoch 27/30\n1024/1032 [============================>.] 
- ETA: 0s - loss: 0.0562 - root_mean_squared_error: 0.2371\nEpoch 00027: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0560 - root_mean_squared_error: 0.2367 - val_loss: 0.0758 - val_root_mean_squared_error: 0.2753\nEpoch 28/30\n1027/1032 [============================>.] - ETA: 0s - loss: 0.0561 - root_mean_squared_error: 0.2369\nEpoch 00028: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0560 - root_mean_squared_error: 0.2366 - val_loss: 0.0744 - val_root_mean_squared_error: 0.2727\nEpoch 29/30\n1031/1032 [============================>.] - ETA: 0s - loss: 0.0560 - root_mean_squared_error: 0.2367\nEpoch 00029: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 7ms/step - loss: 0.0560 - root_mean_squared_error: 0.2367 - val_loss: 0.0747 - val_root_mean_squared_error: 0.2733\nEpoch 30/30\n1032/1032 [==============================] - ETA: 0s - loss: 0.0556 - root_mean_squared_error: 0.2359\nEpoch 00030: val_loss did not improve from 0.07029\n1032/1032 [==============================] - 7s 6ms/step - loss: 0.0556 - root_mean_squared_error: 0.2359 - val_loss: 0.0753 - val_root_mean_squared_error: 0.2744\n" ], [ "ticks = [i for i in range(0, 101, 10)]\nlabels = [i for i in range(0, 101, 10)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera6', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_8_camera6.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera6 = model_history('model_5_8_camera6')", "_____no_output_____" ], [ "model_3_camera6.head()", "_____no_output_____" ], [ "#################### end of training camera6 data for model 3", "_____no_output_____" ], [ "########################### start of camera9 preprocessing and of training camera7 data for model 3", "_____no_output_____" ], [ "camera9 = load('/content/drive/My Drive/datasets/camera9_cleaned.npz')\nlog9 = pd.read_csv('/content/drive/My Drive/datasets/log9_cleaned.csv')", "_____no_output_____" ], [ "camera_processing(camera9, 'camera9')", "Done\n" ], [ "log_processing(log9, 'log9')", "Done\n" ], [ "train_split('camera9', 'log9')", "Done\n" ], [ "\"\"\"\nnew data workflow\ncamera2 = load('/content/drive/My Drive/datasets/camera2_cleaned.npz')\nlog2 = pd.read_csv('/content/drive/My Drive/datasets/log2_cleaned.csv')\n\ncamera_processing(camera2, 'camera2')\n\nlog_processing(log2, 'log2')\n\ntrain_split('camera2', 'log2')\n\"\"\"", "_____no_output_____" ],
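 [ "# The 30-epoch runs above stop improving on val_loss within the first few\n# epochs while training loss keeps falling, so an EarlyStopping callback next\n# to the checkpoint could cut the wasted epochs. A minimal sketch, not part of\n# the original runs: patience=5 is an untuned assumption, and the import path\n# matches the keras.callbacks import used later in this notebook.\nfrom keras.callbacks import EarlyStopping\n\nearly_stop = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)\n# callbacks_list = [checkpoint, early_stop]", "_____no_output_____" ], [ "model = 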
load_model('/content/drive/My Drive/epochs/model_5_8_camera6.0004-0.0703.h5')", "_____no_output_____" ], [ "def train_load(camera_file_name):\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_train.npz\" ./X_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_X_test.npz\" ./X_test.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_train.npz\" ./y_train.npz\n !cp -r \"/content/drive/My Drive/datasets/{camera_file_name}_y_test.npz\" ./y_test.npz\n X_train = load('./X_train.npz')\n X_train = X_train.f.arr_0\n X_test = load('./X_test.npz')\n X_test = X_test.f.arr_0\n y_train = load('./y_train.npz')\n y_train = y_train.f.arr_0\n y_test = load('./y_test.npz')\n y_test = y_test.f.arr_0\n\n return X_train, X_test, y_train, y_test", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_load('camera7')", "_____no_output_____" ], [ "X_train.shape, X_test.shape, y_train.shape, y_test.shape", "_____no_output_____" ], [ "filepath = \"/content/drive/My Drive/epochs/model_5_9_camera7.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/30\n926/926 [==============================] - ETA: 0s - loss: 0.0390 - root_mean_squared_error: 0.1975\nEpoch 00001: val_loss improved from inf to 0.03959, saving model to /content/drive/My Drive/epochs/model_5_9_camera7.0001-0.0396.h5\n926/926 [==============================] - 6s 7ms/step - loss: 0.0390 - root_mean_squared_error: 0.1975 - val_loss: 0.0396 - val_root_mean_squared_error: 0.1990\nEpoch 2/30\n921/926 [============================>.] - ETA: 0s - loss: 0.0349 - root_mean_squared_error: 0.1869\nEpoch 00002: val_loss improved from 0.03959 to 0.03933, saving model to /content/drive/My Drive/epochs/model_5_9_camera7.0002-0.0393.h5\n926/926 [==============================] - 6s 7ms/step - loss: 0.0349 - root_mean_squared_error: 0.1869 - val_loss: 0.0393 - val_root_mean_squared_error: 0.1983\nEpoch 3/30\n920/926 [============================>.] - ETA: 0s - loss: 0.0340 - root_mean_squared_error: 0.1845\nEpoch 00003: val_loss improved from 0.03933 to 0.03911, saving model to /content/drive/My Drive/epochs/model_5_9_camera7.0003-0.0391.h5\n926/926 [==============================] - 6s 7ms/step - loss: 0.0341 - root_mean_squared_error: 0.1846 - val_loss: 0.0391 - val_root_mean_squared_error: 0.1978\nEpoch 4/30\n918/926 [============================>.] - ETA: 0s - loss: 0.0336 - root_mean_squared_error: 0.1832\nEpoch 00004: val_loss improved from 0.03911 to 0.03889, saving model to /content/drive/My Drive/epochs/model_5_9_camera7.0004-0.0389.h5\n926/926 [==============================] - 6s 7ms/step - loss: 0.0334 - root_mean_squared_error: 0.1827 - val_loss: 0.0389 - val_root_mean_squared_error: 0.1972\nEpoch 5/30\n920/926 [============================>.] - ETA: 0s - loss: 0.0326 - root_mean_squared_error: 0.1804\nEpoch 00005: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0327 - root_mean_squared_error: 0.1808 - val_loss: 0.0391 - val_root_mean_squared_error: 0.1977\nEpoch 6/30\n923/926 [============================>.] 
- ETA: 0s - loss: 0.0323 - root_mean_squared_error: 0.1797\nEpoch 00006: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0322 - root_mean_squared_error: 0.1795 - val_loss: 0.0395 - val_root_mean_squared_error: 0.1986\nEpoch 7/30\n923/926 [============================>.] - ETA: 0s - loss: 0.0319 - root_mean_squared_error: 0.1785\nEpoch 00007: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0320 - root_mean_squared_error: 0.1788 - val_loss: 0.0392 - val_root_mean_squared_error: 0.1980\nEpoch 8/30\n924/926 [============================>.] - ETA: 0s - loss: 0.0312 - root_mean_squared_error: 0.1766\nEpoch 00008: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0315 - root_mean_squared_error: 0.1776 - val_loss: 0.0396 - val_root_mean_squared_error: 0.1990\nEpoch 9/30\n926/926 [==============================] - ETA: 0s - loss: 0.0313 - root_mean_squared_error: 0.1769\nEpoch 00009: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0313 - root_mean_squared_error: 0.1769 - val_loss: 0.0398 - val_root_mean_squared_error: 0.1996\nEpoch 10/30\n923/926 [============================>.] - ETA: 0s - loss: 0.0309 - root_mean_squared_error: 0.1759\nEpoch 00010: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0309 - root_mean_squared_error: 0.1759 - val_loss: 0.0395 - val_root_mean_squared_error: 0.1988\nEpoch 11/30\n919/926 [============================>.] - ETA: 0s - loss: 0.0306 - root_mean_squared_error: 0.1749\nEpoch 00011: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0307 - root_mean_squared_error: 0.1753 - val_loss: 0.0398 - val_root_mean_squared_error: 0.1995\nEpoch 12/30\n921/926 [============================>.] - ETA: 0s - loss: 0.0307 - root_mean_squared_error: 0.1752\nEpoch 00012: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0306 - root_mean_squared_error: 0.1749 - val_loss: 0.0401 - val_root_mean_squared_error: 0.2002\nEpoch 13/30\n918/926 [============================>.] - ETA: 0s - loss: 0.0303 - root_mean_squared_error: 0.1741\nEpoch 00013: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0302 - root_mean_squared_error: 0.1739 - val_loss: 0.0400 - val_root_mean_squared_error: 0.2000\nEpoch 14/30\n922/926 [============================>.] - ETA: 0s - loss: 0.0301 - root_mean_squared_error: 0.1735\nEpoch 00014: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 6ms/step - loss: 0.0301 - root_mean_squared_error: 0.1734 - val_loss: 0.0402 - val_root_mean_squared_error: 0.2005\nEpoch 15/30\n922/926 [============================>.] - ETA: 0s - loss: 0.0299 - root_mean_squared_error: 0.1728\nEpoch 00015: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0299 - root_mean_squared_error: 0.1728 - val_loss: 0.0405 - val_root_mean_squared_error: 0.2012\nEpoch 16/30\n917/926 [============================>.] 
- ETA: 0s - loss: 0.0297 - root_mean_squared_error: 0.1724\nEpoch 00016: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0297 - root_mean_squared_error: 0.1722 - val_loss: 0.0407 - val_root_mean_squared_error: 0.2018\nEpoch 17/30\n925/926 [============================>.] - ETA: 0s - loss: 0.0295 - root_mean_squared_error: 0.1718\nEpoch 00017: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0295 - root_mean_squared_error: 0.1718 - val_loss: 0.0405 - val_root_mean_squared_error: 0.2013\nEpoch 18/30\n922/926 [============================>.] - ETA: 0s - loss: 0.0291 - root_mean_squared_error: 0.1705\nEpoch 00018: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0292 - root_mean_squared_error: 0.1708 - val_loss: 0.0410 - val_root_mean_squared_error: 0.2026\nEpoch 19/30\n925/926 [============================>.] - ETA: 0s - loss: 0.0292 - root_mean_squared_error: 0.1708\nEpoch 00019: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0292 - root_mean_squared_error: 0.1707 - val_loss: 0.0411 - val_root_mean_squared_error: 0.2028\nEpoch 20/30\n918/926 [============================>.] - ETA: 0s - loss: 0.0290 - root_mean_squared_error: 0.1702\nEpoch 00020: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0289 - root_mean_squared_error: 0.1700 - val_loss: 0.0417 - val_root_mean_squared_error: 0.2043\nEpoch 21/30\n919/926 [============================>.] - ETA: 0s - loss: 0.0289 - root_mean_squared_error: 0.1701\nEpoch 00021: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 6ms/step - loss: 0.0288 - root_mean_squared_error: 0.1697 - val_loss: 0.0412 - val_root_mean_squared_error: 0.2031\nEpoch 22/30\n921/926 [============================>.] - ETA: 0s - loss: 0.0288 - root_mean_squared_error: 0.1696\nEpoch 00022: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0288 - root_mean_squared_error: 0.1696 - val_loss: 0.0407 - val_root_mean_squared_error: 0.2018\nEpoch 23/30\n925/926 [============================>.] - ETA: 0s - loss: 0.0287 - root_mean_squared_error: 0.1693\nEpoch 00023: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0286 - root_mean_squared_error: 0.1692 - val_loss: 0.0408 - val_root_mean_squared_error: 0.2019\nEpoch 24/30\n921/926 [============================>.] - ETA: 0s - loss: 0.0285 - root_mean_squared_error: 0.1688\nEpoch 00024: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0285 - root_mean_squared_error: 0.1688 - val_loss: 0.0412 - val_root_mean_squared_error: 0.2030\nEpoch 25/30\n922/926 [============================>.] - ETA: 0s - loss: 0.0284 - root_mean_squared_error: 0.1684\nEpoch 00025: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0284 - root_mean_squared_error: 0.1684 - val_loss: 0.0415 - val_root_mean_squared_error: 0.2037\nEpoch 26/30\n923/926 [============================>.] 
- ETA: 0s - loss: 0.0283 - root_mean_squared_error: 0.1682\nEpoch 00026: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0283 - root_mean_squared_error: 0.1682 - val_loss: 0.0419 - val_root_mean_squared_error: 0.2046\nEpoch 27/30\n919/926 [============================>.] - ETA: 0s - loss: 0.0282 - root_mean_squared_error: 0.1680\nEpoch 00027: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0282 - root_mean_squared_error: 0.1678 - val_loss: 0.0419 - val_root_mean_squared_error: 0.2046\nEpoch 28/30\n926/926 [==============================] - ETA: 0s - loss: 0.0280 - root_mean_squared_error: 0.1675\nEpoch 00028: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0280 - root_mean_squared_error: 0.1675 - val_loss: 0.0423 - val_root_mean_squared_error: 0.2057\nEpoch 29/30\n921/926 [============================>.] - ETA: 0s - loss: 0.0279 - root_mean_squared_error: 0.1672\nEpoch 00029: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0279 - root_mean_squared_error: 0.1671 - val_loss: 0.0417 - val_root_mean_squared_error: 0.2043\nEpoch 30/30\n918/926 [============================>.] - ETA: 0s - loss: 0.0278 - root_mean_squared_error: 0.1667\nEpoch 00030: val_loss did not improve from 0.03889\n926/926 [==============================] - 6s 7ms/step - loss: 0.0277 - root_mean_squared_error: 0.1666 - val_loss: 0.0432 - val_root_mean_squared_error: 0.2078\n" ], [ "ticks = [i for i in range(0, 101, 10)]\nlabels = [i for i in range(0, 101, 10)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera7', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model5_9_camera7.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_3_camera7 = model_history('model_5_9_camera7')", "_____no_output_____" ], [ "model_3_camera7.head()", "_____no_output_____" ], [ "#################### end of training camera7 data for model 3", "_____no_output_____" ], [ "####################### training a brand-new model from scratch to check that the earlier runs really were fine-tuning the same loaded model", "_____no_output_____" ],
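 [ "# The from-scratch fit below runs on the camera9 split (599 training batches\n# vs. 926 for camera7), but the cell that loads that split does not appear\n# above -- presumably a call like the one sketched here ran at this point.\n# Evaluating the currently loaded, fine-tuned model first would also give a\n# baseline to compare the fresh model against. Hedged reconstruction, not an\n# original cell.\nX_train, X_test, y_train, y_test = train_load('camera9')\n# baseline = model.evaluate(X_test, y_test, verbose=0)  # fine-tuned model's [mse, rmse]", "_____no_output_____" ], [ "model = Sequential()\n\nmodel.add(Conv2D(16, (3, 3), input_shape=(80, 160, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, 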
(3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(300, activation='relu'))\nmodel.add(Dropout(.5))\n\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(.25))\n\nmodel.add(Dense(20, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer=Adam(lr=1e-04), metrics=[RootMeanSquaredError()])\n\nfilepath = \"/content/drive/My Drive/epochs/model_1_camera9_standalone.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=100,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0560 - root_mean_squared_error: 0.2366\nEpoch 00001: val_loss improved from inf to 0.05401, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0001-0.0540.h5\n599/599 [==============================] - 8s 13ms/step - loss: 0.0557 - root_mean_squared_error: 0.2359 - val_loss: 0.0540 - val_root_mean_squared_error: 0.2324\nEpoch 2/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0551 - root_mean_squared_error: 0.2347\nEpoch 00002: val_loss improved from 0.05401 to 0.05400, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0002-0.0540.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0552 - root_mean_squared_error: 0.2349 - val_loss: 0.0540 - val_root_mean_squared_error: 0.2324\nEpoch 3/100\n594/599 [============================>.] - ETA: 0s - loss: 0.0552 - root_mean_squared_error: 0.2349\nEpoch 00003: val_loss improved from 0.05400 to 0.05400, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0003-0.0540.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0551 - root_mean_squared_error: 0.2347 - val_loss: 0.0540 - val_root_mean_squared_error: 0.2324\nEpoch 4/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0547 - root_mean_squared_error: 0.2339\nEpoch 00004: val_loss improved from 0.05400 to 0.05388, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0004-0.0539.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0544 - root_mean_squared_error: 0.2332 - val_loss: 0.0539 - val_root_mean_squared_error: 0.2321\nEpoch 5/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0506 - root_mean_squared_error: 0.2249\nEpoch 00005: val_loss improved from 0.05388 to 0.05236, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0005-0.0524.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0504 - root_mean_squared_error: 0.2246 - val_loss: 0.0524 - val_root_mean_squared_error: 0.2288\nEpoch 6/100\n595/599 [============================>.] 
- ETA: 0s - loss: 0.0479 - root_mean_squared_error: 0.2189\nEpoch 00006: val_loss improved from 0.05236 to 0.04944, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0006-0.0494.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0478 - root_mean_squared_error: 0.2185 - val_loss: 0.0494 - val_root_mean_squared_error: 0.2224\nEpoch 7/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0451 - root_mean_squared_error: 0.2124\nEpoch 00007: val_loss improved from 0.04944 to 0.04866, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0007-0.0487.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0451 - root_mean_squared_error: 0.2123 - val_loss: 0.0487 - val_root_mean_squared_error: 0.2206\nEpoch 8/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0429 - root_mean_squared_error: 0.2072\nEpoch 00008: val_loss improved from 0.04866 to 0.04707, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0008-0.0471.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0431 - root_mean_squared_error: 0.2076 - val_loss: 0.0471 - val_root_mean_squared_error: 0.2170\nEpoch 9/100\n594/599 [============================>.] - ETA: 0s - loss: 0.0422 - root_mean_squared_error: 0.2055\nEpoch 00009: val_loss did not improve from 0.04707\n599/599 [==============================] - 7s 12ms/step - loss: 0.0422 - root_mean_squared_error: 0.2054 - val_loss: 0.0484 - val_root_mean_squared_error: 0.2200\nEpoch 10/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0390 - root_mean_squared_error: 0.1975\nEpoch 00010: val_loss improved from 0.04707 to 0.04410, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0010-0.0441.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0390 - root_mean_squared_error: 0.1975 - val_loss: 0.0441 - val_root_mean_squared_error: 0.2100\nEpoch 11/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0388 - root_mean_squared_error: 0.1970\nEpoch 00011: val_loss improved from 0.04410 to 0.04318, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0011-0.0432.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0388 - root_mean_squared_error: 0.1969 - val_loss: 0.0432 - val_root_mean_squared_error: 0.2078\nEpoch 12/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0351 - root_mean_squared_error: 0.1873\nEpoch 00012: val_loss improved from 0.04318 to 0.03872, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0012-0.0387.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0351 - root_mean_squared_error: 0.1873 - val_loss: 0.0387 - val_root_mean_squared_error: 0.1968\nEpoch 13/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0337 - root_mean_squared_error: 0.1836\nEpoch 00013: val_loss did not improve from 0.03872\n599/599 [==============================] - 7s 12ms/step - loss: 0.0344 - root_mean_squared_error: 0.1854 - val_loss: 0.0426 - val_root_mean_squared_error: 0.2064\nEpoch 14/100\n597/599 [============================>.] 
- ETA: 0s - loss: 0.0306 - root_mean_squared_error: 0.1749\nEpoch 00014: val_loss improved from 0.03872 to 0.03488, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0014-0.0349.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0308 - root_mean_squared_error: 0.1754 - val_loss: 0.0349 - val_root_mean_squared_error: 0.1868\nEpoch 15/100\n599/599 [==============================] - ETA: 0s - loss: 0.0291 - root_mean_squared_error: 0.1707\nEpoch 00015: val_loss improved from 0.03488 to 0.02948, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0015-0.0295.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0291 - root_mean_squared_error: 0.1707 - val_loss: 0.0295 - val_root_mean_squared_error: 0.1717\nEpoch 16/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0264 - root_mean_squared_error: 0.1625\nEpoch 00016: val_loss improved from 0.02948 to 0.02898, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0016-0.0290.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0265 - root_mean_squared_error: 0.1629 - val_loss: 0.0290 - val_root_mean_squared_error: 0.1702\nEpoch 17/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0257 - root_mean_squared_error: 0.1602\nEpoch 00017: val_loss improved from 0.02898 to 0.02874, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0017-0.0287.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0256 - root_mean_squared_error: 0.1600 - val_loss: 0.0287 - val_root_mean_squared_error: 0.1695\nEpoch 18/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0219 - root_mean_squared_error: 0.1480\nEpoch 00018: val_loss improved from 0.02874 to 0.02825, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0018-0.0282.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0219 - root_mean_squared_error: 0.1481 - val_loss: 0.0282 - val_root_mean_squared_error: 0.1681\nEpoch 19/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0212 - root_mean_squared_error: 0.1455\nEpoch 00019: val_loss improved from 0.02825 to 0.02553, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0019-0.0255.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0211 - root_mean_squared_error: 0.1452 - val_loss: 0.0255 - val_root_mean_squared_error: 0.1598\nEpoch 20/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0213 - root_mean_squared_error: 0.1460\nEpoch 00020: val_loss did not improve from 0.02553\n599/599 [==============================] - 7s 12ms/step - loss: 0.0212 - root_mean_squared_error: 0.1457 - val_loss: 0.0260 - val_root_mean_squared_error: 0.1612\nEpoch 21/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0194 - root_mean_squared_error: 0.1395\nEpoch 00021: val_loss improved from 0.02553 to 0.02488, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0021-0.0249.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0194 - root_mean_squared_error: 0.1394 - val_loss: 0.0249 - val_root_mean_squared_error: 0.1577\nEpoch 22/100\n595/599 [============================>.] 
- ETA: 0s - loss: 0.0176 - root_mean_squared_error: 0.1326\nEpoch 00022: val_loss improved from 0.02488 to 0.02432, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0022-0.0243.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0175 - root_mean_squared_error: 0.1322 - val_loss: 0.0243 - val_root_mean_squared_error: 0.1560\nEpoch 23/100\n599/599 [==============================] - ETA: 0s - loss: 0.0174 - root_mean_squared_error: 0.1320\nEpoch 00023: val_loss improved from 0.02432 to 0.02417, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0023-0.0242.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0174 - root_mean_squared_error: 0.1320 - val_loss: 0.0242 - val_root_mean_squared_error: 0.1555\nEpoch 24/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0174 - root_mean_squared_error: 0.1320\nEpoch 00024: val_loss improved from 0.02417 to 0.02046, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0024-0.0205.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0174 - root_mean_squared_error: 0.1319 - val_loss: 0.0205 - val_root_mean_squared_error: 0.1430\nEpoch 25/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0176 - root_mean_squared_error: 0.1326\nEpoch 00025: val_loss did not improve from 0.02046\n599/599 [==============================] - 7s 12ms/step - loss: 0.0175 - root_mean_squared_error: 0.1323 - val_loss: 0.0209 - val_root_mean_squared_error: 0.1444\nEpoch 26/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0161 - root_mean_squared_error: 0.1267\nEpoch 00026: val_loss improved from 0.02046 to 0.01855, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0026-0.0185.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0160 - root_mean_squared_error: 0.1266 - val_loss: 0.0185 - val_root_mean_squared_error: 0.1362\nEpoch 27/100\n599/599 [==============================] - ETA: 0s - loss: 0.0164 - root_mean_squared_error: 0.1279\nEpoch 00027: val_loss improved from 0.01855 to 0.01844, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0027-0.0184.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0164 - root_mean_squared_error: 0.1279 - val_loss: 0.0184 - val_root_mean_squared_error: 0.1358\nEpoch 28/100\n599/599 [==============================] - ETA: 0s - loss: 0.0144 - root_mean_squared_error: 0.1200\nEpoch 00028: val_loss improved from 0.01844 to 0.01764, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0028-0.0176.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0144 - root_mean_squared_error: 0.1200 - val_loss: 0.0176 - val_root_mean_squared_error: 0.1328\nEpoch 29/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0138 - root_mean_squared_error: 0.1175\nEpoch 00029: val_loss did not improve from 0.01764\n599/599 [==============================] - 7s 12ms/step - loss: 0.0138 - root_mean_squared_error: 0.1174 - val_loss: 0.0180 - val_root_mean_squared_error: 0.1343\nEpoch 30/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0136 - root_mean_squared_error: 0.1167\nEpoch 00030: val_loss did not improve from 0.01764\n599/599 [==============================] - 7s 12ms/step - loss: 0.0136 - root_mean_squared_error: 0.1166 - val_loss: 0.0182 - val_root_mean_squared_error: 0.1351\nEpoch 31/100\n597/599 [============================>.] 
- ETA: 0s - loss: 0.0130 - root_mean_squared_error: 0.1140\nEpoch 00031: val_loss did not improve from 0.01764\n599/599 [==============================] - 7s 12ms/step - loss: 0.0133 - root_mean_squared_error: 0.1152 - val_loss: 0.0184 - val_root_mean_squared_error: 0.1355\nEpoch 32/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0128 - root_mean_squared_error: 0.1133\nEpoch 00032: val_loss improved from 0.01764 to 0.01658, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0032-0.0166.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0129 - root_mean_squared_error: 0.1135 - val_loss: 0.0166 - val_root_mean_squared_error: 0.1288\nEpoch 33/100\n594/599 [============================>.] - ETA: 0s - loss: 0.0137 - root_mean_squared_error: 0.1171\nEpoch 00033: val_loss improved from 0.01658 to 0.01639, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0033-0.0164.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0138 - root_mean_squared_error: 0.1175 - val_loss: 0.0164 - val_root_mean_squared_error: 0.1280\nEpoch 34/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0130 - root_mean_squared_error: 0.1139\nEpoch 00034: val_loss improved from 0.01639 to 0.01585, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0034-0.0159.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0130 - root_mean_squared_error: 0.1139 - val_loss: 0.0159 - val_root_mean_squared_error: 0.1259\nEpoch 35/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0125 - root_mean_squared_error: 0.1118\nEpoch 00035: val_loss did not improve from 0.01585\n599/599 [==============================] - 7s 12ms/step - loss: 0.0125 - root_mean_squared_error: 0.1118 - val_loss: 0.0166 - val_root_mean_squared_error: 0.1290\nEpoch 36/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0125 - root_mean_squared_error: 0.1116\nEpoch 00036: val_loss did not improve from 0.01585\n599/599 [==============================] - 7s 12ms/step - loss: 0.0125 - root_mean_squared_error: 0.1116 - val_loss: 0.0163 - val_root_mean_squared_error: 0.1277\nEpoch 37/100\n599/599 [==============================] - ETA: 0s - loss: 0.0127 - root_mean_squared_error: 0.1128\nEpoch 00037: val_loss did not improve from 0.01585\n599/599 [==============================] - 7s 12ms/step - loss: 0.0127 - root_mean_squared_error: 0.1128 - val_loss: 0.0163 - val_root_mean_squared_error: 0.1275\nEpoch 38/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0111 - root_mean_squared_error: 0.1054\nEpoch 00038: val_loss improved from 0.01585 to 0.01574, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0038-0.0157.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0111 - root_mean_squared_error: 0.1053 - val_loss: 0.0157 - val_root_mean_squared_error: 0.1254\nEpoch 39/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0112 - root_mean_squared_error: 0.1058\nEpoch 00039: val_loss did not improve from 0.01574\n599/599 [==============================] - 7s 12ms/step - loss: 0.0112 - root_mean_squared_error: 0.1057 - val_loss: 0.0163 - val_root_mean_squared_error: 0.1275\nEpoch 40/100\n596/599 [============================>.] 
- ETA: 0s - loss: 0.0115 - root_mean_squared_error: 0.1075\nEpoch 00040: val_loss did not improve from 0.01574\n599/599 [==============================] - 7s 12ms/step - loss: 0.0115 - root_mean_squared_error: 0.1073 - val_loss: 0.0168 - val_root_mean_squared_error: 0.1295\nEpoch 41/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0117 - root_mean_squared_error: 0.1081\nEpoch 00041: val_loss did not improve from 0.01574\n599/599 [==============================] - 7s 12ms/step - loss: 0.0117 - root_mean_squared_error: 0.1080 - val_loss: 0.0163 - val_root_mean_squared_error: 0.1275\nEpoch 42/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0111 - root_mean_squared_error: 0.1051\nEpoch 00042: val_loss improved from 0.01574 to 0.01504, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0042-0.0150.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0111 - root_mean_squared_error: 0.1056 - val_loss: 0.0150 - val_root_mean_squared_error: 0.1226\nEpoch 43/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0105 - root_mean_squared_error: 0.1025\nEpoch 00043: val_loss did not improve from 0.01504\n599/599 [==============================] - 7s 12ms/step - loss: 0.0105 - root_mean_squared_error: 0.1024 - val_loss: 0.0177 - val_root_mean_squared_error: 0.1329\nEpoch 44/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0104 - root_mean_squared_error: 0.1018\nEpoch 00044: val_loss improved from 0.01504 to 0.01477, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0044-0.0148.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0104 - root_mean_squared_error: 0.1020 - val_loss: 0.0148 - val_root_mean_squared_error: 0.1215\nEpoch 45/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0104 - root_mean_squared_error: 0.1021\nEpoch 00045: val_loss improved from 0.01477 to 0.01433, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0045-0.0143.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0104 - root_mean_squared_error: 0.1021 - val_loss: 0.0143 - val_root_mean_squared_error: 0.1197\nEpoch 46/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0100 - root_mean_squared_error: 0.1000\nEpoch 00046: val_loss did not improve from 0.01433\n599/599 [==============================] - 7s 12ms/step - loss: 0.0100 - root_mean_squared_error: 0.1001 - val_loss: 0.0146 - val_root_mean_squared_error: 0.1207\nEpoch 47/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0103 - root_mean_squared_error: 0.1016\nEpoch 00047: val_loss did not improve from 0.01433\n599/599 [==============================] - 7s 12ms/step - loss: 0.0103 - root_mean_squared_error: 0.1016 - val_loss: 0.0160 - val_root_mean_squared_error: 0.1263\nEpoch 48/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0094 - root_mean_squared_error: 0.0972\nEpoch 00048: val_loss improved from 0.01433 to 0.01258, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0048-0.0126.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0095 - root_mean_squared_error: 0.0974 - val_loss: 0.0126 - val_root_mean_squared_error: 0.1122\nEpoch 49/100\n595/599 [============================>.] 
- ETA: 0s - loss: 0.0094 - root_mean_squared_error: 0.0972\nEpoch 00049: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0094 - root_mean_squared_error: 0.0970 - val_loss: 0.0150 - val_root_mean_squared_error: 0.1224\nEpoch 50/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0094 - root_mean_squared_error: 0.0968\nEpoch 00050: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0094 - root_mean_squared_error: 0.0969 - val_loss: 0.0131 - val_root_mean_squared_error: 0.1145\nEpoch 51/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0097 - root_mean_squared_error: 0.0983\nEpoch 00051: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0096 - root_mean_squared_error: 0.0982 - val_loss: 0.0143 - val_root_mean_squared_error: 0.1197\nEpoch 52/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0093 - root_mean_squared_error: 0.0965\nEpoch 00052: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0093 - root_mean_squared_error: 0.0964 - val_loss: 0.0134 - val_root_mean_squared_error: 0.1158\nEpoch 53/100\n599/599 [==============================] - ETA: 0s - loss: 0.0098 - root_mean_squared_error: 0.0989\nEpoch 00053: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0098 - root_mean_squared_error: 0.0989 - val_loss: 0.0126 - val_root_mean_squared_error: 0.1125\nEpoch 54/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0087 - root_mean_squared_error: 0.0933\nEpoch 00054: val_loss did not improve from 0.01258\n599/599 [==============================] - 7s 12ms/step - loss: 0.0087 - root_mean_squared_error: 0.0933 - val_loss: 0.0143 - val_root_mean_squared_error: 0.1196\nEpoch 55/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0084 - root_mean_squared_error: 0.0917\nEpoch 00055: val_loss improved from 0.01258 to 0.01195, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0055-0.0120.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0084 - root_mean_squared_error: 0.0917 - val_loss: 0.0120 - val_root_mean_squared_error: 0.1093\nEpoch 56/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0092 - root_mean_squared_error: 0.0961\nEpoch 00056: val_loss did not improve from 0.01195\n599/599 [==============================] - 7s 12ms/step - loss: 0.0092 - root_mean_squared_error: 0.0960 - val_loss: 0.0147 - val_root_mean_squared_error: 0.1213\nEpoch 57/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0085 - root_mean_squared_error: 0.0920\nEpoch 00057: val_loss did not improve from 0.01195\n599/599 [==============================] - 7s 12ms/step - loss: 0.0084 - root_mean_squared_error: 0.0919 - val_loss: 0.0123 - val_root_mean_squared_error: 0.1111\nEpoch 58/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0083 - root_mean_squared_error: 0.0911\nEpoch 00058: val_loss did not improve from 0.01195\n599/599 [==============================] - 7s 12ms/step - loss: 0.0083 - root_mean_squared_error: 0.0910 - val_loss: 0.0124 - val_root_mean_squared_error: 0.1112\nEpoch 59/100\n594/599 [============================>.] 
- ETA: 0s - loss: 0.0088 - root_mean_squared_error: 0.0939\nEpoch 00059: val_loss did not improve from 0.01195\n599/599 [==============================] - 7s 12ms/step - loss: 0.0088 - root_mean_squared_error: 0.0937 - val_loss: 0.0134 - val_root_mean_squared_error: 0.1157\nEpoch 60/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0087 - root_mean_squared_error: 0.0930\nEpoch 00060: val_loss did not improve from 0.01195\n599/599 [==============================] - 7s 12ms/step - loss: 0.0086 - root_mean_squared_error: 0.0930 - val_loss: 0.0139 - val_root_mean_squared_error: 0.1177\nEpoch 61/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0084 - root_mean_squared_error: 0.0919\nEpoch 00061: val_loss improved from 0.01195 to 0.01176, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0061-0.0118.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0084 - root_mean_squared_error: 0.0918 - val_loss: 0.0118 - val_root_mean_squared_error: 0.1085\nEpoch 62/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0082 - root_mean_squared_error: 0.0907\nEpoch 00062: val_loss did not improve from 0.01176\n599/599 [==============================] - 7s 12ms/step - loss: 0.0082 - root_mean_squared_error: 0.0907 - val_loss: 0.0133 - val_root_mean_squared_error: 0.1152\nEpoch 63/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0081 - root_mean_squared_error: 0.0901\nEpoch 00063: val_loss improved from 0.01176 to 0.01075, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0063-0.0107.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0081 - root_mean_squared_error: 0.0901 - val_loss: 0.0107 - val_root_mean_squared_error: 0.1037\nEpoch 64/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0081 - root_mean_squared_error: 0.0901\nEpoch 00064: val_loss did not improve from 0.01075\n599/599 [==============================] - 7s 12ms/step - loss: 0.0081 - root_mean_squared_error: 0.0899 - val_loss: 0.0126 - val_root_mean_squared_error: 0.1123\nEpoch 65/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0076 - root_mean_squared_error: 0.0871\nEpoch 00065: val_loss did not improve from 0.01075\n599/599 [==============================] - 7s 12ms/step - loss: 0.0076 - root_mean_squared_error: 0.0870 - val_loss: 0.0115 - val_root_mean_squared_error: 0.1070\nEpoch 66/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0081 - root_mean_squared_error: 0.0900\nEpoch 00066: val_loss did not improve from 0.01075\n599/599 [==============================] - 7s 12ms/step - loss: 0.0081 - root_mean_squared_error: 0.0900 - val_loss: 0.0122 - val_root_mean_squared_error: 0.1104\nEpoch 67/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0077 - root_mean_squared_error: 0.0877\nEpoch 00067: val_loss did not improve from 0.01075\n599/599 [==============================] - 7s 12ms/step - loss: 0.0077 - root_mean_squared_error: 0.0877 - val_loss: 0.0122 - val_root_mean_squared_error: 0.1102\nEpoch 68/100\n598/599 [============================>.] 
- ETA: 0s - loss: 0.0080 - root_mean_squared_error: 0.0896\nEpoch 00068: val_loss did not improve from 0.01075\n599/599 [==============================] - 7s 12ms/step - loss: 0.0080 - root_mean_squared_error: 0.0896 - val_loss: 0.0135 - val_root_mean_squared_error: 0.1162\nEpoch 69/100\n599/599 [==============================] - ETA: 0s - loss: 0.0073 - root_mean_squared_error: 0.0856\nEpoch 00069: val_loss improved from 0.01075 to 0.01010, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0069-0.0101.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0073 - root_mean_squared_error: 0.0856 - val_loss: 0.0101 - val_root_mean_squared_error: 0.1005\nEpoch 70/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0070 - root_mean_squared_error: 0.0839\nEpoch 00070: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0071 - root_mean_squared_error: 0.0843 - val_loss: 0.0104 - val_root_mean_squared_error: 0.1021\nEpoch 71/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0080 - root_mean_squared_error: 0.0895\nEpoch 00071: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0080 - root_mean_squared_error: 0.0893 - val_loss: 0.0123 - val_root_mean_squared_error: 0.1110\nEpoch 72/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0074 - root_mean_squared_error: 0.0863\nEpoch 00072: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0074 - root_mean_squared_error: 0.0862 - val_loss: 0.0124 - val_root_mean_squared_error: 0.1112\nEpoch 73/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0071 - root_mean_squared_error: 0.0845\nEpoch 00073: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0072 - root_mean_squared_error: 0.0847 - val_loss: 0.0115 - val_root_mean_squared_error: 0.1075\nEpoch 74/100\n599/599 [==============================] - ETA: 0s - loss: 0.0072 - root_mean_squared_error: 0.0850\nEpoch 00074: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0072 - root_mean_squared_error: 0.0850 - val_loss: 0.0133 - val_root_mean_squared_error: 0.1152\nEpoch 75/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0069 - root_mean_squared_error: 0.0833\nEpoch 00075: val_loss did not improve from 0.01010\n599/599 [==============================] - 7s 12ms/step - loss: 0.0070 - root_mean_squared_error: 0.0834 - val_loss: 0.0116 - val_root_mean_squared_error: 0.1077\nEpoch 76/100\n599/599 [==============================] - ETA: 0s - loss: 0.0071 - root_mean_squared_error: 0.0841\nEpoch 00076: val_loss improved from 0.01010 to 0.00983, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0076-0.0098.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0071 - root_mean_squared_error: 0.0841 - val_loss: 0.0098 - val_root_mean_squared_error: 0.0992\nEpoch 77/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0066 - root_mean_squared_error: 0.0813\nEpoch 00077: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0066 - root_mean_squared_error: 0.0813 - val_loss: 0.0120 - val_root_mean_squared_error: 0.1094\nEpoch 78/100\n598/599 [============================>.] 
- ETA: 0s - loss: 0.0072 - root_mean_squared_error: 0.0850\nEpoch 00078: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0072 - root_mean_squared_error: 0.0850 - val_loss: 0.0105 - val_root_mean_squared_error: 0.1024\nEpoch 79/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0078 - root_mean_squared_error: 0.0885\nEpoch 00079: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0078 - root_mean_squared_error: 0.0883 - val_loss: 0.0109 - val_root_mean_squared_error: 0.1045\nEpoch 80/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0064 - root_mean_squared_error: 0.0798\nEpoch 00080: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0064 - root_mean_squared_error: 0.0803 - val_loss: 0.0109 - val_root_mean_squared_error: 0.1043\nEpoch 81/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0062 - root_mean_squared_error: 0.0789\nEpoch 00081: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0062 - root_mean_squared_error: 0.0789 - val_loss: 0.0111 - val_root_mean_squared_error: 0.1051\nEpoch 82/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0067 - root_mean_squared_error: 0.0819\nEpoch 00082: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 12ms/step - loss: 0.0067 - root_mean_squared_error: 0.0817 - val_loss: 0.0110 - val_root_mean_squared_error: 0.1050\nEpoch 83/100\n594/599 [============================>.] - ETA: 0s - loss: 0.0066 - root_mean_squared_error: 0.0814\nEpoch 00083: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 11ms/step - loss: 0.0066 - root_mean_squared_error: 0.0812 - val_loss: 0.0104 - val_root_mean_squared_error: 0.1019\nEpoch 84/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0070 - root_mean_squared_error: 0.0838\nEpoch 00084: val_loss did not improve from 0.00983\n599/599 [==============================] - 7s 11ms/step - loss: 0.0071 - root_mean_squared_error: 0.0840 - val_loss: 0.0116 - val_root_mean_squared_error: 0.1078\nEpoch 85/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0060 - root_mean_squared_error: 0.0773\nEpoch 00085: val_loss improved from 0.00983 to 0.00980, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0085-0.0098.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0060 - root_mean_squared_error: 0.0774 - val_loss: 0.0098 - val_root_mean_squared_error: 0.0990\nEpoch 86/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0064 - root_mean_squared_error: 0.0797\nEpoch 00086: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0063 - root_mean_squared_error: 0.0797 - val_loss: 0.0119 - val_root_mean_squared_error: 0.1093\nEpoch 87/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0061 - root_mean_squared_error: 0.0782\nEpoch 00087: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0061 - root_mean_squared_error: 0.0782 - val_loss: 0.0106 - val_root_mean_squared_error: 0.1029\nEpoch 88/100\n597/599 [============================>.] 
- ETA: 0s - loss: 0.0067 - root_mean_squared_error: 0.0816\nEpoch 00088: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0067 - root_mean_squared_error: 0.0818 - val_loss: 0.0102 - val_root_mean_squared_error: 0.1011\nEpoch 89/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0060 - root_mean_squared_error: 0.0772\nEpoch 00089: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0060 - root_mean_squared_error: 0.0772 - val_loss: 0.0102 - val_root_mean_squared_error: 0.1012\nEpoch 90/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0060 - root_mean_squared_error: 0.0772\nEpoch 00090: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0060 - root_mean_squared_error: 0.0772 - val_loss: 0.0118 - val_root_mean_squared_error: 0.1085\nEpoch 91/100\n594/599 [============================>.] - ETA: 0s - loss: 0.0060 - root_mean_squared_error: 0.0776\nEpoch 00091: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0060 - root_mean_squared_error: 0.0774 - val_loss: 0.0108 - val_root_mean_squared_error: 0.1037\nEpoch 92/100\n596/599 [============================>.] - ETA: 0s - loss: 0.0060 - root_mean_squared_error: 0.0773\nEpoch 00092: val_loss did not improve from 0.00980\n599/599 [==============================] - 7s 12ms/step - loss: 0.0060 - root_mean_squared_error: 0.0774 - val_loss: 0.0114 - val_root_mean_squared_error: 0.1066\nEpoch 93/100\n598/599 [============================>.] - ETA: 0s - loss: 0.0057 - root_mean_squared_error: 0.0757\nEpoch 00093: val_loss improved from 0.00980 to 0.00964, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0093-0.0096.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0057 - root_mean_squared_error: 0.0757 - val_loss: 0.0096 - val_root_mean_squared_error: 0.0982\nEpoch 94/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0061 - root_mean_squared_error: 0.0784\nEpoch 00094: val_loss did not improve from 0.00964\n599/599 [==============================] - 7s 12ms/step - loss: 0.0061 - root_mean_squared_error: 0.0783 - val_loss: 0.0105 - val_root_mean_squared_error: 0.1023\nEpoch 95/100\n597/599 [============================>.] - ETA: 0s - loss: 0.0061 - root_mean_squared_error: 0.0780\nEpoch 00095: val_loss did not improve from 0.00964\n599/599 [==============================] - 7s 12ms/step - loss: 0.0061 - root_mean_squared_error: 0.0781 - val_loss: 0.0102 - val_root_mean_squared_error: 0.1010\nEpoch 96/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0062 - root_mean_squared_error: 0.0790\nEpoch 00096: val_loss did not improve from 0.00964\n599/599 [==============================] - 7s 12ms/step - loss: 0.0062 - root_mean_squared_error: 0.0788 - val_loss: 0.0102 - val_root_mean_squared_error: 0.1009\nEpoch 97/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0057 - root_mean_squared_error: 0.0752\nEpoch 00097: val_loss did not improve from 0.00964\n599/599 [==============================] - 7s 12ms/step - loss: 0.0056 - root_mean_squared_error: 0.0751 - val_loss: 0.0101 - val_root_mean_squared_error: 0.1007\nEpoch 98/100\n596/599 [============================>.] 
- ETA: 0s - loss: 0.0058 - root_mean_squared_error: 0.0761\nEpoch 00098: val_loss improved from 0.00964 to 0.00963, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0098-0.0096.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0059 - root_mean_squared_error: 0.0765 - val_loss: 0.0096 - val_root_mean_squared_error: 0.0981\nEpoch 99/100\n599/599 [==============================] - ETA: 0s - loss: 0.0059 - root_mean_squared_error: 0.0771\nEpoch 00099: val_loss improved from 0.00963 to 0.00910, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0099-0.0091.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0059 - root_mean_squared_error: 0.0771 - val_loss: 0.0091 - val_root_mean_squared_error: 0.0954\nEpoch 100/100\n595/599 [============================>.] - ETA: 0s - loss: 0.0059 - root_mean_squared_error: 0.0766\nEpoch 00100: val_loss improved from 0.00910 to 0.00858, saving model to /content/drive/My Drive/epochs/model_1_camera9_standalone.0100-0.0086.h5\n599/599 [==============================] - 7s 12ms/step - loss: 0.0059 - root_mean_squared_error: 0.0767 - val_loss: 0.0086 - val_root_mean_squared_error: 0.0926\n" ], [ "ticks = [i for i in range(0, 101, 10)]\nlabels = [i for i in range(0, 101, 10)]\nlabels[0] = 1", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera9', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18)\n\nplt.savefig('/content/drive/My Drive/images/train_test_loss_model1_camera9_standalone.png');", "_____no_output_____" ], [ "def model_history(model_name): \n model = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n\n model.to_csv(f'/content/drive/My Drive/datasets/{model_name}.csv', index=False)\n return model", "_____no_output_____" ], [ "model_1_camera9 = model_history('model_1_camera9_standalone')", "_____no_output_____" ], [ "model_1_camera9.head()", "_____no_output_____" ], [ "#################### end of training camera9 data for model 1", "_____no_output_____" ], [ "model = Sequential()\n\nmodel.add(Conv2D(16, (3, 3), input_shape=(80, 160, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Dense(300, activation='relu'))\nmodel.add(Dropout(.5))\n\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(.25))\n\nmodel.add(Dense(20, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer='adam', metrics=[RootMeanSquaredError()])\n\nfrom keras.callbacks import *\nfilepath = \"/content/drive/My Drive/model_1_shuffled_redropout.{epoch:03d}-{val_loss:.3f}.h5\"\ncheckpoint = 
ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min', not 'max': with monitor='val_loss' a lower value is better\ncallbacks_list = [checkpoint]\n\nhistory = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=15,\n verbose=1,\n callbacks=callbacks_list)", "_____no_output_____" ], [ "model = Sequential()\n\nmodel.add(Conv2D(16, (3, 3), input_shape=(80, 160, 1), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(32, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(300, activation='relu'))\nmodel.add(Dropout(.5))\n\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(.25))\n\nmodel.add(Dense(20, activation='relu'))\nmodel.add(Dense(1))", "_____no_output_____" ], [ "model.compile(loss='mse', optimizer=Adam(lr=1e-04), metrics=[RootMeanSquaredError()])", "_____no_output_____" ], [ "from keras.callbacks import *\nfilepath = \"/content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.{epoch:04d}-{val_loss:.4f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=100,\n verbose=1,\n callbacks=callbacks_list)", "Epoch 1/100\n637/641 [============================>.] - ETA: 0s - loss: 0.2557 - root_mean_squared_error: 0.5057\nEpoch 00001: val_loss improved from inf to 0.23474, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0001-0.2347.h5\n641/641 [==============================] - 9s 13ms/step - loss: 0.2548 - root_mean_squared_error: 0.5047 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\nEpoch 2/100\n640/641 [============================>.] - ETA: 0s - loss: 0.2531 - root_mean_squared_error: 0.5031\nEpoch 00002: val_loss did not improve from 0.23474\n641/641 [==============================] - 8s 12ms/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\nEpoch 3/100\n636/641 [============================>.] - ETA: 0s - loss: 0.2505 - root_mean_squared_error: 0.5005\nEpoch 00003: val_loss did not improve from 0.23474\n641/641 [==============================] - 8s 12ms/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\nEpoch 4/100\n637/641 [============================>.] - ETA: 0s - loss: 0.2536 - root_mean_squared_error: 0.5036\nEpoch 00004: val_loss did not improve from 0.23474\n641/641 [==============================] - 8s 12ms/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\nEpoch 5/100\n639/641 [============================>.] - ETA: 0s - loss: 0.2524 - root_mean_squared_error: 0.5024\nEpoch 00005: val_loss improved from 0.23474 to 0.23471, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0005-0.2347.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\nEpoch 6/100\n640/641 [============================>.] 
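# Aside (a sketch, not part of the recorded runs): every fit above trains for a
# fixed epoch budget even when val_loss stalls for long stretches. Keras's
# EarlyStopping callback can stop such runs early; the patience value below is an
# assumption, not something tuned in this notebook.
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', mode='min', patience=15,
                           restore_best_weights=True, verbose=1)
# Used alongside the existing checkpoint callback:
callbacks_list = [checkpoint, early_stop]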
- ETA: 0s - loss: 0.2528 - root_mean_squared_error: 0.5028\nEpoch 00006: val_loss improved from 0.23471 to 0.23468, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0006-0.2347.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2526 - root_mean_squared_error: 0.5026 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4844\nEpoch 7/100\n638/641 [============================>.] - ETA: 0s - loss: 0.2516 - root_mean_squared_error: 0.5016\nEpoch 00007: val_loss improved from 0.23468 to 0.23409, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0007-0.2341.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2518 - root_mean_squared_error: 0.5018 - val_loss: 0.2341 - val_root_mean_squared_error: 0.4838\nEpoch 8/100\n641/641 [==============================] - ETA: 0s - loss: 0.2504 - root_mean_squared_error: 0.5004\nEpoch 00008: val_loss improved from 0.23409 to 0.23267, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0008-0.2327.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2504 - root_mean_squared_error: 0.5004 - val_loss: 0.2327 - val_root_mean_squared_error: 0.4824\nEpoch 9/100\n640/641 [============================>.] - ETA: 0s - loss: 0.2454 - root_mean_squared_error: 0.4954\nEpoch 00009: val_loss improved from 0.23267 to 0.22157, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0009-0.2216.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2452 - root_mean_squared_error: 0.4951 - val_loss: 0.2216 - val_root_mean_squared_error: 0.4707\nEpoch 10/100\n639/641 [============================>.] - ETA: 0s - loss: 0.2307 - root_mean_squared_error: 0.4803\nEpoch 00010: val_loss improved from 0.22157 to 0.20416, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0010-0.2042.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2302 - root_mean_squared_error: 0.4798 - val_loss: 0.2042 - val_root_mean_squared_error: 0.4518\nEpoch 11/100\n636/641 [============================>.] - ETA: 0s - loss: 0.2148 - root_mean_squared_error: 0.4635\nEpoch 00011: val_loss improved from 0.20416 to 0.18700, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0011-0.1870.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.2147 - root_mean_squared_error: 0.4634 - val_loss: 0.1870 - val_root_mean_squared_error: 0.4324\nEpoch 12/100\n640/641 [============================>.] - ETA: 0s - loss: 0.1939 - root_mean_squared_error: 0.4404\nEpoch 00012: val_loss improved from 0.18700 to 0.17739, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0012-0.1774.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1939 - root_mean_squared_error: 0.4403 - val_loss: 0.1774 - val_root_mean_squared_error: 0.4212\nEpoch 13/100\n641/641 [==============================] - ETA: 0s - loss: 0.1851 - root_mean_squared_error: 0.4302\nEpoch 00013: val_loss improved from 0.17739 to 0.17037, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0013-0.1704.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1851 - root_mean_squared_error: 0.4302 - val_loss: 0.1704 - val_root_mean_squared_error: 0.4128\nEpoch 14/100\n638/641 [============================>.] 
- ETA: 0s - loss: 0.1747 - root_mean_squared_error: 0.4180\nEpoch 00014: val_loss improved from 0.17037 to 0.15766, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0014-0.1577.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1745 - root_mean_squared_error: 0.4177 - val_loss: 0.1577 - val_root_mean_squared_error: 0.3971\nEpoch 15/100\n638/641 [============================>.] - ETA: 0s - loss: 0.1619 - root_mean_squared_error: 0.4024\nEpoch 00015: val_loss improved from 0.15766 to 0.15265, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0015-0.1527.h5\n641/641 [==============================] - 8s 13ms/step - loss: 0.1623 - root_mean_squared_error: 0.4029 - val_loss: 0.1527 - val_root_mean_squared_error: 0.3907\nEpoch 16/100\n639/641 [============================>.] - ETA: 0s - loss: 0.1521 - root_mean_squared_error: 0.3900\nEpoch 00016: val_loss improved from 0.15265 to 0.14152, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0016-0.1415.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1519 - root_mean_squared_error: 0.3898 - val_loss: 0.1415 - val_root_mean_squared_error: 0.3762\nEpoch 17/100\n637/641 [============================>.] - ETA: 0s - loss: 0.1425 - root_mean_squared_error: 0.3775\nEpoch 00017: val_loss improved from 0.14152 to 0.13354, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0017-0.1335.h5\n641/641 [==============================] - 8s 13ms/step - loss: 0.1419 - root_mean_squared_error: 0.3767 - val_loss: 0.1335 - val_root_mean_squared_error: 0.3654\nEpoch 18/100\n637/641 [============================>.] - ETA: 0s - loss: 0.1365 - root_mean_squared_error: 0.3695\nEpoch 00018: val_loss improved from 0.13354 to 0.12435, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0018-0.1243.h5\n641/641 [==============================] - 8s 13ms/step - loss: 0.1362 - root_mean_squared_error: 0.3691 - val_loss: 0.1243 - val_root_mean_squared_error: 0.3526\nEpoch 19/100\n639/641 [============================>.] - ETA: 0s - loss: 0.1246 - root_mean_squared_error: 0.3530\nEpoch 00019: val_loss improved from 0.12435 to 0.11462, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0019-0.1146.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1244 - root_mean_squared_error: 0.3527 - val_loss: 0.1146 - val_root_mean_squared_error: 0.3386\nEpoch 20/100\n638/641 [============================>.] - ETA: 0s - loss: 0.1228 - root_mean_squared_error: 0.3504\nEpoch 00020: val_loss improved from 0.11462 to 0.10926, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0020-0.1093.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1224 - root_mean_squared_error: 0.3498 - val_loss: 0.1093 - val_root_mean_squared_error: 0.3305\nEpoch 21/100\n638/641 [============================>.] - ETA: 0s - loss: 0.1091 - root_mean_squared_error: 0.3303\nEpoch 00021: val_loss improved from 0.10926 to 0.10532, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0021-0.1053.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1089 - root_mean_squared_error: 0.3300 - val_loss: 0.1053 - val_root_mean_squared_error: 0.3245\nEpoch 22/100\n639/641 [============================>.] 
- ETA: 0s - loss: 0.1071 - root_mean_squared_error: 0.3272\nEpoch 00022: val_loss improved from 0.10532 to 0.10483, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0022-0.1048.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.1073 - root_mean_squared_error: 0.3276 - val_loss: 0.1048 - val_root_mean_squared_error: 0.3238\nEpoch 23/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0982 - root_mean_squared_error: 0.3134\nEpoch 00023: val_loss did not improve from 0.10483\n641/641 [==============================] - 8s 12ms/step - loss: 0.0981 - root_mean_squared_error: 0.3133 - val_loss: 0.1128 - val_root_mean_squared_error: 0.3359\nEpoch 24/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0928 - root_mean_squared_error: 0.3046\nEpoch 00024: val_loss improved from 0.10483 to 0.09814, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0024-0.0981.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0928 - root_mean_squared_error: 0.3046 - val_loss: 0.0981 - val_root_mean_squared_error: 0.3133\nEpoch 25/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0960 - root_mean_squared_error: 0.3098\nEpoch 00025: val_loss improved from 0.09814 to 0.09731, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0025-0.0973.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0959 - root_mean_squared_error: 0.3097 - val_loss: 0.0973 - val_root_mean_squared_error: 0.3119\nEpoch 26/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0899 - root_mean_squared_error: 0.2998\nEpoch 00026: val_loss improved from 0.09731 to 0.08995, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0026-0.0899.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0898 - root_mean_squared_error: 0.2997 - val_loss: 0.0899 - val_root_mean_squared_error: 0.2999\nEpoch 27/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0840 - root_mean_squared_error: 0.2898\nEpoch 00027: val_loss did not improve from 0.08995\n641/641 [==============================] - 8s 12ms/step - loss: 0.0843 - root_mean_squared_error: 0.2903 - val_loss: 0.0910 - val_root_mean_squared_error: 0.3016\nEpoch 28/100\n636/641 [============================>.] - ETA: 0s - loss: 0.0845 - root_mean_squared_error: 0.2907\nEpoch 00028: val_loss improved from 0.08995 to 0.07935, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0028-0.0794.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0845 - root_mean_squared_error: 0.2907 - val_loss: 0.0794 - val_root_mean_squared_error: 0.2817\nEpoch 29/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0809 - root_mean_squared_error: 0.2845\nEpoch 00029: val_loss did not improve from 0.07935\n641/641 [==============================] - 8s 12ms/step - loss: 0.0809 - root_mean_squared_error: 0.2844 - val_loss: 0.0886 - val_root_mean_squared_error: 0.2977\nEpoch 30/100\n639/641 [============================>.] 
- ETA: 0s - loss: 0.0772 - root_mean_squared_error: 0.2779\nEpoch 00030: val_loss did not improve from 0.07935\n641/641 [==============================] - 8s 12ms/step - loss: 0.0774 - root_mean_squared_error: 0.2781 - val_loss: 0.0810 - val_root_mean_squared_error: 0.2847\nEpoch 31/100\n641/641 [==============================] - ETA: 0s - loss: 0.0754 - root_mean_squared_error: 0.2746\nEpoch 00031: val_loss improved from 0.07935 to 0.07370, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0031-0.0737.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0754 - root_mean_squared_error: 0.2746 - val_loss: 0.0737 - val_root_mean_squared_error: 0.2715\nEpoch 32/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0749 - root_mean_squared_error: 0.2737\nEpoch 00032: val_loss did not improve from 0.07370\n641/641 [==============================] - 8s 12ms/step - loss: 0.0748 - root_mean_squared_error: 0.2735 - val_loss: 0.0888 - val_root_mean_squared_error: 0.2980\nEpoch 33/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0692 - root_mean_squared_error: 0.2631\nEpoch 00033: val_loss did not improve from 0.07370\n641/641 [==============================] - 8s 12ms/step - loss: 0.0692 - root_mean_squared_error: 0.2631 - val_loss: 0.0746 - val_root_mean_squared_error: 0.2730\nEpoch 34/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0700 - root_mean_squared_error: 0.2646\nEpoch 00034: val_loss did not improve from 0.07370\n641/641 [==============================] - 8s 12ms/step - loss: 0.0699 - root_mean_squared_error: 0.2643 - val_loss: 0.0749 - val_root_mean_squared_error: 0.2736\nEpoch 35/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0670 - root_mean_squared_error: 0.2588\nEpoch 00035: val_loss improved from 0.07370 to 0.06660, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0035-0.0666.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0676 - root_mean_squared_error: 0.2601 - val_loss: 0.0666 - val_root_mean_squared_error: 0.2581\nEpoch 36/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0691 - root_mean_squared_error: 0.2628\nEpoch 00036: val_loss did not improve from 0.06660\n641/641 [==============================] - 8s 12ms/step - loss: 0.0690 - root_mean_squared_error: 0.2628 - val_loss: 0.0733 - val_root_mean_squared_error: 0.2708\nEpoch 37/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0639 - root_mean_squared_error: 0.2528\nEpoch 00037: val_loss did not improve from 0.06660\n641/641 [==============================] - 8s 12ms/step - loss: 0.0639 - root_mean_squared_error: 0.2527 - val_loss: 0.0716 - val_root_mean_squared_error: 0.2676\nEpoch 38/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0639 - root_mean_squared_error: 0.2527\nEpoch 00038: val_loss did not improve from 0.06660\n641/641 [==============================] - 8s 12ms/step - loss: 0.0638 - root_mean_squared_error: 0.2525 - val_loss: 0.0719 - val_root_mean_squared_error: 0.2681\nEpoch 39/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0654 - root_mean_squared_error: 0.2557\nEpoch 00039: val_loss did not improve from 0.06660\n641/641 [==============================] - 8s 12ms/step - loss: 0.0653 - root_mean_squared_error: 0.2556 - val_loss: 0.0762 - val_root_mean_squared_error: 0.2761\nEpoch 40/100\n639/641 [============================>.] 
- ETA: 0s - loss: 0.0635 - root_mean_squared_error: 0.2519\nEpoch 00040: val_loss did not improve from 0.06660\n641/641 [==============================] - 8s 12ms/step - loss: 0.0634 - root_mean_squared_error: 0.2518 - val_loss: 0.0667 - val_root_mean_squared_error: 0.2583\nEpoch 41/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0569 - root_mean_squared_error: 0.2385\nEpoch 00041: val_loss improved from 0.06660 to 0.06556, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0041-0.0656.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0568 - root_mean_squared_error: 0.2384 - val_loss: 0.0656 - val_root_mean_squared_error: 0.2560\nEpoch 42/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0578 - root_mean_squared_error: 0.2405\nEpoch 00042: val_loss did not improve from 0.06556\n641/641 [==============================] - 8s 12ms/step - loss: 0.0578 - root_mean_squared_error: 0.2404 - val_loss: 0.0688 - val_root_mean_squared_error: 0.2623\nEpoch 43/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0561 - root_mean_squared_error: 0.2369\nEpoch 00043: val_loss did not improve from 0.06556\n641/641 [==============================] - 8s 12ms/step - loss: 0.0560 - root_mean_squared_error: 0.2366 - val_loss: 0.0728 - val_root_mean_squared_error: 0.2699\nEpoch 44/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0580 - root_mean_squared_error: 0.2408\nEpoch 00044: val_loss improved from 0.06556 to 0.06468, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0044-0.0647.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0580 - root_mean_squared_error: 0.2408 - val_loss: 0.0647 - val_root_mean_squared_error: 0.2543\nEpoch 45/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0551 - root_mean_squared_error: 0.2348\nEpoch 00045: val_loss did not improve from 0.06468\n641/641 [==============================] - 8s 12ms/step - loss: 0.0553 - root_mean_squared_error: 0.2351 - val_loss: 0.0697 - val_root_mean_squared_error: 0.2641\nEpoch 46/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0596 - root_mean_squared_error: 0.2442\nEpoch 00046: val_loss did not improve from 0.06468\n641/641 [==============================] - 8s 12ms/step - loss: 0.0598 - root_mean_squared_error: 0.2445 - val_loss: 0.0798 - val_root_mean_squared_error: 0.2825\nEpoch 47/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0564 - root_mean_squared_error: 0.2375\nEpoch 00047: val_loss did not improve from 0.06468\n641/641 [==============================] - 8s 12ms/step - loss: 0.0565 - root_mean_squared_error: 0.2377 - val_loss: 0.0650 - val_root_mean_squared_error: 0.2550\nEpoch 48/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0505 - root_mean_squared_error: 0.2247\nEpoch 00048: val_loss did not improve from 0.06468\n641/641 [==============================] - 8s 12ms/step - loss: 0.0506 - root_mean_squared_error: 0.2249 - val_loss: 0.0749 - val_root_mean_squared_error: 0.2736\nEpoch 49/100\n640/641 [============================>.] 
- ETA: 0s - loss: 0.0540 - root_mean_squared_error: 0.2323\nEpoch 00049: val_loss improved from 0.06468 to 0.06420, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0049-0.0642.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0540 - root_mean_squared_error: 0.2323 - val_loss: 0.0642 - val_root_mean_squared_error: 0.2534\nEpoch 50/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0538 - root_mean_squared_error: 0.2320\nEpoch 00050: val_loss improved from 0.06420 to 0.06117, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0050-0.0612.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0539 - root_mean_squared_error: 0.2322 - val_loss: 0.0612 - val_root_mean_squared_error: 0.2473\nEpoch 51/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0547 - root_mean_squared_error: 0.2338\nEpoch 00051: val_loss improved from 0.06117 to 0.06054, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0051-0.0605.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0550 - root_mean_squared_error: 0.2345 - val_loss: 0.0605 - val_root_mean_squared_error: 0.2461\nEpoch 52/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0524 - root_mean_squared_error: 0.2289\nEpoch 00052: val_loss did not improve from 0.06054\n641/641 [==============================] - 8s 12ms/step - loss: 0.0524 - root_mean_squared_error: 0.2290 - val_loss: 0.0666 - val_root_mean_squared_error: 0.2580\nEpoch 53/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0497 - root_mean_squared_error: 0.2230\nEpoch 00053: val_loss improved from 0.06054 to 0.06053, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0053-0.0605.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0497 - root_mean_squared_error: 0.2230 - val_loss: 0.0605 - val_root_mean_squared_error: 0.2460\nEpoch 54/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0497 - root_mean_squared_error: 0.2230\nEpoch 00054: val_loss did not improve from 0.06053\n641/641 [==============================] - 8s 12ms/step - loss: 0.0497 - root_mean_squared_error: 0.2229 - val_loss: 0.0654 - val_root_mean_squared_error: 0.2558\nEpoch 55/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0518 - root_mean_squared_error: 0.2276\nEpoch 00055: val_loss did not improve from 0.06053\n641/641 [==============================] - 8s 12ms/step - loss: 0.0520 - root_mean_squared_error: 0.2280 - val_loss: 0.0657 - val_root_mean_squared_error: 0.2564\nEpoch 56/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0477 - root_mean_squared_error: 0.2184\nEpoch 00056: val_loss improved from 0.06053 to 0.05946, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0056-0.0595.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0478 - root_mean_squared_error: 0.2186 - val_loss: 0.0595 - val_root_mean_squared_error: 0.2439\nEpoch 57/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0482 - root_mean_squared_error: 0.2196\nEpoch 00057: val_loss did not improve from 0.05946\n641/641 [==============================] - 8s 12ms/step - loss: 0.0481 - root_mean_squared_error: 0.2193 - val_loss: 0.0632 - val_root_mean_squared_error: 0.2514\nEpoch 58/100\n638/641 [============================>.] 
- ETA: 0s - loss: 0.0457 - root_mean_squared_error: 0.2138\nEpoch 00058: val_loss improved from 0.05946 to 0.05750, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0058-0.0575.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0456 - root_mean_squared_error: 0.2136 - val_loss: 0.0575 - val_root_mean_squared_error: 0.2398\nEpoch 59/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0516 - root_mean_squared_error: 0.2271\nEpoch 00059: val_loss did not improve from 0.05750\n641/641 [==============================] - 8s 12ms/step - loss: 0.0516 - root_mean_squared_error: 0.2271 - val_loss: 0.0650 - val_root_mean_squared_error: 0.2550\nEpoch 60/100\n641/641 [==============================] - ETA: 0s - loss: 0.0482 - root_mean_squared_error: 0.2196\nEpoch 00060: val_loss improved from 0.05750 to 0.05612, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0060-0.0561.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0482 - root_mean_squared_error: 0.2196 - val_loss: 0.0561 - val_root_mean_squared_error: 0.2369\nEpoch 61/100\n641/641 [==============================] - ETA: 0s - loss: 0.0450 - root_mean_squared_error: 0.2121\nEpoch 00061: val_loss did not improve from 0.05612\n641/641 [==============================] - 8s 12ms/step - loss: 0.0450 - root_mean_squared_error: 0.2121 - val_loss: 0.0615 - val_root_mean_squared_error: 0.2480\nEpoch 62/100\n641/641 [==============================] - ETA: 0s - loss: 0.0423 - root_mean_squared_error: 0.2056\nEpoch 00062: val_loss did not improve from 0.05612\n641/641 [==============================] - 8s 12ms/step - loss: 0.0423 - root_mean_squared_error: 0.2056 - val_loss: 0.0644 - val_root_mean_squared_error: 0.2538\nEpoch 63/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0454 - root_mean_squared_error: 0.2132\nEpoch 00063: val_loss did not improve from 0.05612\n641/641 [==============================] - 8s 12ms/step - loss: 0.0454 - root_mean_squared_error: 0.2131 - val_loss: 0.0601 - val_root_mean_squared_error: 0.2452\nEpoch 64/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0446 - root_mean_squared_error: 0.2112\nEpoch 00064: val_loss improved from 0.05612 to 0.05331, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0064-0.0533.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0446 - root_mean_squared_error: 0.2111 - val_loss: 0.0533 - val_root_mean_squared_error: 0.2309\nEpoch 65/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0434 - root_mean_squared_error: 0.2084\nEpoch 00065: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0434 - root_mean_squared_error: 0.2083 - val_loss: 0.0687 - val_root_mean_squared_error: 0.2621\nEpoch 66/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0423 - root_mean_squared_error: 0.2057\nEpoch 00066: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0423 - root_mean_squared_error: 0.2056 - val_loss: 0.0579 - val_root_mean_squared_error: 0.2407\nEpoch 67/100\n640/641 [============================>.] 
- ETA: 0s - loss: 0.0413 - root_mean_squared_error: 0.2033\nEpoch 00067: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0413 - root_mean_squared_error: 0.2033 - val_loss: 0.0553 - val_root_mean_squared_error: 0.2352\nEpoch 68/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0417 - root_mean_squared_error: 0.2042\nEpoch 00068: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0417 - root_mean_squared_error: 0.2042 - val_loss: 0.0548 - val_root_mean_squared_error: 0.2340\nEpoch 69/100\n641/641 [==============================] - ETA: 0s - loss: 0.0442 - root_mean_squared_error: 0.2102\nEpoch 00069: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0442 - root_mean_squared_error: 0.2102 - val_loss: 0.0640 - val_root_mean_squared_error: 0.2529\nEpoch 70/100\n641/641 [==============================] - ETA: 0s - loss: 0.0432 - root_mean_squared_error: 0.2077\nEpoch 00070: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0432 - root_mean_squared_error: 0.2077 - val_loss: 0.0573 - val_root_mean_squared_error: 0.2393\nEpoch 71/100\n636/641 [============================>.] - ETA: 0s - loss: 0.0401 - root_mean_squared_error: 0.2003\nEpoch 00071: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0409 - root_mean_squared_error: 0.2022 - val_loss: 0.0612 - val_root_mean_squared_error: 0.2474\nEpoch 72/100\n636/641 [============================>.] - ETA: 0s - loss: 0.0401 - root_mean_squared_error: 0.2004\nEpoch 00072: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0403 - root_mean_squared_error: 0.2007 - val_loss: 0.0539 - val_root_mean_squared_error: 0.2322\nEpoch 73/100\n641/641 [==============================] - ETA: 0s - loss: 0.0400 - root_mean_squared_error: 0.2000\nEpoch 00073: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0400 - root_mean_squared_error: 0.2000 - val_loss: 0.0571 - val_root_mean_squared_error: 0.2389\nEpoch 74/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0402 - root_mean_squared_error: 0.2005\nEpoch 00074: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0402 - root_mean_squared_error: 0.2005 - val_loss: 0.0660 - val_root_mean_squared_error: 0.2569\nEpoch 75/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0398 - root_mean_squared_error: 0.1995\nEpoch 00075: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0397 - root_mean_squared_error: 0.1993 - val_loss: 0.0579 - val_root_mean_squared_error: 0.2405\nEpoch 76/100\n641/641 [==============================] - ETA: 0s - loss: 0.0387 - root_mean_squared_error: 0.1966\nEpoch 00076: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0387 - root_mean_squared_error: 0.1966 - val_loss: 0.0593 - val_root_mean_squared_error: 0.2435\nEpoch 77/100\n636/641 [============================>.] 
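# Aside (a sketch): the long run of "did not improve" epochs in this log is exactly
# the situation ReduceLROnPlateau reacts to, decaying the learning rate when
# val_loss plateaus instead of keeping the hand-picked Adam(lr=1e-04) fixed.
# The factor and patience values here are assumptions.
from keras.callbacks import ReduceLROnPlateau
reduce_lr = ReduceLROnPlateau(monitor='val_loss', mode='min', factor=0.5,
                              patience=8, min_lr=1e-6, verbose=1)
callbacks_list = [checkpoint, reduce_lr]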
- ETA: 0s - loss: 0.0350 - root_mean_squared_error: 0.1871\nEpoch 00077: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0350 - root_mean_squared_error: 0.1870 - val_loss: 0.0540 - val_root_mean_squared_error: 0.2323\nEpoch 78/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0385 - root_mean_squared_error: 0.1963\nEpoch 00078: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0384 - root_mean_squared_error: 0.1961 - val_loss: 0.0568 - val_root_mean_squared_error: 0.2383\nEpoch 79/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0431 - root_mean_squared_error: 0.2076\nEpoch 00079: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0431 - root_mean_squared_error: 0.2075 - val_loss: 0.0575 - val_root_mean_squared_error: 0.2399\nEpoch 80/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0400 - root_mean_squared_error: 0.1999\nEpoch 00080: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0399 - root_mean_squared_error: 0.1999 - val_loss: 0.0551 - val_root_mean_squared_error: 0.2346\nEpoch 81/100\n641/641 [==============================] - ETA: 0s - loss: 0.0386 - root_mean_squared_error: 0.1964\nEpoch 00081: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0386 - root_mean_squared_error: 0.1964 - val_loss: 0.0581 - val_root_mean_squared_error: 0.2410\nEpoch 82/100\n641/641 [==============================] - ETA: 0s - loss: 0.0377 - root_mean_squared_error: 0.1940\nEpoch 00082: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0377 - root_mean_squared_error: 0.1940 - val_loss: 0.0563 - val_root_mean_squared_error: 0.2374\nEpoch 83/100\n641/641 [==============================] - ETA: 0s - loss: 0.0354 - root_mean_squared_error: 0.1881\nEpoch 00083: val_loss did not improve from 0.05331\n641/641 [==============================] - 8s 12ms/step - loss: 0.0354 - root_mean_squared_error: 0.1881 - val_loss: 0.0560 - val_root_mean_squared_error: 0.2367\nEpoch 84/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0391 - root_mean_squared_error: 0.1977\nEpoch 00084: val_loss improved from 0.05331 to 0.05100, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0084-0.0510.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0405 - root_mean_squared_error: 0.2011 - val_loss: 0.0510 - val_root_mean_squared_error: 0.2258\nEpoch 85/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0371 - root_mean_squared_error: 0.1927\nEpoch 00085: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0371 - root_mean_squared_error: 0.1926 - val_loss: 0.0533 - val_root_mean_squared_error: 0.2309\nEpoch 86/100\n637/641 [============================>.] 
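# Note: the manual DataFrame/to_csv cells a little further down rebuild the history
# table by hand; the model_history() helper defined after the camera9 run already
# does the same thing and could simply be reused:
model_1_camera1 = model_history('model_1_camera1')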
- ETA: 0s - loss: 0.0381 - root_mean_squared_error: 0.1953\nEpoch 00086: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0381 - root_mean_squared_error: 0.1952 - val_loss: 0.0514 - val_root_mean_squared_error: 0.2267\nEpoch 87/100\n641/641 [==============================] - ETA: 0s - loss: 0.0359 - root_mean_squared_error: 0.1893\nEpoch 00087: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0359 - root_mean_squared_error: 0.1893 - val_loss: 0.0599 - val_root_mean_squared_error: 0.2447\nEpoch 88/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0350 - root_mean_squared_error: 0.1870\nEpoch 00088: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0349 - root_mean_squared_error: 0.1869 - val_loss: 0.0551 - val_root_mean_squared_error: 0.2347\nEpoch 89/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0358 - root_mean_squared_error: 0.1893\nEpoch 00089: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0358 - root_mean_squared_error: 0.1892 - val_loss: 0.0579 - val_root_mean_squared_error: 0.2407\nEpoch 90/100\n641/641 [==============================] - ETA: 0s - loss: 0.0333 - root_mean_squared_error: 0.1825\nEpoch 00090: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0333 - root_mean_squared_error: 0.1825 - val_loss: 0.0565 - val_root_mean_squared_error: 0.2378\nEpoch 91/100\n636/641 [============================>.] - ETA: 0s - loss: 0.0378 - root_mean_squared_error: 0.1946\nEpoch 00091: val_loss did not improve from 0.05100\n641/641 [==============================] - 7s 12ms/step - loss: 0.0377 - root_mean_squared_error: 0.1941 - val_loss: 0.0560 - val_root_mean_squared_error: 0.2366\nEpoch 92/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0347 - root_mean_squared_error: 0.1862\nEpoch 00092: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0346 - root_mean_squared_error: 0.1859 - val_loss: 0.0552 - val_root_mean_squared_error: 0.2348\nEpoch 93/100\n640/641 [============================>.] - ETA: 0s - loss: 0.0326 - root_mean_squared_error: 0.1805\nEpoch 00093: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0326 - root_mean_squared_error: 0.1805 - val_loss: 0.0619 - val_root_mean_squared_error: 0.2488\nEpoch 94/100\n637/641 [============================>.] - ETA: 0s - loss: 0.0364 - root_mean_squared_error: 0.1907\nEpoch 00094: val_loss did not improve from 0.05100\n641/641 [==============================] - 8s 12ms/step - loss: 0.0362 - root_mean_squared_error: 0.1904 - val_loss: 0.0516 - val_root_mean_squared_error: 0.2271\nEpoch 95/100\n640/641 [============================>.] 
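# Aside (a sketch): with save_best_only=True it is the best checkpoint file, not
# the final in-memory weights, that should be evaluated. The path below follows the
# checkpoint pattern saved in this run; depending on the Keras version, load_model
# may need custom_objects for the RootMeanSquaredError metric.
from keras.models import load_model
best_model = load_model(
    '/content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0095-0.0495.h5')
val_loss, val_rmse = best_model.evaluate(X_test, y_test, verbose=0)
print(val_loss, val_rmse)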
- ETA: 0s - loss: 0.0337 - root_mean_squared_error: 0.1837\nEpoch 00095: val_loss improved from 0.05100 to 0.04950, saving model to /content/drive/My Drive/epochs/model_1_camera1_lr:0.0001.0095-0.0495.h5\n641/641 [==============================] - 8s 12ms/step - loss: 0.0338 - root_mean_squared_error: 0.1837 - val_loss: 0.0495 - val_root_mean_squared_error: 0.2225\nEpoch 96/100\n641/641 [==============================] - ETA: 0s - loss: 0.0318 - root_mean_squared_error: 0.1783\nEpoch 00096: val_loss did not improve from 0.04950\n641/641 [==============================] - 8s 12ms/step - loss: 0.0318 - root_mean_squared_error: 0.1783 - val_loss: 0.0533 - val_root_mean_squared_error: 0.2309\nEpoch 97/100\n641/641 [==============================] - ETA: 0s - loss: 0.0369 - root_mean_squared_error: 0.1920\nEpoch 00097: val_loss did not improve from 0.04950\n641/641 [==============================] - 8s 12ms/step - loss: 0.0369 - root_mean_squared_error: 0.1920 - val_loss: 0.0505 - val_root_mean_squared_error: 0.2248\nEpoch 98/100\n638/641 [============================>.] - ETA: 0s - loss: 0.0364 - root_mean_squared_error: 0.1907\nEpoch 00098: val_loss did not improve from 0.04950\n641/641 [==============================] - 8s 12ms/step - loss: 0.0363 - root_mean_squared_error: 0.1906 - val_loss: 0.0567 - val_root_mean_squared_error: 0.2382\nEpoch 99/100\n639/641 [============================>.] - ETA: 0s - loss: 0.0317 - root_mean_squared_error: 0.1781\nEpoch 00099: val_loss did not improve from 0.04950\n641/641 [==============================] - 8s 12ms/step - loss: 0.0317 - root_mean_squared_error: 0.1781 - val_loss: 0.0544 - val_root_mean_squared_error: 0.2332\nEpoch 100/100\n641/641 [==============================] - ETA: 0s - loss: 0.0331 - root_mean_squared_error: 0.1818\nEpoch 00100: val_loss did not improve from 0.04950\n641/641 [==============================] - 8s 12ms/step - loss: 0.0331 - root_mean_squared_error: 0.1818 - val_loss: 0.0560 - val_root_mean_squared_error: 0.2367\n" ], [ "ticks = [i for i in range(0, 101, 10)]\nlabels = [i for i in range(0, 101, 10)]\nlabels[0] = 1", "_____no_output_____" ], [ "labels", "_____no_output_____" ], [ "ticks", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure size.\nplt.figure(figsize=(20, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch for Camera1', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18);", "_____no_output_____" ], [ "print(history.history.keys())", "dict_keys(['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])\n" ], [ "model_1_camera1 = pd.DataFrame({'loss': history.history['loss'],\n 'root_mean_squared_error': history.history['root_mean_squared_error'],\n 'val_loss': history.history['val_loss'],\n 'val_root_mean_squared_error': history.history['val_root_mean_squared_error']},\n columns = ['loss', 'root_mean_squared_error', 'val_loss', 'val_root_mean_squared_error'])", "_____no_output_____" ], [ "model_1_camera1.to_csv('/content/drive/My Drive/datasets/model_1_camera1.csv', index=False)", "_____no_output_____" ], [ "train_loss = history.history['loss']\ntest_loss = history.history['val_loss']\n\n# Set figure 
size.\nplt.figure(figsize=(12, 8))\n\n# Generate line plot of training, testing loss over epochs.\nplt.plot(train_loss, label='Training Loss', color='#185fad')\nplt.plot(test_loss, label='Testing Loss', color='orange')\n\n# Set title\nplt.title('Training and Testing Loss by Epoch', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Mean Squared Error', fontsize = 18)\nplt.xticks(ticks, labels)\n\nplt.legend(fontsize = 18);", "_____no_output_____" ], [ "model_2 = Sequential()\n\nmodel_2.add(Conv2D(16, (3, 3), input_shape=(80, 160, 1), activation='relu'))\nmodel_2.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_2.add(Dropout(.25))\n\nmodel_2.add(Conv2D(32, (3, 3), activation='relu'))\nmodel_2.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_2.add(Dropout(.25))\n\nmodel_2.add(Conv2D(64, (3, 3), activation='relu'))\nmodel_2.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_2.add(Dropout(.25))\n\nmodel_2.add(Flatten())\nmodel_2.add(Dense(4096, activation='relu'))\nmodel_2.add(Dropout(.5))\n\nmodel_2.add(Dense(2048, activation='relu'))\nmodel_2.add(Dropout(.5))\n\nmodel_2.add(Dense(1024, activation='relu'))\nmodel_2.add(Dropout(.5))\n\nmodel_2.add(Dense(512, activation='relu'))\nmodel_2.add(Dropout(.5))\n\nmodel_2.add(Dense(1))\n\nmodel_2.compile(loss='mse', optimizer='adam', metrics=[RootMeanSquaredError()])", "_____no_output_____" ], [ "from keras.callbacks import *\nfilepath = \"/content/drive/My Drive/epochs/model_2_shuffled.{epoch:03d}-{val_loss:.3f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min' is required when monitoring val_loss; the run logged below was executed with mode='max', which is why its 'improved' messages track a rising value\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model_2.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=30,\n verbose=1,\n callbacks=callbacks_list)", "Train on 41003 samples, validate on 10251 samples\nEpoch 1/30\n41003/41003 [==============================] - 41s 1ms/step - loss: 0.3027 - root_mean_squared_error: 0.5502 - val_loss: 0.2351 - val_root_mean_squared_error: 0.4849\n\nEpoch 00001: val_loss improved from -inf to 0.23513, saving model to /content/drive/My Drive/epochs/model_2_shuffled.001-0.235.h5\nEpoch 2/30\n41003/41003 [==============================] - 37s 896us/step - loss: 0.2532 - root_mean_squared_error: 0.5032 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00002: val_loss did not improve from 0.23513\nEpoch 3/30\n41003/41003 [==============================] - 37s 902us/step - loss: 0.2531 - root_mean_squared_error: 0.5031 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00003: val_loss did not improve from 0.23513\nEpoch 4/30\n41003/41003 [==============================] - 37s 911us/step - loss: 0.2531 - root_mean_squared_error: 0.5031 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00004: val_loss did not improve from 0.23513\nEpoch 5/30\n41003/41003 [==============================] - 36s 879us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00005: val_loss did not improve from 0.23513\nEpoch 6/30\n41003/41003 [==============================] - 36s 873us/step - loss: 0.2530 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00006: val_loss did not improve from 0.23513\nEpoch 7/30\n41003/41003 [==============================] - 36s 872us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00007: val_loss did not improve from 
0.23513\nEpoch 8/30\n41003/41003 [==============================] - 36s 874us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2349 - val_root_mean_squared_error: 0.4847\n\nEpoch 00008: val_loss did not improve from 0.23513\nEpoch 9/30\n41003/41003 [==============================] - 36s 873us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2353 - val_root_mean_squared_error: 0.4850\n\nEpoch 00009: val_loss improved from 0.23513 to 0.23527, saving model to /content/drive/My Drive/epochs/model_2_shuffled.009-0.235.h5\nEpoch 10/30\n41003/41003 [==============================] - 36s 876us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00010: val_loss did not improve from 0.23527\nEpoch 11/30\n41003/41003 [==============================] - 36s 873us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00011: val_loss did not improve from 0.23527\nEpoch 12/30\n41003/41003 [==============================] - 36s 874us/step - loss: 0.2535 - root_mean_squared_error: 0.5035 - val_loss: 0.2351 - val_root_mean_squared_error: 0.4849\n\nEpoch 00012: val_loss did not improve from 0.23527\nEpoch 13/30\n41003/41003 [==============================] - 36s 872us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2359 - val_root_mean_squared_error: 0.4857\n\nEpoch 00013: val_loss improved from 0.23527 to 0.23593, saving model to /content/drive/My Drive/epochs/model_2_shuffled.013-0.236.h5\nEpoch 14/30\n41003/41003 [==============================] - 36s 880us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2349 - val_root_mean_squared_error: 0.4846\n\nEpoch 00014: val_loss did not improve from 0.23593\nEpoch 15/30\n41003/41003 [==============================] - 36s 875us/step - loss: 0.2530 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00015: val_loss did not improve from 0.23593\nEpoch 16/30\n41003/41003 [==============================] - 36s 875us/step - loss: 0.2531 - root_mean_squared_error: 0.5031 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00016: val_loss did not improve from 0.23593\nEpoch 17/30\n41003/41003 [==============================] - 36s 872us/step - loss: 0.2530 - root_mean_squared_error: 0.5030 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00017: val_loss did not improve from 0.23593\nEpoch 18/30\n41003/41003 [==============================] - 36s 875us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2349 - val_root_mean_squared_error: 0.4846\n\nEpoch 00018: val_loss did not improve from 0.23593\nEpoch 19/30\n41003/41003 [==============================] - 36s 877us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00019: val_loss did not improve from 0.23593\nEpoch 20/30\n41003/41003 [==============================] - 36s 871us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00020: val_loss did not improve from 0.23593\nEpoch 21/30\n41003/41003 [==============================] - 36s 874us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00021: val_loss did not improve from 0.23593\nEpoch 22/30\n41003/41003 [==============================] - 36s 871us/step - 
loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00022: val_loss did not improve from 0.23593\nEpoch 23/30\n41003/41003 [==============================] - 36s 868us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00023: val_loss did not improve from 0.23593\nEpoch 24/30\n41003/41003 [==============================] - 36s 868us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00024: val_loss did not improve from 0.23593\nEpoch 25/30\n41003/41003 [==============================] - 36s 877us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00025: val_loss did not improve from 0.23593\nEpoch 26/30\n41003/41003 [==============================] - 36s 871us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00026: val_loss did not improve from 0.23593\nEpoch 27/30\n41003/41003 [==============================] - 36s 867us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00027: val_loss did not improve from 0.23593\nEpoch 28/30\n41003/41003 [==============================] - 36s 876us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00028: val_loss did not improve from 0.23593\nEpoch 29/30\n41003/41003 [==============================] - 36s 883us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00029: val_loss did not improve from 0.23593\nEpoch 30/30\n41003/41003 [==============================] - 36s 880us/step - loss: 0.2531 - root_mean_squared_error: 0.5031 - val_loss: 0.2349 - val_root_mean_squared_error: 0.4846\n\nEpoch 00030: val_loss did not improve from 0.23593\n" ], [ "model_3 = Sequential()\n\nmodel_3.add(Conv2D(16, (3, 3), input_shape=(80, 160, 1), activation='relu'))\nmodel_3.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_3.add(Dropout(.25))\n\nmodel_3.add(Conv2D(32, (3, 3), activation='relu'))\nmodel_3.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_3.add(Dropout(.25))\n\nmodel_3.add(Conv2D(64, (3, 3), activation='relu'))\nmodel_3.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_3.add(Dropout(.25))\n\nmodel_3.add(Conv2D(128, (3, 3), activation='relu'))\nmodel_3.add(MaxPooling2D(pool_size=(2, 2)))\nmodel_3.add(Dropout(.25))\n\nmodel_3.add(Flatten())\nmodel_3.add(Dense(4096, activation='relu'))\nmodel_3.add(Dropout(.5))\n\nmodel_3.add(Dense(2048, activation='relu'))\nmodel_3.add(Dropout(.5))\n\nmodel_3.add(Dense(1024, activation='relu'))\nmodel_3.add(Dropout(.5))\n\nmodel_3.add(Dense(512, activation='relu'))\nmodel_3.add(Dropout(.5))\n\nmodel_3.add(Dense(1))\n\nmodel_3.compile(loss='mse', optimizer='adam', metrics=[RootMeanSquaredError()])", "_____no_output_____" ], [ "from keras.callbacks import *\nfilepath = \"/content/drive/My Drive/epochs/model_3_shuffled.{epoch:03d}-{val_loss:.3f}.h5\"\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')  # 'min' is required when monitoring val_loss; the run logged below was executed with mode='max'\ncallbacks_list = [checkpoint]", "_____no_output_____" ], [ "history = model_3.fit(X_train,\n y_train,\n batch_size=64,\n validation_data=(X_test, y_test),\n epochs=15,\n verbose=1,\n callbacks=callbacks_list)", "Train on 41003 samples, validate on 10251 
samples\nEpoch 1/15\n41003/41003 [==============================] - 26s 633us/step - loss: 0.2560 - root_mean_squared_error: 0.5060 - val_loss: 0.2350 - val_root_mean_squared_error: 0.4848\n\nEpoch 00001: val_loss improved from -inf to 0.23499, saving model to /content/drive/My Drive/epochs/model_3_shuffled.001-0.235.h5\nEpoch 2/15\n41003/41003 [==============================] - 25s 621us/step - loss: 0.2531 - root_mean_squared_error: 0.5031 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00002: val_loss did not improve from 0.23499\nEpoch 3/15\n41003/41003 [==============================] - 25s 618us/step - loss: 0.2530 - root_mean_squared_error: 0.5030 - val_loss: 0.2349 - val_root_mean_squared_error: 0.4846\n\nEpoch 00003: val_loss did not improve from 0.23499\nEpoch 4/15\n41003/41003 [==============================] - 25s 621us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00004: val_loss did not improve from 0.23499\nEpoch 5/15\n41003/41003 [==============================] - 25s 622us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00005: val_loss did not improve from 0.23499\nEpoch 6/15\n41003/41003 [==============================] - 26s 628us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00006: val_loss did not improve from 0.23499\nEpoch 7/15\n41003/41003 [==============================] - 26s 623us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4846\n\nEpoch 00007: val_loss did not improve from 0.23499\nEpoch 8/15\n41003/41003 [==============================] - 26s 623us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00008: val_loss did not improve from 0.23499\nEpoch 9/15\n41003/41003 [==============================] - 26s 624us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00009: val_loss did not improve from 0.23499\nEpoch 10/15\n41003/41003 [==============================] - 26s 627us/step - loss: 0.2529 - root_mean_squared_error: 0.5029 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00010: val_loss did not improve from 0.23499\nEpoch 11/15\n41003/41003 [==============================] - 26s 624us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00011: val_loss did not improve from 0.23499\nEpoch 12/15\n41003/41003 [==============================] - 26s 623us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00012: val_loss did not improve from 0.23499\nEpoch 13/15\n41003/41003 [==============================] - 25s 621us/step - loss: 0.2528 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - val_root_mean_squared_error: 0.4845\n\nEpoch 00013: val_loss did not improve from 0.23499\nEpoch 14/15\n41003/41003 [==============================] - 25s 621us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2348 - val_root_mean_squared_error: 0.4845\n\nEpoch 00014: val_loss did not improve from 0.23499\nEpoch 15/15\n41003/41003 [==============================] - 25s 620us/step - loss: 0.2529 - root_mean_squared_error: 0.5028 - val_loss: 0.2347 - 
val_root_mean_squared_error: 0.4845\n\nEpoch 00015: val_loss did not improve from 0.23499\n" ], [ "####### loading code\nX = load('/content/drive/My Drive/camera1_train.npz')\nX = X.f.arr_0\n\nlog1 = pd.read_csv('/content/drive/My Drive/log1_train.csv')\ny = log1['steering_avg_radian']\ny = y.to_numpy()\ny = y.reshape(y.shape[0], 1)\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=False)\n####### end of loading code", "_____no_output_____" ], [ "!cp -r \"/content/drive/My Drive/camera1_train.npz\" ./camera1_train.npz", "_____no_output_____" ], [ "X = load('./camera1_train.npz')", "_____no_output_____" ], [ "X = X.f.arr_0", "_____no_output_____" ], [ "log1 = pd.read_csv('/content/drive/My Drive/log1_train.csv')\ny = log1['steering_avg_radian']\ny = y.to_numpy()\ny = y.reshape(y.shape[0], 1)", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, shuffle=True)", "_____no_output_____" ], [ "savez_compressed('/content/drive/My Drive/X_train_shuffled', X_train)", "_____no_output_____" ], [ "savez_compressed('/content/drive/My Drive/X_test_shuffled', X_test)", "_____no_output_____" ], [ "savez_compressed('/content/drive/My Drive/y_train_shuffled', y_train)", "_____no_output_____" ], [ "savez_compressed('/content/drive/My Drive/y_test_shuffled', y_test)", "_____no_output_____" ], [ "!nvidia-smi", "Mon Aug 3 15:26:27 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.57 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 72C P0 33W / 70W | 3167MiB / 15079MiB | 0% Default |\n| | | ERR! 
|\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ], [ "####### loading code from drive\nX_train = load('/content/drive/My Drive/X_train.npz')\nX_train = X_train.f.arr_0\nX_test = load('/content/drive/My Drive/X_test.npz')\nX_test = X_test.f.arr_0\ny_train = load('/content/drive/My Drive/y_train.npz')\ny_train = y_train.f.arr_0\ny_test = load('/content/drive/My Drive/y_test.npz')\ny_test = y_test.f.arr_0\n####### end of loading code", "_____no_output_____" ], [ "!cp -r \"/content/drive/My Drive/X_train.npz\" ./X_train.npz\n!cp -r \"/content/drive/My Drive/X_test.npz\" ./X_test.npz\n!cp -r \"/content/drive/My Drive/y_train.npz\" ./y_train.npz\n!cp -r \"/content/drive/My Drive/y_test.npz\" ./y_test.npz", "_____no_output_____" ], [ "####### loading code from vm\nX_train = load('./X_train.npz')\nX_train = X_train.f.arr_0\nX_test = load('./X_test.npz')\nX_test = X_test.f.arr_0\ny_train = load('./y_train.npz')\ny_train = y_train.f.arr_0\ny_test = load('./y_test.npz')\ny_test = y_test.f.arr_0\n####### end of loading code", "_____no_output_____" ], [ "# for shuffled data\n\n!cp -r \"/content/drive/My Drive/X_train_shuffled.npz\" ./X_train.npz\n!cp -r \"/content/drive/My Drive/X_test_shuffled.npz\" ./X_test.npz\n!cp -r \"/content/drive/My Drive/y_train_shuffled.npz\" ./y_train.npz\n!cp -r \"/content/drive/My Drive/y_test_shuffled.npz\" ./y_test.npz", "_____no_output_____" ], [ "# for shuffled data\n####### loading code from vm\nX_train = load('./X_train.npz')\nX_train = X_train.f.arr_0\nX_test = load('./X_test.npz')\nX_test = X_test.f.arr_0\ny_train = load('./y_train.npz')\ny_train = y_train.f.arr_0\ny_test = load('./y_test.npz')\ny_test = y_test.f.arr_0\n####### end of loading code", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0df6c6233d1f678880f2ee1ecaa086e193e1268
2,018
ipynb
Jupyter Notebook
spectrum_slice/spectrum_slice.ipynb
rsprouse/phonapps
1b375b0a99f5a11f36ac4692bcd5b850433b18d9
[ "BSD-3-Clause" ]
2
2020-07-20T06:59:06.000Z
2020-10-19T14:18:58.000Z
spectrum_slice/spectrum_slice.ipynb
rsprouse/phonapps
1b375b0a99f5a11f36ac4692bcd5b850433b18d9
[ "BSD-3-Clause" ]
null
null
null
spectrum_slice/spectrum_slice.ipynb
rsprouse/phonapps
1b375b0a99f5a11f36ac4692bcd5b850433b18d9
[ "BSD-3-Clause" ]
1
2020-05-21T11:05:39.000Z
2020-05-21T11:05:39.000Z
36.035714
359
0.675421
[ [ [ "# Spectrogram and spectrum slice display\n\nSelect an audio file from the dropdown list to display its waveform spectrogram. Click on a part of the waveform or spectrogram to select a spectrum slice to display. Mouse over the spectrum slice to explore the frequencies. The circle tracks the current frequency in the spectrum slice and spectrogram, and current values are displayed in the table.\n\nSpectrogram and spectrum slice values below the low threshold are shaded blue. The analysis window slider adjusts the spectrogram analysis window.", "_____no_output_____" ] ], [ [ "import os\nos.environ['BOKEH_RESOURCES'] = 'inline' # To ensure we load monkeypatched version of bokeh rather than from cdn\nfrom spectrum_slice_app import spectrum_slice_app\nfrom bokeh_phon.utils import remote_jupyter_proxy_url_callback, set_default_jupyter_url\nfrom bokeh.io import show\n# Set this value to the hostname you find in your browser url bar. Do not include\n# any path found after the hostname.\nset_default_jupyter_url('https://hub.gke2.mybinder.org/')\n# If running locally, exclude the notebook_url parameter.\n# show(spectrum_slice_app) # Run in local jupyter notebook (not in jupyterhub/binderhub)\nshow(spectrum_slice_app, notebook_url=remote_jupyter_proxy_url_callback)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
d0df7a9b85c5f5c5a12947a09f50e531dd503233
4,726
ipynb
Jupyter Notebook
Module-05/.ipynb_checkpoints/BigO_solution-checkpoint.ipynb
FairozaAmira/python-training
6912b70dec0ed2a14abe8d59f67201fdc4ec277e
[ "Apache-2.0" ]
1
2021-04-05T01:35:29.000Z
2021-04-05T01:35:29.000Z
Module-05/.ipynb_checkpoints/BigO_solution-checkpoint.ipynb
FairozaAmira/python-training
6912b70dec0ed2a14abe8d59f67201fdc4ec277e
[ "Apache-2.0" ]
null
null
null
Module-05/.ipynb_checkpoints/BigO_solution-checkpoint.ipynb
FairozaAmira/python-training
6912b70dec0ed2a14abe8d59f67201fdc4ec277e
[ "Apache-2.0" ]
2
2021-03-01T04:42:29.000Z
2021-03-01T04:42:59.000Z
22.187793
248
0.498942
[ [ [ "<a href=\"https://colab.research.google.com/github/FairozaAmira/AI_Programming_1_e/blob/master/Lesson12/BigO_solution.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Solution for Big O Notation exercises", "_____no_output_____" ], [ "**Question 1**\n\nDesign an algorithm that calculates the factorial of a number , 1) By using for loop, 2) By using if-else statement. Then, calculate the time needed to run the program.\n\nfor example, \n$4! = 4 \\times 3 \\times 2 \\times 1 = 24 $\n\nWhich algorithm is the fastest one? \n\n*Source: https://stackabuse.com/big-o-notation-and-algorithm-analysis-with-python-examples/*", "_____no_output_____" ] ], [ [ "%%writefile fact1.py\n\n#By using for-loop\n\nfrom memory_profiler import profile\n\n@profile\ndef fact(n):\n product = 1\n for i in range(n):\n product = product * (i+1)\n return product\n\nprint(fact(4))\n# print(%timeit fact(4))\n# print(%timeit fact(50))", "Overwriting fact1.py\n" ], [ "#By using for-loop\n\ndef fact(n):\n product = 1\n for i in range(n):\n product = product * (i+1)\n return product\n\nprint(fact(4))\n%timeit fact(4)\n%timeit fact(50)", "24\n412 ns ± 6.98 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n3.71 µs ± 29.3 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n" ], [ "%%writefile fact2.py\n\nfrom memory_profiler import profile\n\n#By using if-else statement\n@profile\ndef fact2 (n):\n if n == 0:\n return 1\n else:\n return n * fact2(n-1)\n\nprint(fact2(4))\n# %timeit fact2(4)\n# %timeit fact2(50)", "Writing fact2.py\n" ], [ "#By using if-else statement\n\ndef fact2 (n):\n if n == 0:\n return 1\n else:\n return n * fact2(n-1)\n\nprint(fact2(4))\n%timeit fact2(4)\n%timeit fact2(50)", "24\n505 ns ± 34.7 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)\n8.21 µs ± 1.38 µs per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ] ]
d0df88a7f5c82553bf53a09ac4bd2d7f2ccd58d4
5,868
ipynb
Jupyter Notebook
Terms-and-Abbreviations/LSTM.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
7
2020-08-25T23:10:34.000Z
2022-01-11T16:43:37.000Z
Terms-and-Abbreviations/LSTM.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
null
null
null
Terms-and-Abbreviations/LSTM.ipynb
gymk/ANLP
3052b12d307e4b2c8e192f3ac3b6e393932a0d77
[ "MIT" ]
11
2020-09-28T09:08:01.000Z
2022-03-29T18:06:22.000Z
34.928571
326
0.622359
[ [ [ "# LSTM - Long Short Term Memory", "_____no_output_____" ], [ "- From [v1] Lecture 60\n - LSTM, another variation of RNN", "_____no_output_____" ], [ " ## Study Links", "_____no_output_____" ], [ "- [An empirical exploration of recurrent network architectures](https://dl.acm.org/citation.cfm?id=3045367)\n- https://dblp.uni-trier.de/db/journals/corr/corr1506.html\n - [A Critical Review of Recurrent Neural Networks for Sequence Learning](https://arxiv.org/pdf/1506.00019.pdf)\n- [Deep Learning](https://web.cs.hacettepe.edu.tr/~aykut/classes/spring2018/cmp784/slides/lec7-recurrent-neural-nets.pdf)", "_____no_output_____" ], [ "## Problems with Vanilla RNN", "_____no_output_____" ], [ "- The component of the gradient in directions that correspond to long-term dependencies is [small](https://dl.acm.org/citation.cfm?id=3045118.3045367)\n - From state $t$ to state $0$\n- The components of the gradient in directions that correspond to short-term dependencies are large\n- As a result, RNNs can easily learn the short-term but not the long-term dependencies", "_____no_output_____" ], [ "## LSTM", "_____no_output_____" ], [ "- In LSTM network, the network is the same as a standard RNN, except that the summation units in the hidden layer are replaced by memory blocks\n - This will be done in $\\large s_t$\n- The multiplicative gates allow LSTM memory cells to store and access information over periods of time, thereby mitigating the [vanishing gradient problem](https://dblp.uni-trier.de/db/journals/corr/corr1506.html)\n - LSTM will have multiple gates that allows cells to keep some information or loose some information\n - By doing this we want to acheive the long term dependency in the network\n - At the same time, solving the problem of Vanishing Gradient Problem [37]\n- Along with the hidden state vector $\\large h_t$, LSTM maintains a memory vector $\\large C_t$\n - $\\large \\large h_{t} = \\text{tanh} \\left(Uh_{t-1}+ W {x_{t}} \\right)$ going to be replaced with $\\large C_t$\n - $\\large C_t$ is going to tell us how to condition the values of $\\large U$, so that _vanishing gradient problem disappears_\n- At each time step, the LSTM can choose to read from, write to, or reset the cell using explicit gating mechanisms\n - A small computer kind of logic exist inside, which is able to read, write and reset operations, so that $\\large h$ values are very well conditioned\n- LSTM computes well behaved gradients by controlling the values using the gates", "_____no_output_____" ], [ "## LSTM Cell", "_____no_output_____" ], [ "- See [Recurrent Neural Networks (RNN) and Long Short-Term Memory (LSTM)](https://www.youtube.com/watch?v=WCUNPb-5EYI&list=PLVZqlMpoM6kaJX_2lLKjEhWI0NlqHfqzp&index=5&t=0s) and [A friendly introduction to Recurrent Neural Networks](https://www.youtube.com/watch?v=UNmqTiOnRfg) to get very good intuition of how LSTM works", "_____no_output_____" ], [ "- Below diagram shows how does a LSTM Cell look like\n - $\\large h_t$ is replaced by this cell\n- The operations depicted in below diagram are performed during Forward Pass", "_____no_output_____" ], [ "![LSTM_Cell](images/LSTM_Cell.jpg)", "_____no_output_____" ], [ "- $\\large C_t$ $\\Rightarrow$ Previous memory state\n - $\\large C_t$ is a vector\n- $\\large h_t$ $\\Rightarrow$ Previous state of the hidden unit\n- $\\large W$ $Rightarrpw$ denotes the weight vector\n- $\\large q_t$ $\\Rightarrow$ netsum of Input vector with weight vector $\\large W$\n- $\\large f_t$ $\\Rightarrow$ is the forget gate, computed using vector 
$\\large W_f$, having a $Sigmoid$ as activation function\n- $\\large i_t$ $\\Rightarrow$ is the input cell, computed using vector $\\large W_i$, having a $Sigmoid$ as activation function\n- $\\large \\widetilde{C_t}$ $\\Rightarrow$ new computed memory, computed using vector $\\large W_{\\widetilde{C_t}}$, having a $tanh$ activation function\n- $\\large O_t$is the output gate, computed using vector $\\large W_{O_t}$, having a $Sigmoid$ activation function\n- $\\large \\otimes$ refers to element wise multiplication\n- $\\large \\oplus$ refers to element wise addition", "_____no_output_____" ], [ "## LSTM - Forward Pass", "_____no_output_____" ], [ "![LSTM_Forward_Pass](images/LSTM_Forward_Pass.jpg)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0df8e9a600ff653c86ca2bfeb47027351976c1a
172,412
ipynb
Jupyter Notebook
algos/Multi_Worker_Actor_Critic.ipynb
olonok69/RL_Stable_baselines
3634cc60e8de8e9dfa7cb50fdc6272284b8a7cc9
[ "MIT" ]
null
null
null
algos/Multi_Worker_Actor_Critic.ipynb
olonok69/RL_Stable_baselines
3634cc60e8de8e9dfa7cb50fdc6272284b8a7cc9
[ "MIT" ]
null
null
null
algos/Multi_Worker_Actor_Critic.ipynb
olonok69/RL_Stable_baselines
3634cc60e8de8e9dfa7cb50fdc6272284b8a7cc9
[ "MIT" ]
null
null
null
64.16524
3,298
0.591194
[ [ [ "<a href=\"https://colab.research.google.com/github/abhisheksuran/Atari_DQN/blob/master/Multi_Worker_Actor_Critic.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "import numpy as np\nimport tensorflow as tf \nimport gym\nimport tensorflow_probability as tfp\nfrom multiprocessing import Process, Queue, Barrier, Lock\nimport tensorflow.keras.losses as kls", "_____no_output_____" ], [ "!pip3 install box2d-py", "Collecting box2d-py\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/06/bd/6cdc3fd994b0649dcf5d9bad85bd9e26172308bbe9a421bfc6fdbf5081a6/box2d_py-2.3.8-cp36-cp36m-manylinux1_x86_64.whl (448kB)\n\r\u001b[K |▊ | 10kB 16.0MB/s eta 0:00:01\r\u001b[K |█▌ | 20kB 1.6MB/s eta 0:00:01\r\u001b[K |██▏ | 30kB 2.2MB/s eta 0:00:01\r\u001b[K |███ | 40kB 2.5MB/s eta 0:00:01\r\u001b[K |███▋ | 51kB 1.9MB/s eta 0:00:01\r\u001b[K |████▍ | 61kB 2.2MB/s eta 0:00:01\r\u001b[K |█████▏ | 71kB 2.4MB/s eta 0:00:01\r\u001b[K |█████▉ | 81kB 2.6MB/s eta 0:00:01\r\u001b[K |██████▋ | 92kB 2.8MB/s eta 0:00:01\r\u001b[K |███████▎ | 102kB 2.7MB/s eta 0:00:01\r\u001b[K |████████ | 112kB 2.7MB/s eta 0:00:01\r\u001b[K |████████▊ | 122kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████▌ | 133kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████▎ | 143kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████ | 153kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████▊ | 163kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████▍ | 174kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████▏ | 184kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████▉ | 194kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████▋ | 204kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████▍ | 215kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████ | 225kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████▉ | 235kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████▌ | 245kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████▎ | 256kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████ | 266kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████▊ | 276kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████████▌ | 286kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████████▏ | 296kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████████ | 307kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████████▋ | 317kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████████▍ | 327kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████████████ | 337kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████████████▉ | 348kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████████████▋ | 358kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████████████▎ | 368kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████████████ | 378kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████████████▊ | 389kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████████████████▌ | 399kB 2.7MB/s eta 0:00:01\r\u001b[K |█████████████████████████████▏ | 409kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████████████████ | 419kB 2.7MB/s eta 0:00:01\r\u001b[K |██████████████████████████████▊ | 430kB 2.7MB/s eta 0:00:01\r\u001b[K |███████████████████████████████▍| 440kB 2.7MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 450kB 2.7MB/s \n\u001b[?25hInstalling collected packages: box2d-py\nSuccessfully installed box2d-py-2.3.8\n" ], [ "env= gym.make(\"CartPole-v0\")\nlow = env.observation_space.low\nhigh = env.observation_space.high", "_____no_output_____" ], [ "class critic(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.d1 = 
tf.keras.layers.Dense(128,activation='relu')\n #self.d2 = tf.keras.layers.Dense(32,activation='relu')\n self.v = tf.keras.layers.Dense(1, activation = None)\n\n def call(self, input_data):\n x = self.d1(input_data)\n #x = self.d2(x)\n v = self.v(x)\n return v\n \n\nclass actor(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.d1 = tf.keras.layers.Dense(128,activation='relu')\n #self.d2 = tf.keras.layers.Dense(32,activation='relu')\n self.a = tf.keras.layers.Dense(2,activation='softmax')\n\n def call(self, input_data):\n x = self.d1(input_data)\n #x = self.d2(x)\n a = self.a(x)\n return a", "_____no_output_____" ], [ "class agent():\n def __init__(self, gamma = 0.99):\n self.gamma = gamma\n self.a_opt = tf.keras.optimizers.RMSprop(learning_rate=7e-3)\n self.c_opt = tf.keras.optimizers.RMSprop(learning_rate=7e-3)\n self.actor = actor()\n self.critic = critic()\n\n \n def act(self,state):\n prob = self.actor(np.array([state]))\n prob = prob.numpy()\n dist = tfp.distributions.Categorical(probs=prob, dtype=tf.float32)\n action = dist.sample()\n return int(action.numpy()[0])\n \n\n\n def actor_loss(self, probs, actions, td):\n \n probability = []\n log_probability= []\n for pb,a in zip(probs,actions):\n dist = tfp.distributions.Categorical(probs=pb, dtype=tf.float32)\n log_prob = dist.log_prob(a)\n prob = dist.prob(a)\n probability.append(prob)\n log_probability.append(log_prob)\n\n # print(probability)\n # print(log_probability)\n\n p_loss= []\n e_loss = []\n td = td.numpy()\n #print(td)\n for pb, t, lpb in zip(probability, td, log_probability):\n t = tf.constant(t)\n policy_loss = tf.math.multiply(lpb,t)\n entropy_loss = tf.math.negative(tf.math.multiply(pb,lpb))\n p_loss.append(policy_loss)\n e_loss.append(entropy_loss)\n p_loss = tf.stack(p_loss)\n e_loss = tf.stack(e_loss)\n p_loss = tf.reduce_mean(p_loss)\n e_loss = tf.reduce_mean(e_loss)\n # print(p_loss)\n # print(e_loss)\n loss = -p_loss - 0.0001 * e_loss\n #print(loss)\n return loss\n\n def learn(self, states, actions, discnt_rewards):\n discnt_rewards = tf.reshape(discnt_rewards, (len(discnt_rewards),))\n \n with tf.GradientTape() as tape1, tf.GradientTape() as tape2:\n p = self.actor(states, training=True)\n v = self.critic(states,training=True)\n v = tf.reshape(v, (len(v),))\n td = tf.math.subtract(discnt_rewards, v)\n # print(discnt_rewards)\n # print(v)\n #print(td.numpy())\n a_loss = self.actor_loss(p, actions, td)\n c_loss = 0.5*kls.mean_squared_error(discnt_rewards, v)\n grads1 = tape1.gradient(a_loss, self.actor.trainable_variables)\n grads2 = tape2.gradient(c_loss, self.critic.trainable_variables)\n self.a_opt.apply_gradients(zip(grads1, self.actor.trainable_variables))\n self.c_opt.apply_gradients(zip(grads2, self.critic.trainable_variables))\n return a_loss, c_loss", "_____no_output_____" ], [ "def preprocess1(states, actions, rewards, gamma, s_queue, a_queue, r_queue, lock):\n discnt_rewards = []\n sum_reward = 0\n rewards.reverse()\n for r in rewards:\n sum_reward = r + gamma*sum_reward\n discnt_rewards.append(sum_reward)\n discnt_rewards.reverse()\n states = np.array(states, dtype=np.float32)\n actions = np.array(actions, dtype=np.int32)\n discnt_rewards = np.array(discnt_rewards, dtype=np.float32)\n #exp = np.array([states, actions,discnt_rewards])\n lock.acquire()\n s_queue.put(states)\n a_queue.put(actions)\n r_queue.put(discnt_rewards)\n lock.release()\n\ndef preprocess2(s_queue, a_queue, r_queue):\n states = []\n while not s_queue.empty():\n states.append(s_queue.get())\n\n actions = []\n while not 
a_queue.empty():\n actions.append(a_queue.get())\n dis_rewards = []\n while not r_queue.empty():\n dis_rewards.append(r_queue.get())\n\n state_batch = np.concatenate(*(states,), axis=0) \n action_batch = np.concatenate(*(actions,), axis=None) \n reward_batch = np.concatenate(*(dis_rewards,), axis=None) \n # exp = np.transpose(exp) \n\n return state_batch, action_batch, reward_batch\n\ndef runner(barrier, lock, s_queue, a_queue, r_queue):\n tf.random.set_seed(360)\n agentoo7 = agent()\n steps = 2000\n ep_reward = []\n total_avgr = []\n for s in range(steps):\n \n done = False\n state = env.reset()\n total_reward = 0\n all_aloss = []\n all_closs = []\n rewards = []\n states = []\n actions = []\n \n while not done:\n \n action = agentoo7.act(state)\n next_state, reward, done, _ = env.step(action)\n rewards.append(reward)\n states.append(state)\n #actions.append(tf.one_hot(action, 2, dtype=tf.int32).numpy().tolist())\n actions.append(action)\n state = next_state\n total_reward += reward\n \n if done:\n ep_reward.append(total_reward)\n avg_reward = np.mean(ep_reward[-100:])\n total_avgr.append(avg_reward)\n print(\"total reward after {} steps is {} and avg reward is {}\".format(s, total_reward, avg_reward))\n preprocess1(states, actions, rewards, 1, s_queue, a_queue, r_queue, lock)\n b = barrier.wait()\n if b == 0:\n if (s_queue.qsize() == 10) & (a_queue.qsize() == 10) & (r_queue.qsize() == 10):\n print(s_queue.qsize())\n print(a_queue.qsize())\n print(r_queue.qsize())\n state_batch, action_batch, reward_batch = preprocess2(s_queue, a_queue, r_queue) \n # print(state_batch)\n # print(action_batch)\n # print(reward_batch) \n al,cl = agentoo7.learn(state_batch, action_batch, reward_batch) \n all_aloss.append(al)\n all_closs.append(cl)\n print(f\"al{al}\") \n print(f\"cl{cl}\") \n\n barrier.wait() \n ", "_____no_output_____" ], [ "barrier = Barrier(10)\ns_queue = Queue()\na_queue = Queue()\nr_queue = Queue()\nlock = Lock()\n\nprocesses = []\nfor i in range(10):\n worker = Process(target=runner, args=(barrier, lock, s_queue, a_queue, r_queue))\n processes.append(worker)\n worker.start()\n\nfor process in processes:\n process.join() \n", "WARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. 
If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. 
To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\nWARNING:tensorflow:Layer actor is casting an input tensor from dtype float64 to the layer's dtype of float32, which is new behavior in TensorFlow 2. The layer has dtype float32 because it's dtype defaults to floatx.\n\nIf you intended to run this layer in float32, you can safely ignore this warning. If in doubt, this warning is likely only an issue if you are porting a TensorFlow 1.X model to TensorFlow 2.\n\nTo change all layers to have dtype float64 by default, call `tf.keras.backend.set_floatx('float64')`. To change just this layer, pass dtype='float64' to the layer constructor. 
If you are the author of this layer, you can disable autocasting by passing autocast=False to the base Layer constructor.\n\n\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\ntotal reward after 0 steps is 28.0 and avg reward is 28.0\n10\n10\n10\nal9.992398262023926\ncl138.4424591064453\ntotal reward after 1 steps is 11.0 and avg reward is 19.5\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\ntotal reward after 1 steps is 18.0 and avg reward is 23.0\n10\n10\n10\nal5.852129936218262\ncl53.59456253051758\ntotal reward after 2 steps is 9.0 and avg reward is 16.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\ntotal reward after 2 steps is 20.0 and avg reward is 22.0\n10\n10\n10\nal8.015111923217773\ncl65.08858489990234\ntotal reward after 3 steps is 17.0 and avg reward is 16.25\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\ntotal reward after 3 steps is 42.0 and avg reward is 27.0\n10\n10\n10\nal14.080832481384277\ncl278.2972412109375\ntotal reward after 4 steps is 18.0 and avg reward is 16.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\ntotal reward after 4 steps is 20.0 and avg reward is 25.6\n10\n10\n10\nal6.4069504737854\ncl58.82269287109375\ntotal reward after 5 steps is 17.0 and avg reward is 16.666666666666668\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 
33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\ntotal reward after 5 steps is 75.0 and avg reward is 33.833333333333336\n10\n10\n10\nal24.54178237915039\ncl894.6021728515625\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\ntotal reward after 6 steps is 28.0 and avg reward is 18.285714285714285\ntotal reward after 6 steps is 15.0 and avg reward is 31.142857142857142\n10\n10\n10\nal6.266841888427734\ncl57.694671630859375\ntotal reward after 7 steps is 16.0 and avg reward is 29.25\ntotal reward after 7 steps is 35.0 and avg reward is 20.375\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\ntotal reward after 7 steps is 31.0 and avg reward is 31.125\n10\n10\n10\nal10.784611701965332\ncl162.67962646484375\ntotal reward after 8 steps is 14.0 and avg reward is 27.555555555555557\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 22.0 and avg reward is 30.11111111111111\ntotal reward after 8 steps is 27.0 and avg reward is 21.11111111111111\n10\n10\n10\nal7.162203311920166\ncl80.91149139404297\ntotal reward after 9 steps is 12.0 and avg reward is 26.0\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 35.0 and avg reward is 30.6\ntotal reward after 9 steps is 49.0 and avg reward is 23.9\n10\n10\n10\nal12.811543464660645\ncl220.13259887695312\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 
steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 15.0 and avg reward is 29.181818181818183\ntotal reward after 10 steps is 44.0 and avg reward is 27.636363636363637\ntotal reward after 10 steps is 54.0 and avg reward is 26.636363636363637\n10\n10\n10\nal10.724054336547852\ncl213.0997314453125\ntotal reward after 11 steps is 14.0 and avg reward is 27.916666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 11.0 and avg reward is 27.666666666666668\ntotal reward after 11 steps is 13.0 and avg reward is 25.5\ntotal reward after 11 steps is 18.0 and avg reward is 26.833333333333332\n10\n10\n10\nal5.281088829040527\ncl27.924283981323242\ntotal reward after 12 steps is 14.0 and avg reward is 25.846153846153847\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 40.0 and avg reward is 28.846153846153847\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 18.0 and avg reward is 26.923076923076923\ntotal reward after 12 steps is 75.0 and avg reward is 29.307692307692307\n10\n10\n10\nal12.213509559631348\ncl331.5197448730469\ntotal reward after 13 steps is 25.0 and avg reward is 28.571428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 22.0 and avg reward is 28.785714285714285\ntotal reward after 13 steps is 42.0 and avg reward is 27.0\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\ntotal reward after 13 steps is 29.0 and avg reward is 27.071428571428573\n10\n10\n10\nal10.207479476928711\ncl152.43833923339844\ntotal reward after 14 steps is 21.0 and avg reward is 28.066666666666666\ntotal reward after 14 steps is 22.0 and avg reward is 26.666666666666668\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\ntotal reward after 14 steps is 46.0 and avg reward is 
28.333333333333332\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\ntotal reward after 14 steps is 45.0 and avg reward is 29.866666666666667\ntotal reward after 14 steps is 46.0 and avg reward is 28.333333333333332\n10\n10\n10\nal14.116483688354492\ncl312.8587341308594\ntotal reward after 15 steps is 19.0 and avg reward is 26.1875\ntotal reward after 15 steps is 25.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 21.0 and avg reward is 27.875\ntotal reward after 15 steps is 51.0 and avg reward is 31.1875\n10\n10\n10\nal8.415167808532715\ncl138.4480438232422\ntotal reward after 16 steps is 28.0 and avg reward is 26.294117647058822\ntotal reward after 16 steps is 20.0 and avg reward is 30.529411764705884\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 48.0 and avg reward is 29.058823529411764\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\ntotal reward after 16 steps is 41.0 and avg reward is 28.647058823529413\n10\n10\n10\nal12.394094467163086\ncl249.5789794921875\ntotal reward after 17 steps is 16.0 and avg reward is 29.72222222222222\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 19.0 and avg reward is 28.11111111111111\ntotal reward after 17 steps is 21.0 and avg reward is 28.61111111111111\ntotal reward after 17 steps is 57.0 and avg reward is 28.0\n10\n10\n10\nal8.517853736877441\ncl165.36598205566406\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 32.0 and avg reward is 28.210526315789473\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 28.0 and avg reward is 28.105263157894736\ntotal reward after 18 steps is 71.0 and avg reward is 31.894736842105264\ntotal reward after 18 steps is 86.0 and avg reward is 31.63157894736842\n10\n10\n10\nal17.042213439941406\ncl522.1785278320312\ntotal reward after 19 steps is 29.0 and avg reward is 28.15\ntotal reward after 19 steps is 29.0 and avg reward is 28.15\ntotal reward after 19 steps is 29.0 and avg reward is 28.15\ntotal reward after 19 steps is 29.0 
and avg reward is 28.15\ntotal reward after 19 steps is 29.0 and avg reward is 28.15\ntotal reward after 19 steps is 28.0 and avg reward is 31.45\ntotal reward after 19 steps is 29.0 and avg reward is 28.15\ntotal reward after 19 steps is 34.0 and avg reward is 32.0\ntotal reward after 19 steps is 48.0 and avg reward is 29.1\ntotal reward after 19 steps is 94.0 and avg reward is 31.5\n10\n10\n10\nal16.676000595092773\ncl518.5724487304688\ntotal reward after 20 steps is 18.0 and avg reward is 27.666666666666668\ntotal reward after 20 steps is 18.0 and avg reward is 27.666666666666668\ntotal reward after 20 steps is 18.0 and avg reward is 27.666666666666668\ntotal reward after 20 steps is 18.0 and avg reward is 27.666666666666668\ntotal reward after 20 steps is 18.0 and avg reward is 27.666666666666668\ntotal reward after 20 steps is 24.0 and avg reward is 28.857142857142858\ntotal reward after 20 steps is 35.0 and avg reward is 32.142857142857146\ntotal reward after 20 steps is 44.0 and avg reward is 28.904761904761905\ntotal reward after 20 steps is 53.0 and avg reward is 32.523809523809526\ntotal reward after 20 steps is 51.0 and avg reward is 32.38095238095238\n10\n10\n10\nal12.6957368850708\ncl263.06439208984375\ntotal reward after 21 steps is 14.0 and avg reward is 31.318181818181817\ntotal reward after 21 steps is 17.0 and avg reward is 31.681818181818183\ntotal reward after 21 steps is 15.0 and avg reward is 31.727272727272727\ntotal reward after 21 steps is 24.0 and avg reward is 27.5\ntotal reward after 21 steps is 24.0 and avg reward is 27.5\ntotal reward after 21 steps is 24.0 and avg reward is 27.5\ntotal reward after 21 steps is 24.0 and avg reward is 27.5\ntotal reward after 21 steps is 24.0 and avg reward is 27.5\ntotal reward after 21 steps is 36.0 and avg reward is 29.181818181818183\ntotal reward after 21 steps is 98.0 and avg reward is 32.04545454545455\n10\n10\n10\nal15.178759574890137\ncl566.1830444335938\ntotal reward after 22 steps is 29.0 and avg reward is 27.565217391304348\ntotal reward after 22 steps is 29.0 and avg reward is 27.565217391304348\ntotal reward after 22 steps is 29.0 and avg reward is 27.565217391304348\ntotal reward after 22 steps is 29.0 and avg reward is 27.565217391304348\ntotal reward after 22 steps is 35.0 and avg reward is 27.82608695652174\ntotal reward after 22 steps is 42.0 and avg reward is 29.73913043478261\ntotal reward after 22 steps is 71.0 and avg reward is 33.391304347826086\ntotal reward after 22 steps is 51.0 and avg reward is 32.56521739130435\ntotal reward after 22 steps is 61.0 and avg reward is 33.30434782608695\ntotal reward after 22 steps is 60.0 and avg reward is 32.56521739130435\n10\n10\n10\nal16.99210548400879\ncl451.7137451171875\ntotal reward after 23 steps is 13.0 and avg reward is 26.958333333333332\ntotal reward after 23 steps is 13.0 and avg reward is 26.958333333333332\ntotal reward after 23 steps is 34.0 and avg reward is 27.833333333333332\ntotal reward after 23 steps is 13.0 and avg reward is 26.958333333333332\ntotal reward after 23 steps is 39.0 and avg reward is 32.833333333333336\ntotal reward after 23 steps is 39.0 and avg reward is 32.833333333333336\ntotal reward after 23 steps is 46.0 and avg reward is 30.416666666666668\ntotal reward after 23 steps is 43.0 and avg reward is 28.458333333333332\ntotal reward after 23 steps is 35.0 and avg reward is 33.458333333333336\ntotal reward after 23 steps is 72.0 and avg reward is 34.916666666666664\n10\n10\n10\nal15.108821868896484\ncl374.4198913574219\ntotal 
reward after 24 steps is 17.0 and avg reward is 32.2\ntotal reward after 24 steps is 17.0 and avg reward is 32.2\ntotal reward after 24 steps is 17.0 and avg reward is 28.0\ntotal reward after 24 steps is 23.0 and avg reward is 26.8\ntotal reward after 24 steps is 22.0 and avg reward is 27.6\ntotal reward after 24 steps is 34.0 and avg reward is 27.24\ntotal reward after 24 steps is 40.0 and avg reward is 35.12\ntotal reward after 24 steps is 23.0 and avg reward is 26.8\ntotal reward after 24 steps is 26.0 and avg reward is 30.24\ntotal reward after 24 steps is 58.0 and avg reward is 34.44\n10\n10\n10\nal9.800870895385742\ncl196.19961547851562\ntotal reward after 25 steps is 36.0 and avg reward is 32.34615384615385\ntotal reward after 25 steps is 17.0 and avg reward is 26.423076923076923\ntotal reward after 25 steps is 17.0 and avg reward is 26.423076923076923\ntotal reward after 25 steps is 27.0 and avg reward is 30.115384615384617\ntotal reward after 25 steps is 28.0 and avg reward is 34.19230769230769\ntotal reward after 25 steps is 36.0 and avg reward is 32.34615384615385\ntotal reward after 25 steps is 42.0 and avg reward is 27.807692307692307\ntotal reward after 25 steps is 48.0 and avg reward is 35.61538461538461\ntotal reward after 25 steps is 53.0 and avg reward is 28.576923076923077\ntotal reward after 25 steps is 61.0 and avg reward is 29.26923076923077\n10\n10\n10\nal12.2647066116333\ncl289.5592041015625\ntotal reward after 26 steps is 10.0 and avg reward is 27.88888888888889\ntotal reward after 26 steps is 48.0 and avg reward is 32.925925925925924\ntotal reward after 26 steps is 16.0 and avg reward is 26.037037037037038\ntotal reward after 26 steps is 18.0 and avg reward is 29.666666666666668\ntotal reward after 26 steps is 13.0 and avg reward is 34.77777777777778\ntotal reward after 26 steps is 16.0 and avg reward is 26.037037037037038\ntotal reward after 26 steps is 29.0 and avg reward is 27.85185185185185\ntotal reward after 26 steps is 39.0 and avg reward is 34.370370370370374\ntotal reward after 26 steps is 33.0 and avg reward is 29.40740740740741\ntotal reward after 26 steps is 50.0 and avg reward is 33.0\n10\n10\n10\nal11.42040729522705\ncl232.58692932128906\ntotal reward after 27 steps is 31.0 and avg reward is 28.0\ntotal reward after 27 steps is 13.0 and avg reward is 33.607142857142854\ntotal reward after 27 steps is 18.0 and avg reward is 25.75\ntotal reward after 27 steps is 18.0 and avg reward is 25.75\ntotal reward after 27 steps is 17.0 and avg reward is 34.142857142857146\ntotal reward after 27 steps is 24.0 and avg reward is 27.714285714285715\ntotal reward after 27 steps is 39.0 and avg reward is 30.0\ntotal reward after 27 steps is 34.0 and avg reward is 29.571428571428573\ntotal reward after 27 steps is 38.0 and avg reward is 33.17857142857143\ntotal reward after 27 steps is 47.0 and avg reward is 33.42857142857143\n10\n10\n10\nal10.39838981628418\ncl191.50711059570312\ntotal reward after 28 steps is 15.0 and avg reward is 27.275862068965516\ntotal reward after 28 steps is 16.0 and avg reward is 29.103448275862068\ntotal reward after 28 steps is 16.0 and avg reward is 32.58620689655172\ntotal reward after 28 steps is 18.0 and avg reward is 27.655172413793103\ntotal reward after 28 steps is 28.0 and avg reward is 29.93103448275862\ntotal reward after 28 steps is 27.0 and avg reward is 33.206896551724135\ntotal reward after 28 steps is 31.0 and avg reward is 33.51724137931034\ntotal reward after 28 steps is 39.0 and avg reward is 34.310344827586206\ntotal 
[Condensed actor–critic training log. Ten parallel workers each print one line per training step in the form "total reward after <step> steps is <episode reward> and avg reward is <per-worker running mean>"; each step's block ends with a value "10" printed three times (apparently the number of workers/trajectories), an "al…" line, and a "cl…" line (plausibly the actor and critic losses for that update). Representative verbatim lines from the excerpt, which covers steps 28–123:]

total reward after 28 steps is 40.0 and avg reward is 26.24137931034483
al10.092854499816895
cl172.6815948486328
total reward after 60 steps is 111.0 and avg reward is 48.78688524590164
al27.853195190429688
cl1608.57275390625
total reward after 100 steps is 81.0 and avg reward is 74.39
al40.79121398925781
cl3934.298583984375
total reward after 123 steps is 152.0 and avg reward is 80.46
[log truncated mid-step 123]

[Over the excerpt, the per-worker average reward climbs from roughly 26 at step 28 to roughly 80–90 by step 123, with individual episodes increasingly reaching the 200-step cap; the "al" and "cl" values grow alongside episode length.]
reward after 123 steps is 140.0 and avg reward is 73.59\ntotal reward after 123 steps is 163.0 and avg reward is 92.29\ntotal reward after 123 steps is 165.0 and avg reward is 91.35\ntotal reward after 123 steps is 200.0 and avg reward is 90.09\ntotal reward after 123 steps is 177.0 and avg reward is 93.24\ntotal reward after 123 steps is 200.0 and avg reward is 79.96\n10\n10\n10\nal45.476139068603516\ncl4215.32373046875\ntotal reward after 124 steps is 19.0 and avg reward is 91.32\ntotal reward after 124 steps is 41.0 and avg reward is 78.1\ntotal reward after 124 steps is 68.0 and avg reward is 92.39\ntotal reward after 124 steps is 112.0 and avg reward is 91.04\ntotal reward after 124 steps is 113.0 and avg reward is 85.83\ntotal reward after 124 steps is 137.0 and avg reward is 94.38\ntotal reward after 124 steps is 167.0 and avg reward is 75.09\ntotal reward after 124 steps is 195.0 and avg reward is 81.68\ntotal reward after 124 steps is 200.0 and avg reward is 78.87\ntotal reward after 124 steps is 184.0 and avg reward is 81.9\n10\n10\n10\nal43.225982666015625\ncl4085.884765625\ntotal reward after 125 steps is 42.0 and avg reward is 74.9\ntotal reward after 125 steps is 95.0 and avg reward is 93.06\ntotal reward after 125 steps is 110.0 and avg reward is 95.31\ntotal reward after 125 steps is 131.0 and avg reward is 92.1\ntotal reward after 125 steps is 137.0 and avg reward is 79.97\ntotal reward after 125 steps is 184.0 and avg reward is 79.52\ntotal reward after 125 steps is 200.0 and avg reward is 92.68\ntotal reward after 125 steps is 200.0 and avg reward is 87.47\ntotal reward after 125 steps is 189.0 and avg reward is 83.4\ntotal reward after 125 steps is 200.0 and avg reward is 83.42\n10\n10\n10\nal47.694332122802734\ncl4748.7998046875\ntotal reward after 126 steps is 86.0 and avg reward is 75.43\ntotal reward after 126 steps is 87.0 and avg reward is 93.54\ntotal reward after 126 steps is 90.0 and avg reward is 93.08\ntotal reward after 126 steps is 102.0 and avg reward is 80.25\ntotal reward after 126 steps is 115.0 and avg reward is 96.3\ntotal reward after 126 steps is 123.0 and avg reward is 84.52\ntotal reward after 126 steps is 144.0 and avg reward is 93.44\ntotal reward after 126 steps is 155.0 and avg reward is 84.79\ntotal reward after 126 steps is 199.0 and avg reward is 81.78\ntotal reward after 126 steps is 200.0 and avg reward is 88.99\n10\n10\n10\nal40.659549713134766\ncl3539.611083984375\ntotal reward after 127 steps is 92.0 and avg reward is 80.93\ntotal reward after 127 steps is 106.0 and avg reward is 94.19\ntotal reward after 127 steps is 123.0 and avg reward is 97.35\ntotal reward after 127 steps is 133.0 and avg reward is 85.94\ntotal reward after 127 steps is 134.0 and avg reward is 94.04\ntotal reward after 127 steps is 174.0 and avg reward is 95.15\ntotal reward after 127 steps is 164.0 and avg reward is 90.16\ntotal reward after 127 steps is 195.0 and avg reward is 77.04\ntotal reward after 127 steps is 200.0 and avg reward is 86.35\ntotal reward after 127 steps is 200.0 and avg reward is 83.39\n10\n10\n10\nal44.7470817565918\ncl4324.79931640625\ntotal reward after 128 steps is 66.0 and avg reward is 81.44\ntotal reward after 128 steps is 22.0 and avg reward is 94.23\ntotal reward after 128 steps is 66.0 and avg reward is 86.62\ntotal reward after 128 steps is 66.0 and avg reward is 95.5\ntotal reward after 128 steps is 79.0 and avg reward is 97.74\ntotal reward after 128 steps is 115.0 and avg reward is 95.03\ntotal reward after 128 steps is 136.0 
and avg reward is 78.24\ntotal reward after 128 steps is 156.0 and avg reward is 91.45\ntotal reward after 128 steps is 200.0 and avg reward is 85.11\ntotal reward after 128 steps is 200.0 and avg reward is 87.54\n10\n10\n10\nal39.392364501953125\ncl3573.899658203125\ntotal reward after 129 steps is 16.0 and avg reward is 95.24\ntotal reward after 129 steps is 80.0 and avg reward is 88.07\ntotal reward after 129 steps is 95.0 and avg reward is 82.03\ntotal reward after 129 steps is 116.0 and avg reward is 85.55\ntotal reward after 129 steps is 129.0 and avg reward is 79.11\ntotal reward after 129 steps is 131.0 and avg reward is 87.28\ntotal reward after 129 steps is 147.0 and avg reward is 96.28\ntotal reward after 129 steps is 173.0 and avg reward is 99.2\ntotal reward after 129 steps is 200.0 and avg reward is 92.57\ntotal reward after 129 steps is 200.0 and avg reward is 95.75\n10\n10\n10\nal40.10715866088867\ncl3589.30224609375\ntotal reward after 130 steps is 43.0 and avg reward is 95.8\ntotal reward after 130 steps is 43.0 and avg reward is 99.48\ntotal reward after 130 steps is 111.0 and avg reward is 96.8\ntotal reward after 130 steps is 127.0 and avg reward is 93.4\ntotal reward after 130 steps is 135.0 and avg reward is 79.92\ntotal reward after 130 steps is 200.0 and avg reward is 89.92\ntotal reward after 130 steps is 200.0 and avg reward is 96.96\ntotal reward after 130 steps is 200.0 and avg reward is 83.55\ntotal reward after 130 steps is 200.0 and avg reward is 87.28\ntotal reward after 130 steps is 200.0 and avg reward is 89.11\n10\n10\n10\nal47.08544921875\ncl4969.9912109375\ntotal reward after 131 steps is 27.0 and avg reward is 89.95\ntotal reward after 131 steps is 29.0 and avg reward is 93.06\ntotal reward after 131 steps is 109.0 and avg reward is 89.96\ntotal reward after 131 steps is 112.0 and avg reward is 97.25\ntotal reward after 131 steps is 118.0 and avg reward is 80.87\ntotal reward after 131 steps is 167.0 and avg reward is 88.66\ntotal reward after 131 steps is 195.0 and avg reward is 97.52\ntotal reward after 131 steps is 178.0 and avg reward is 98.26\ntotal reward after 131 steps is 172.0 and avg reward is 100.88\ntotal reward after 131 steps is 200.0 and avg reward is 85.28\n10\n10\n10\nal44.041465759277344\ncl4292.3544921875\ntotal reward after 132 steps is 45.0 and avg reward is 90.09\ntotal reward after 132 steps is 86.0 and avg reward is 90.64\ntotal reward after 132 steps is 68.0 and avg reward is 93.44\ntotal reward after 132 steps is 121.0 and avg reward is 89.14\ntotal reward after 132 steps is 146.0 and avg reward is 98.65\ntotal reward after 132 steps is 165.0 and avg reward is 99.7\ntotal reward after 132 steps is 183.0 and avg reward is 98.72\ntotal reward after 132 steps is 165.0 and avg reward is 86.37\ntotal reward after 132 steps is 200.0 and avg reward is 102.11\ntotal reward after 132 steps is 195.0 and avg reward is 82.32\n10\n10\n10\nal43.68299102783203\ncl4128.34521484375\ntotal reward after 133 steps is 40.0 and avg reward is 98.69\ntotal reward after 133 steps is 34.0 and avg reward is 89.98\ntotal reward after 133 steps is 60.0 and avg reward is 99.53\ntotal reward after 133 steps is 66.0 and avg reward is 102.15\ntotal reward after 133 steps is 156.0 and avg reward is 90.43\ntotal reward after 133 steps is 180.0 and avg reward is 91.99\ntotal reward after 133 steps is 187.0 and avg reward is 87.43\ntotal reward after 133 steps is 192.0 and avg reward is 100.13\ntotal reward after 133 steps is 200.0 and avg reward is 84.03\ntotal 
reward after 133 steps is 200.0 and avg reward is 94.96\n10\n10\n10\nal46.188114166259766\ncl4740.99267578125\ntotal reward after 134 steps is 29.0 and avg reward is 90.3\ntotal reward after 134 steps is 65.0 and avg reward is 99.78\ntotal reward after 134 steps is 109.0 and avg reward is 92.66\ntotal reward after 134 steps is 103.0 and avg reward is 95.78\ntotal reward after 134 steps is 136.0 and avg reward is 99.72\ntotal reward after 134 steps is 145.0 and avg reward is 88.13\ntotal reward after 134 steps is 150.0 and avg reward is 85.06\ntotal reward after 134 steps is 170.0 and avg reward is 101.34\ntotal reward after 134 steps is 167.0 and avg reward is 91.26\ntotal reward after 134 steps is 200.0 and avg reward is 103.21\n10\n10\n10\nal39.51399612426758\ncl3522.70947265625\ntotal reward after 135 steps is 45.0 and avg reward is 92.94\ntotal reward after 135 steps is 94.0 and avg reward is 91.99\ntotal reward after 135 steps is 93.0 and avg reward is 103.73\ntotal reward after 135 steps is 121.0 and avg reward is 89.0\ntotal reward after 135 steps is 146.0 and avg reward is 86.31\ntotal reward after 135 steps is 135.0 and avg reward is 100.55\ntotal reward after 135 steps is 200.0 and avg reward is 103.13\ntotal reward after 135 steps is 200.0 and avg reward is 91.92\ntotal reward after 135 steps is 200.0 and avg reward is 97.07\ntotal reward after 135 steps is 200.0 and avg reward is 101.32\n10\n10\n10\nal46.74763488769531\ncl4563.87744140625\ntotal reward after 136 steps is 50.0 and avg reward is 89.13\ntotal reward after 136 steps is 53.0 and avg reward is 86.53\ntotal reward after 136 steps is 97.0 and avg reward is 92.2\ntotal reward after 136 steps is 97.0 and avg reward is 97.52\ntotal reward after 136 steps is 100.0 and avg reward is 92.8\ntotal reward after 136 steps is 113.0 and avg reward is 101.22\ntotal reward after 136 steps is 163.0 and avg reward is 102.65\ntotal reward after 136 steps is 164.0 and avg reward is 104.21\ntotal reward after 136 steps is 200.0 and avg reward is 94.08\ntotal reward after 136 steps is 191.0 and avg reward is 104.98\n10\n10\n10\nal39.08695983886719\ncl3528.6396484375\ntotal reward after 137 steps is 46.0 and avg reward is 89.29\ntotal reward after 137 steps is 84.0 and avg reward is 97.84\ntotal reward after 137 steps is 94.0 and avg reward is 94.72\ntotal reward after 137 steps is 114.0 and avg reward is 87.47\ntotal reward after 137 steps is 143.0 and avg reward is 105.46\ntotal reward after 137 steps is 148.0 and avg reward is 93.33\ntotal reward after 137 steps is 179.0 and avg reward is 106.34\ntotal reward after 137 steps is 162.0 and avg reward is 102.43\ntotal reward after 137 steps is 180.0 and avg reward is 94.09\ntotal reward after 137 steps is 200.0 and avg reward is 104.4\n10\n10\n10\nal40.87954330444336\ncl3756.89453125\ntotal reward after 138 steps is 29.0 and avg reward is 88.77\ntotal reward after 138 steps is 36.0 and avg reward is 94.25\ntotal reward after 138 steps is 104.0 and avg reward is 93.8\ntotal reward after 138 steps is 113.0 and avg reward is 105.15\ntotal reward after 138 steps is 130.0 and avg reward is 106.21\ntotal reward after 138 steps is 181.0 and avg reward is 96.39\ntotal reward after 138 steps is 200.0 and avg reward is 104.08\ntotal reward after 138 steps is 183.0 and avg reward is 99.44\ntotal reward after 138 steps is 200.0 and avg reward is 89.21\ntotal reward after 138 steps is 200.0 and avg reward is 107.51\n10\n10\n10\nal51.758026123046875\ncl4565.3310546875\ntotal reward after 139 steps is 
34.0 and avg reward is 89.07\ntotal reward after 139 steps is 49.0 and avg reward is 93.84\ntotal reward after 139 steps is 49.0 and avg reward is 96.25\ntotal reward after 139 steps is 49.0 and avg reward is 88.37\ntotal reward after 139 steps is 82.0 and avg reward is 108.02\ntotal reward after 139 steps is 91.0 and avg reward is 94.38\ntotal reward after 139 steps is 136.0 and avg reward is 107.35\ntotal reward after 139 steps is 138.0 and avg reward is 100.35\ntotal reward after 139 steps is 200.0 and avg reward is 106.92\ntotal reward after 139 steps is 200.0 and avg reward is 105.51\n10\n10\n10\nal38.56491470336914\ncl3504.898681640625\ntotal reward after 140 steps is 31.0 and avg reward is 88.89\ntotal reward after 140 steps is 53.0 and avg reward is 107.31\ntotal reward after 140 steps is 76.0 and avg reward is 105.85\ntotal reward after 140 steps is 80.0 and avg reward is 96.8\ntotal reward after 140 steps is 80.0 and avg reward is 94.47\ntotal reward after 140 steps is 86.0 and avg reward is 107.94\ntotal reward after 140 steps is 117.0 and avg reward is 108.72\ntotal reward after 140 steps is 200.0 and avg reward is 95.81\ntotal reward after 140 steps is 200.0 and avg reward is 101.6\ntotal reward after 140 steps is 200.0 and avg reward is 89.96\n10\n10\n10\nal39.822723388671875\ncl3913.77685546875\ntotal reward after 141 steps is 42.0 and avg reward is 90.12\ntotal reward after 141 steps is 46.0 and avg reward is 88.59\ntotal reward after 141 steps is 89.0 and avg reward is 96.39\ntotal reward after 141 steps is 111.0 and avg reward is 108.05\ntotal reward after 141 steps is 146.0 and avg reward is 102.34\ntotal reward after 141 steps is 166.0 and avg reward is 110.14\ntotal reward after 141 steps is 196.0 and avg reward is 96.18\ntotal reward after 141 steps is 200.0 and avg reward is 107.38\ntotal reward after 141 steps is 200.0 and avg reward is 98.45\ntotal reward after 141 steps is 200.0 and avg reward is 109.71\n10\n10\n10\nal45.40479278564453\ncl4517.85302734375\ntotal reward after 142 steps is 44.0 and avg reward is 107.25\ntotal reward after 142 steps is 72.0 and avg reward is 98.76\ntotal reward after 142 steps is 84.0 and avg reward is 90.14\ntotal reward after 142 steps is 133.0 and avg reward is 103.12\ntotal reward after 142 steps is 139.0 and avg reward is 97.61\ntotal reward after 142 steps is 143.0 and avg reward is 89.77\ntotal reward after 142 steps is 144.0 and avg reward is 97.45\ntotal reward after 142 steps is 200.0 and avg reward is 109.46\ntotal reward after 142 steps is 200.0 and avg reward is 111.59\ntotal reward after 142 steps is 197.0 and avg reward is 110.98\n10\n10\n10\nal42.45890426635742\ncl4059.337646484375\ntotal reward after 143 steps is 98.0 and avg reward is 99.27\ntotal reward after 143 steps is 99.0 and avg reward is 102.64\ntotal reward after 143 steps is 100.0 and avg reward is 97.79\ntotal reward after 143 steps is 91.0 and avg reward is 109.69\ntotal reward after 143 steps is 124.0 and avg reward is 98.42\ntotal reward after 143 steps is 135.0 and avg reward is 111.89\ntotal reward after 143 steps is 140.0 and avg reward is 90.29\ntotal reward after 143 steps is 200.0 and avg reward is 109.11\ntotal reward after 143 steps is 189.0 and avg reward is 113.05\ntotal reward after 143 steps is 170.0 and avg reward is 91.07\n10\n10\n10\nal40.04091262817383\ncl3532.904052734375\ntotal reward after 144 steps is 50.0 and avg reward is 102.71\ntotal reward after 144 steps is 100.0 and avg reward is 109.75\ntotal reward after 144 steps is 109.0 
and avg reward is 110.32\ntotal reward after 144 steps is 156.0 and avg reward is 91.61\ntotal reward after 144 steps is 175.0 and avg reward is 100.2\ntotal reward after 144 steps is 175.0 and avg reward is 99.68\ntotal reward after 144 steps is 175.0 and avg reward is 92.64\ntotal reward after 144 steps is 195.0 and avg reward is 99.57\ntotal reward after 144 steps is 200.0 and avg reward is 114.93\ntotal reward after 144 steps is 200.0 and avg reward is 113.39\n10\n10\n10\nal46.468204498291016\ncl4543.5107421875\ntotal reward after 145 steps is 89.0 and avg reward is 115.47\ntotal reward after 145 steps is 144.0 and avg reward is 113.97\ntotal reward after 145 steps is 143.0 and avg reward is 93.54\ntotal reward after 145 steps is 157.0 and avg reward is 100.67\ntotal reward after 145 steps is 163.0 and avg reward is 100.94\ntotal reward after 145 steps is 200.0 and avg reward is 101.76\ntotal reward after 145 steps is 187.0 and avg reward is 110.81\ntotal reward after 145 steps is 192.0 and avg reward is 111.51\ntotal reward after 145 steps is 200.0 and avg reward is 104.33\ntotal reward after 145 steps is 200.0 and avg reward is 92.83\n10\n10\n10\nal47.8980598449707\ncl4894.9580078125\ntotal reward after 146 steps is 102.0 and avg reward is 116.24\ntotal reward after 146 steps is 136.0 and avg reward is 101.67\ntotal reward after 146 steps is 148.0 and avg reward is 112.73\ntotal reward after 146 steps is 160.0 and avg reward is 104.78\ntotal reward after 146 steps is 177.0 and avg reward is 94.34\ntotal reward after 146 steps is 200.0 and avg reward is 112.33\ntotal reward after 146 steps is 200.0 and avg reward is 95.31\ntotal reward after 146 steps is 195.0 and avg reward is 115.43\ntotal reward after 146 steps is 200.0 and avg reward is 102.4\ntotal reward after 146 steps is 200.0 and avg reward is 103.5\n10\n10\n10\nal48.01845932006836\ncl5040.583984375\ntotal reward after 147 steps is 38.0 and avg reward is 101.95\ntotal reward after 147 steps is 99.0 and avg reward is 105.58\ntotal reward after 147 steps is 114.0 and avg reward is 112.57\ntotal reward after 147 steps is 116.0 and avg reward is 113.48\ntotal reward after 147 steps is 180.0 and avg reward is 117.64\ntotal reward after 147 steps is 189.0 and avg reward is 103.1\ntotal reward after 147 steps is 173.0 and avg reward is 95.63\ntotal reward after 147 steps is 195.0 and avg reward is 96.89\ntotal reward after 147 steps is 196.0 and avg reward is 117.0\ntotal reward after 147 steps is 200.0 and avg reward is 104.99\n10\n10\n10\nal44.14863967895508\ncl4537.62841796875\ntotal reward after 148 steps is 60.0 and avg reward is 103.15\ntotal reward after 148 steps is 69.0 and avg reward is 105.73\ntotal reward after 148 steps is 82.0 and avg reward is 117.81\ntotal reward after 148 steps is 95.0 and avg reward is 114.0\ntotal reward after 148 steps is 101.0 and avg reward is 105.31\ntotal reward after 148 steps is 137.0 and avg reward is 96.41\ntotal reward after 148 steps is 143.0 and avg reward is 97.72\ntotal reward after 148 steps is 169.0 and avg reward is 113.33\ntotal reward after 148 steps is 185.0 and avg reward is 103.5\ntotal reward after 148 steps is 195.0 and avg reward is 118.7\n10\n10\n10\nal36.780967712402344\ncl3335.349609375\ntotal reward after 149 steps is 41.0 and avg reward is 105.3\ntotal reward after 149 steps is 53.0 and avg reward is 102.99\ntotal reward after 149 steps is 76.0 and avg reward is 104.04\ntotal reward after 149 steps is 95.0 and avg reward is 98.21\ntotal reward after 149 steps is 148.0 
and avg reward is 97.74\ntotal reward after 149 steps is 146.0 and avg reward is 106.71\ntotal reward after 149 steps is 200.0 and avg reward is 120.28\ntotal reward after 149 steps is 200.0 and avg reward is 115.61\ntotal reward after 149 steps is 200.0 and avg reward is 119.46\ntotal reward after 149 steps is 200.0 and avg reward is 115.11\n10\n10\n10\nal46.03339385986328\ncl4627.2255859375\ntotal reward after 150 steps is 46.0 and avg reward is 115.18\ntotal reward after 150 steps is 69.0 and avg reward is 119.5\ntotal reward after 150 steps is 102.0 and avg reward is 105.88\ntotal reward after 150 steps is 147.0 and avg reward is 121.22\ntotal reward after 150 steps is 177.0 and avg reward is 116.99\ntotal reward after 150 steps is 178.0 and avg reward is 99.26\ntotal reward after 150 steps is 200.0 and avg reward is 104.44\ntotal reward after 150 steps is 200.0 and avg reward is 108.27\ntotal reward after 150 steps is 200.0 and avg reward is 99.93\ntotal reward after 150 steps is 200.0 and avg reward is 105.66\n10\n10\n10\nal46.68574523925781\ncl4838.70849609375\ntotal reward after 151 steps is 61.0 and avg reward is 108.27\ntotal reward after 151 steps is 64.0 and avg reward is 104.37\ntotal reward after 151 steps is 116.0 and avg reward is 122.14\ntotal reward after 151 steps is 128.0 and avg reward is 100.79\ntotal reward after 151 steps is 139.0 and avg reward is 100.01\ntotal reward after 151 steps is 139.0 and avg reward is 106.82\ntotal reward after 151 steps is 148.0 and avg reward is 107.09\ntotal reward after 151 steps is 200.0 and avg reward is 116.16\ntotal reward after 151 steps is 194.0 and avg reward is 121.25\ntotal reward after 151 steps is 200.0 and avg reward is 118.3\n10\n10\n10\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0df9396a394386a68f7097e5c971ef343242e72
5,741
ipynb
Jupyter Notebook
Algorithms.ipynb
SatyamChoubey/pythonPrograms
f0a35bfdd79e0d2ef78b1fb92dfa9b12a0afc401
[ "MIT" ]
null
null
null
Algorithms.ipynb
SatyamChoubey/pythonPrograms
f0a35bfdd79e0d2ef78b1fb92dfa9b12a0afc401
[ "MIT" ]
null
null
null
Algorithms.ipynb
SatyamChoubey/pythonPrograms
f0a35bfdd79e0d2ef78b1fb92dfa9b12a0afc401
[ "MIT" ]
null
null
null
24.223629
88
0.44069
[ [ [ "# Formulate an algorithm to check for a student has passed in exam or not?\ny = float(input(\"Enter the minimum marks required to pass in the exam : \"))\nx = float(input('Enter the marks scored by student in the exam : '))\nif (x >= y):\n print('This student is passed in the exam')\nelse:\n print('This student is failed in the exam')", "Enter the minimum marks required to pass in the exam : 100\nEnter the marks scored by student in the exam : 0\nThis student is failed in the exam\n" ], [ "# Tree Traversal Algorithms. It is of 3 types : In, Pre, Post - order traveral.\n\n# creating a Node class\nclass Node:\n def __init__(self,val):\n self.childleft = None\n self.childright = None\n self.nodedata = val\n\n# creating an instance of the Node class to construct the tree\nroot = Node(1)\nroot.childleft = Node(2)\nroot.childright = Node(3)\nroot.childleft.childleft = Node(4)\nroot.childleft.childright = Node(5)\n", "_____no_output_____" ], [ "# perform In-order traversal : left-root-right\ndef InOrd(root):\n if root:\n InOrd(root.childleft)\n print(root.nodedata)\n InOrd(root.childright)\n \nInOrd(root)", "4\n2\n5\n1\n3\n" ], [ "# Pre-order traversal : root-left-right\ndef PreOrd(root): \n if root:\n print(root.nodedata)\n PreOrd(root.childleft)\n PreOrd(root.childright)\n \nPreOrd(root)", "1\n2\n4\n5\n3\n" ], [ "# Post-order traversal : left-right-root\ndef PostOrd(root):\n if root:\n PostOrd(root.childleft)\n PostOrd(root.childright)\n print(root.nodedata)\n \nPostOrd(root)", "4\n5\n2\n3\n1\n" ], [ "# Sorting Algorithms\n\n# Merge sort : Divide and Conquer algorithm\n\n# Python program for implementation of MergeSort \ndef mergeSort(arr): \n if len(arr) >1: \n mid = len(arr)//2 # Finding the mid of the array \n L = arr[:mid] # Dividing the array elements \n R = arr[mid:] # into 2 halves \n \n mergeSort(L) # Sorting the first half \n mergeSort(R) # Sorting the second half \n \n i = j = k = 0\n \n # Copy data to temp arrays L[] and R[] \n while i < len(L) and j < len(R): \n if L[i] < R[j]: \n arr[k] = L[i] \n i+= 1\n else: \n arr[k] = R[j] \n j+= 1\n k+= 1\n \n # Checking if any element was left \n while i < len(L): \n arr[k] = L[i] \n i+= 1\n k+= 1\n \n while j < len(R): \n arr[k] = R[j] \n j+= 1\n k+= 1\n \n# Code to print the list \ndef printList(arr): \n for i in range(len(arr)): \n print(arr[i], end =\" \") \n print() \n \n# driver code to test the above code \nif __name__ == '__main__': \n arr = [12, 11, 13, 5, 6, 7] \n print (\"Given array is\", end =\"\\n\") \n printList(arr) \n mergeSort(arr) \n print(\"Sorted array is: \", end =\"\\n\") \n printList(arr) \n \n ", "Given array is\n12 11 13 5 6 7 \nSorted array is: \n5 6 7 11 12 13 \n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d0dfb1c47dcecd062f4d1399bb5a554af75cbcd0
194,128
ipynb
Jupyter Notebook
Lecture 19 PCA/Let-s Code our Own PCA/Own PCA code.ipynb
Paraskk/Data-Science-and-Machine-Leaning-
b29223a82ea39f7860d3729d7297bac2a4724c8f
[ "MIT" ]
1
2020-08-06T07:33:59.000Z
2020-08-06T07:33:59.000Z
Lecture 19 PCA/Let-s Code our Own PCA/Own PCA code.ipynb
Udaysonu/Coding-Ninjas-Machine-Learning
4fd6b4b62f07b28dbe80c084ad820630f2351a76
[ "MIT" ]
null
null
null
Lecture 19 PCA/Let-s Code our Own PCA/Own PCA code.ipynb
Udaysonu/Coding-Ninjas-Machine-Learning
4fd6b4b62f07b28dbe80c084ad820630f2351a76
[ "MIT" ]
2
2020-08-27T13:03:33.000Z
2020-09-01T17:34:23.000Z
372.606526
89,034
0.915396
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA", "_____no_output_____" ], [ "np.random.seed(2343243)", "_____no_output_____" ], [ "mean_vec1 = np.array([0,0,0])\ncov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])\nclass1 = np.random.multivariate_normal(mean_vec1, cov_mat1, 100)", "_____no_output_____" ], [ "mean_vec2 = np.array([1,1,1])\ncov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])\nclass2 = np.random.multivariate_normal(mean_vec2, cov_mat2, 100)", "_____no_output_____" ], [ "from matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D, proj3d\n\nfig = plt.figure(figsize=(8,8))\nax = fig.add_subplot(111,projection='3d')\nax.plot(class1[:, 0], class1[:, 1], class1[:, 2], 'o')\nax.plot(class2[:, 0], class2[:, 1], class2[:, 2], '^')\nplt.show()", "_____no_output_____" ], [ "all_data = np.concatenate((class1, class2))", "_____no_output_____" ], [ "pca = PCA(n_components = 2)\ntransformed_data = pca.fit_transform(all_data)\ntransformed_data", "_____no_output_____" ], [ "pca.components_", "_____no_output_____" ], [ "plt.plot(transformed_data[0:100,0],transformed_data[0:100,1],\"o\")\nplt.plot(transformed_data[100:200,0],transformed_data[100:200,1],\"^\")\nplt.show()", "_____no_output_____" ], [ "X_approx = pca.inverse_transform(transformed_data)\nfig = plt.figure(figsize=(8,8))\nax = fig.add_subplot(111,projection='3d')\nax.plot(X_approx[:, 0], X_approx[:, 1], X_approx[:, 2], '^')\nplt.show()", "_____no_output_____" ], [ "a = -0.409689\nb = 7.2827\nc = - 7.1008\ni = 10\na * X_approx[i][0] + b* X_approx[i][1] + c * X_approx[i][2]", "_____no_output_____" ] ], [ [ "## Own PCA code", "_____no_output_____" ] ], [ [ "all_data_t=all_data.T\ncov=np.cov(all_data_t)\ncov", "_____no_output_____" ], [ "np.linalg.eig(cov)", "_____no_output_____" ], [ "eig_val,eig_vectors=np.linalg.eig(cov)", "_____no_output_____" ], [ "eig_val_vector_pair=[]\nfor i in range(len(eig_val)):\n eig_vec=eig_vectors[:,i]\n eig_val_vector_pair.append((eig_val[i],eig_vec))", "_____no_output_____" ], [ "eig_val_vector_pair.sort(reverse=True)\neig_val_vector_pair", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d0dfd07377df2f3b1966949c54244d4724310c23
57,293
ipynb
Jupyter Notebook
Mission_To_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
pharnb/web-scraping-challenge
641d9da87201c337b01ffc3ef88943156c25a56e
[ "ADSL" ]
null
null
null
Mission_To_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
pharnb/web-scraping-challenge
641d9da87201c337b01ffc3ef88943156c25a56e
[ "ADSL" ]
null
null
null
Mission_To_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb
pharnb/web-scraping-challenge
641d9da87201c337b01ffc3ef88943156c25a56e
[ "ADSL" ]
null
null
null
61.014909
9,588
0.516346
[ [ [ "import pymongo\nfrom bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nfrom flask import Flask", "_____no_output_____" ], [ "# acquire full html contents to search through\nurl = 'https://mars.nasa.gov/news/'\nresponse = requests.get(url)\n# response.text\nsoup = BeautifulSoup(response.text,'html.parser')\n# print(soup.prettify())", "<!DOCTYPE html>\n<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"/>\n <!-- Always force latest IE rendering engine or request Chrome Frame -->\n <meta content=\"IE=edge,chrome=1\" http-equiv=\"X-UA-Compatible\"/>\n <!-- Responsiveness -->\n <meta content=\"width=device-width, initial-scale=1.0\" name=\"viewport\"/>\n <!-- Favicon -->\n <link href=\"/apple-touch-icon.png\" rel=\"apple-touch-icon\" sizes=\"180x180\"/>\n <link href=\"/favicon-32x32.png\" rel=\"icon\" sizes=\"32x32\" type=\"image/png\"/>\n <link href=\"/favicon-16x16.png\" rel=\"icon\" sizes=\"16x16\" type=\"image/png\"/>\n <link href=\"/manifest.json\" rel=\"manifest\"/>\n <link color=\"#e48b55\" href=\"/safari-pinned-tab.svg\" rel=\"mask-icon\"/>\n <meta content=\"#000000\" name=\"theme-color\"/>\n <meta content=\"authenticity_token\" name=\"csrf-param\">\n <meta content=\"bhVIvjjY1tPMcUqoV5lzHm5TUMnzvYmp1Llwy2KPEDVILkhDHsleIeAFVQgMirb2flD8YTHa8y5vmtufaX6q3A==\" name=\"csrf-token\">\n <title>\n News – NASA’s Mars Exploration Program\n </title>\n <meta content=\"NASA’s Mars Exploration Program \" property=\"og:site_name\"/>\n <meta content=\"mars.nasa.gov\" name=\"author\"/>\n <meta content=\"Mars, missions, NASA, rover, Curiosity, Opportunity, InSight, Mars Reconnaissance Orbiter, facts\" name=\"keywords\"/>\n <meta content=\"NASA’s real-time portal for Mars exploration, featuring the latest news, images, and discoveries from the Red Planet.\" name=\"description\"/>\n <meta content=\"NASA’s real-time portal for Mars exploration, featuring the latest news, images, and discoveries from the Red Planet.\" property=\"og:description\"/>\n <meta content=\"News – NASA’s Mars Exploration Program \" property=\"og:title\"/>\n <meta content=\"https://mars.nasa.gov/news\" property=\"og:url\"/>\n <meta content=\"article\" property=\"og:type\"/>\n <meta content=\"2017-09-22 19:53:22 UTC\" property=\"og:updated_time\"/>\n <meta content=\"https://mars.nasa.gov/system/site_config_values/meta_share_images/1_mars-nasa-gov.jpg\" property=\"og:image\"/>\n <meta content=\"https://mars.nasa.gov/system/site_config_values/meta_share_images/1_mars-nasa-gov.jpg\" name=\"twitter:image\"/>\n <link href=\"https://mars.nasa.gov/system/site_config_values/meta_share_images/1_mars-nasa-gov.jpg\" rel=\"image_src\"/>\n <meta content=\"195570401081308\" property=\"fb:app_id\"/>\n <link href=\"https://fonts.googleapis.com/css?family=Montserrat:200,300,400,500,600,700|Raleway:300,400\" rel=\"stylesheet\"/>\n <link href=\"/assets/public_manifest-9467122247163ea3bf3012b71b5b39bf2bce26b82917aca331fb838f7f4a4b7e.css\" media=\"all\" rel=\"stylesheet\">\n <link href=\"/assets/mbcms/vendor/jquery.fancybox3-d5d81bdfc05a59e4ea72bca1d8b7fcc399bd3b61f7c06af95a8a48795df69d7a.css\" media=\"screen\" rel=\"stylesheet\">\n <link href=\"/assets/gulp/print-240f8bfaa7f6402dfd6c49ee3c1ffea57a89ddd4c8c90e2f2a5c7d63c5753e32.css\" media=\"print\" rel=\"stylesheet\">\n <script src=\"/assets/public_manifest-b7762ffb108de93fcd0be3bfd82579b100241cc4ee1c087af5f886c0903244cf.js\">\n </script>\n <script 
src=\"/assets/mbcms/vendor/jquery.fancybox3-bd48876205805faa43a79e74b656191a4ad37809923b4f3247b571ba82d4458c.js\">\n </script>\n <script src=\"/assets/mb_manifest-a0ae601bc18c852649e350709ab440161da58529f782ae84172c21f8ea27b714.js\">\n </script>\n <!--[if gt IE 8]><!-->\n <script src=\"/assets/not_ie8_manifest.js\">\n </script>\n <!--[if !IE]>-->\n <script src=\"/assets/not_ie8_manifest.js\">\n </script>\n <!--<![endif]-->\n <!-- /twitter cards -->\n <meta content=\"summary_large_image\" name=\"twitter:card\"/>\n <meta content=\"News \" name=\"twitter:title\"/>\n <meta content=\"NASA’s real-time portal for Mars exploration, featuring the latest news, images, and discoveries from the Red Planet.\" name=\"twitter:description\"/>\n <meta content=\"https://mars.nasa.gov/system/site_config_values/meta_share_images/1_mars-nasa-gov.jpg\" name=\"twitter:image\"/>\n </link>\n </link>\n </link>\n </meta>\n </meta>\n </head>\n <body id=\"news\">\n <svg display=\"none\" height=\"0\" width=\"0\">\n <symbol height=\"30\" id=\"circle_plus\" viewbox=\"0 0 30 30\" width=\"30\">\n <g fill-rule=\"evenodd\" transform=\"translate(1 1)\">\n <circle cx=\"14\" cy=\"14\" fill=\"#fff\" fill-opacity=\".1\" fill-rule=\"nonzero\" r=\"14\" stroke=\"inherit\" stroke-width=\"1\">\n </circle>\n <path class=\"the_plus\" d=\"m18.856 12.96v1.738h-4.004v3.938h-1.848v-3.938h-4.004v-1.738h4.004v-3.96h1.848v3.96z\" fill=\"inherit\" stroke-width=\"0\">\n </path>\n </g>\n </symbol>\n <symbol height=\"30\" id=\"circle_arrow\" viewbox=\"0 0 30 30\" width=\"30\" xmlns=\"http://www.w3.org/2000/svg\">\n <g transform=\"translate(1 1)\">\n <circle cx=\"14\" cy=\"14\" fill=\"#fff\" fill-opacity=\".1\" r=\"14\" stroke=\"inherit\" stroke-width=\"1\">\n </circle>\n <path class=\"the_arrow\" d=\"m8.5 15.00025h7.984l-2.342 2.42c-.189.197-.189.518 0 .715l.684.717c.188.197.494.197.684 0l4.35-4.506c.188-.199.188-.52 0-.717l-4.322-4.48c-.189-.199-.496-.199-.684 0l-.684.716c-.189.197-.189.519 0 .716l2.341 2.419h-8.011c-.276 0-.5.223-.5.5v1c0 .275.224.5.5.5z\" fill=\"inherit\" stroke-width=\"0\">\n </path>\n </g>\n </symbol>\n <symbol height=\"30\" id=\"circle_close\" viewbox=\"0 0 30 30\" width=\"30\">\n <g fill-rule=\"evenodd\" transform=\"translate(1 1)\">\n <circle cx=\"14\" cy=\"14\" fill=\"blue\" fill-opacity=\"1\" fill-rule=\"nonzero\" r=\"14\" stroke=\"inherit\" stroke-width=\"1\">\n </circle>\n <path class=\"the_plus\" d=\"m18.856 12.96v1.738h-4.004v3.938h-1.848v-3.938h-4.004v-1.738h4.004v-3.96h1.848v3.96z\" fill=\"inherit\" stroke-width=\"0\">\n </path>\n </g>\n </symbol>\n <symbol height=\"30\" id=\"circle_close_hover\" viewbox=\"0 0 30 30\" width=\"30\">\n <g fill-rule=\"evenodd\" transform=\"translate(1 1)\">\n <circle cx=\"14\" cy=\"14\" fill=\"white\" fill-opacity=\"1\" fill-rule=\"nonzero\" r=\"14\" stroke=\"inherit\" stroke-width=\"1\">\n </circle>\n <path class=\"the_plus\" d=\"m18.856 12.96v1.738h-4.004v3.938h-1.848v-3.938h-4.004v-1.738h4.004v-3.96h1.848v3.96z\" fill=\"inherit\" stroke-width=\"0\">\n </path>\n </g>\n </symbol>\n <symbol height=\"6\" id=\"chevron_down\" viewbox=\"0 0 10 6\" width=\"10\" xmlns=\"http://www.w3.org/2000/svg\">\n <path d=\"m59 7v2.72727273l5 3.27272727 5-3.27272727v-2.72727273l-5 3.2727273z\" transform=\"translate(-59 -7)\">\n </path>\n </symbol>\n <symbol height=\"16\" id=\"gear\" viewbox=\"0 0 16 16\" width=\"16\" xmlns=\"http://www.w3.org/2000/svg\">\n <path d=\"m68 9h-1.09c-.15-.91-.5-1.75-1.021-2.471l.761-.77c.39-.39.39-1.029 0-1.42-.391-.39-1.021-.39-1.41 
0l-.771.77c-.719-.519-1.469-.869-2.469-1.019v-1.09c0-.55-.45-1-1-1s-1 .45-1 1v1.09c-1 .15-1.75.5-2.47 1.02l-.77-.77c-.389-.39-1.029-.39-1.42 0-.39.391-.39 1.03 0 1.42l.771.77c-.521.72-.871 1.56-1.021 2.47h-1.09c-.55 0-1 .48-1 1.029 0 .551.45.971 1.12.971h.97c.15.91.5 1.75 1.021 2.471l-.771.769c-.39.39-.39 1.029 0 1.42.391.39 1.021.39 1.41 0l.78-.77c.72.52 1.47.87 2.47 1.02v1.09c0 .55.45 1 1 1s1-.45 1-1v-1.09c1-.15 1.75-.5 2.47-1.02l.771.77c.391.39 1.02.39 1.41 0 .39-.391.39-1.03 0-1.42l-.761-.76c.51-.72.87-1.56 1.02-2.48h1.09c.55 0 1-.45 1-1s-.45-1-1-1zm-7 4c-1.66 0-3-1.35-3-3s1.34-3 3-3c1.65 0 3 1.35 3 3s-1.35 3-3 3z\" fill=\"#a79693\" transform=\"translate(-53 -2)\">\n </path>\n </symbol>\n </svg>\n <div data-react-cache-id=\"BrowseHappier-0\" data-react-class=\"BrowseHappier\" data-react-props='{\"gt\":1,\"lt\":11}'>\n </div>\n <div data-react-cache-id=\"HiPO-0\" data-react-class=\"HiPO\" data-react-props=\"{}\">\n </div>\n <div id=\"main_container\">\n <div id=\"site_body\">\n <div class=\"site_header_area\">\n <header class=\"site_header\">\n <div class=\"brand_area\">\n <div class=\"brand1\">\n <a class=\"nasa_logo\" href=\"http://www.nasa.gov\" target=\"_blank\" title=\"visit nasa.gov\">\n NASA\n </a>\n </div>\n <div class=\"brand2\">\n <a class=\"top_logo\" href=\"https://science.nasa.gov/\" target=\"_blank\" title=\"Explore NASA Science\">\n NASA Science\n </a>\n <a class=\"sub_logo\" href=\"/mars-exploration/#\" title=\"Mars\">\n Mars Exploration Program\n </a>\n </div>\n <img alt=\"\" class=\"print_only print_logo\" src=\"/assets/[email protected]\"/>\n </div>\n <a class=\"visuallyhidden focusable\" href=\"#page\">\n Skip Navigation\n </a>\n <div class=\"right_header_container\">\n <a class=\"menu_button\" href=\"javascript:void(0);\" id=\"menu_button\">\n <span class=\"menu_icon\">\n menu\n </span>\n </a>\n <a class=\"modal_close\" id=\"modal_close\">\n <span class=\"modal_close_icon\">\n </span>\n </a>\n <div class=\"nav_area\">\n <div id=\"site_nav_container\">\n <nav class=\"site_nav\" data-react-cache-id=\"Meganav-0\" data-react-class=\"Meganav\" data-react-props=\"{&quot;nav_items&quot;:[{&quot;name&quot;:&quot;Mars Now&quot;,&quot;style&quot;:&quot;icon&quot;,&quot;li_class&quot;:&quot;nav_icon mars_now&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;link&quot;:&quot;/explore/mars-now&quot;,&quot;svg_icon_id&quot;:&quot;nav_icon&quot;,&quot;id&quot;:261,&quot;features&quot;:[{&quot;title&quot;:&quot;Mars Now&quot;,&quot;body&quot;:&quot;View a 3D visualization of all the missions exploring the Red Planet&quot;,&quot;image_src&quot;:&quot;/system/basic_html_elements/225_mars_now_nav.jpg&quot;,&quot;link&quot;:&quot;/explore/mars-now/&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;categories&quot;:[]}],&quot;title&quot;:&quot;&quot;,&quot;short_description&quot;:&quot;View the current location and spacecraft communications activity of operating landers, rovers and orbiters using the NASA’s Mars Relay Network.&quot;},{&quot;name&quot;:&quot;The Red Planet&quot;,&quot;link&quot;:&quot;/#red_planet&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[{&quot;items&quot;:[{&quot;name&quot;:&quot;Dashboard&quot;,&quot;link&quot;:&quot;/#red_planet/0&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:9},{&quot;name&quot;:&quot;Science Goals&quot;,&quot;link&quot;:&quot;/#red_planet/1&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:13},{&quot;name&quot;:&quot;The 
Planet&quot;,&quot;link&quot;:&quot;/#red_planet/2&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:14},{&quot;name&quot;:&quot;Atmosphere&quot;,&quot;link&quot;:&quot;/#red_planet/3&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:16},{&quot;name&quot;:&quot;Astrobiology&quot;,&quot;link&quot;:&quot;/#red_planet/4&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:17},{&quot;name&quot;:&quot;Past, Present, Future, Timeline&quot;,&quot;link&quot;:&quot;/#red_planet/5&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:18}]}],&quot;id&quot;:3,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null},{&quot;name&quot;:&quot;The Program&quot;,&quot;link&quot;:&quot;/#mars_exploration_program&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[{&quot;items&quot;:[{&quot;name&quot;:&quot;Mission Statement&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/0&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:8},{&quot;name&quot;:&quot;About the Program&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/1&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:42},{&quot;name&quot;:&quot;Organization&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/2&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:43},{&quot;name&quot;:&quot;Why Mars?&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/3&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:51},{&quot;name&quot;:&quot;Research Programs&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/4&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:44},{&quot;name&quot;:&quot;Planetary Resources&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/5&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:52},{&quot;name&quot;:&quot;Technologies&quot;,&quot;link&quot;:&quot;/#mars_exploration_program/6&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:56}]}],&quot;id&quot;:2,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null},{&quot;name&quot;:&quot;News \\u0026 Events&quot;,&quot;link&quot;:&quot;/#news_and_events&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[{&quot;items&quot;:[{&quot;name&quot;:&quot;News&quot;,&quot;link&quot;:&quot;/news&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:92},{&quot;name&quot;:&quot;Events&quot;,&quot;link&quot;:&quot;/events&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:93}]}],&quot;id&quot;:4,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null},{&quot;name&quot;:&quot;Multimedia&quot;,&quot;link&quot;:&quot;/#multimedia&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[{&quot;items&quot;:[{&quot;name&quot;:&quot;Images&quot;,&quot;link&quot;:&quot;/multimedia/images/&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:90},{&quot;name&quot;:&quot;Videos&quot;,&quot;link&quot;:&quot;/multimedia/videos/&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:91},{&quot;name&quot;:&quot;More 
Resources&quot;,&quot;link&quot;:&quot;/multimedia/more-resources/&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:413}]}],&quot;id&quot;:5,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null},{&quot;name&quot;:&quot;Missions&quot;,&quot;link&quot;:&quot;/#missions_gallery_subnav&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[{&quot;items&quot;:[{&quot;name&quot;:&quot;Past&quot;,&quot;link&quot;:&quot;/mars-exploration/missions/?category=167&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:38},{&quot;name&quot;:&quot;Present&quot;,&quot;link&quot;:&quot;/mars-exploration/missions/?category=170&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:59},{&quot;name&quot;:&quot;Future&quot;,&quot;link&quot;:&quot;/mars-exploration/missions/?category=171&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:60},{&quot;name&quot;:&quot;International Partners&quot;,&quot;link&quot;:&quot;/mars-exploration/partners&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;id&quot;:40}]}],&quot;id&quot;:6,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null},{&quot;name&quot;:&quot;More&quot;,&quot;link&quot;:&quot;/#more&quot;,&quot;target&quot;:&quot;_self&quot;,&quot;sections&quot;:[],&quot;id&quot;:7,&quot;meganav_style&quot;:&quot;&quot;,&quot;features&quot;:[],&quot;short_description&quot;:null}],&quot;gallery_subnav_items&quot;:[{&quot;thumb&quot;:&quot;/system/missions/list_view_images/23_PIA23764-RoverNamePlateonMars-320x240.jpg&quot;,&quot;id&quot;:23,&quot;title&quot;:&quot;Mars 2020 Perseverance Rover&quot;,&quot;description&quot;:&quot;A mission to investigate key questions about potential life on Mars. &quot;,&quot;date&quot;:&quot;July 17, 2020&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/mars2020/&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:null,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/mars2020/&quot;},{&quot;thumb&quot;:&quot;/system/missions/list_view_images/2_PIA14175-thmfeat.jpg&quot;,&quot;id&quot;:2,&quot;title&quot;:&quot;Curiosity Rover&quot;,&quot;description&quot;:&quot;The largest and most capable rover ever sent to Mars.&quot;,&quot;date&quot;:&quot;November 26, 2011&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/mars-science-laboratory&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:14.33,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/msl/home/&quot;},{&quot;thumb&quot;:&quot;/system/missions/list_view_images/21_PIA22743-320x240.jpg&quot;,&quot;id&quot;:21,&quot;title&quot;:&quot;InSight Lander&quot;,&quot;description&quot;:&quot;A mission to study the deep interior of Mars. 
&quot;,&quot;date&quot;:&quot;November 26, 2018&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/insight/&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:null,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/insight/&quot;},{&quot;thumb&quot;:&quot;/system/missions/list_view_images/6_maven_320x240.jpg&quot;,&quot;id&quot;:6,&quot;title&quot;:&quot;MAVEN&quot;,&quot;description&quot;:&quot;Measures Mars' atmosphere to understand its climate change.&quot;,&quot;date&quot;:&quot;November 18, 2013&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/maven&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:null,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/maven/&quot;},{&quot;thumb&quot;:&quot;/system/missions/list_view_images/8_MRO_320x240.jpg&quot;,&quot;id&quot;:8,&quot;title&quot;:&quot;Mars Reconnaissance Orbiter&quot;,&quot;description&quot;:&quot;Takes high-resolution imagery of Martian terrain with extraordinary clarity. &quot;,&quot;date&quot;:&quot;August 12, 2012&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/mars-reconnaissance-orbiter&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:null,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/mro/&quot;},{&quot;thumb&quot;:&quot;/system/missions/list_view_images/5_mars_odyssey320x240.jpg&quot;,&quot;id&quot;:5,&quot;title&quot;:&quot;2001 Mars Odyssey&quot;,&quot;description&quot;:&quot;NASA's longest-lasting spacecraft at Mars. &quot;,&quot;date&quot;:&quot;April 7, 2001&quot;,&quot;url&quot;:&quot;/mars-exploration/missions/odyssey&quot;,&quot;link_text&quot;:&quot;&quot;,&quot;target&quot;:&quot;_blank&quot;,&quot;mi_traveled&quot;:null,&quot;gallery_subnav_link&quot;:&quot;https://mars.nasa.gov/odyssey/&quot;}],&quot;search&quot;:true,&quot;search_placeholder&quot;:{&quot;placeholder&quot;:&quot;&quot;},&quot;highlight_current&quot;:{&quot;highlight&quot;:true,&quot;current_id&quot;:83,&quot;parent_ids&quot;:[]},&quot;search_submit&quot;:&quot;/search/&quot;}\">\n </nav>\n </div>\n </div>\n </div>\n </header>\n </div>\n <div id=\"sticky_nav_spacer\">\n </div>\n <div id=\"page\">\n <div class=\"page_cover\">\n </div>\n <!-- title to go in the page_header -->\n <div class=\"header_mask\">\n <section class=\"content_page module\">\n </section>\n </div>\n <div class=\"grid_list_page module content_page\">\n <div class=\"grid_layout\">\n <article>\n <header id=\"page_header\">\n </header>\n <div class=\"react_grid_list grid_list_container\" data-react-cache-id=\"GridListPage-0\" data-react-class=\"GridListPage\" data-react-props='{\"left_column\":false,\"class_name\":\"\",\"default_view\":\"list_view\",\"model\":\"news_items\",\"view_toggle\":false,\"search\":\"true\",\"list_item\":\"News\",\"title\":\"News\",\"categories\":[\"19,165,184,204\"],\"order\":\"publish_date desc,created_at desc\",\"no_items_text\":\"There are no items matching these criteria.\",\"site_title\":\"NASA’s Mars Exploration Program \",\"short_title\":\"Mars\",\"site_share_image\":\"/system/site_config_values/meta_share_images/1_mars-nasa-gov.jpg\",\"per_page\":null,\"filters\":\"[ [ \\\"date\\\", [ [ \\\"2020\\\", \\\"2020\\\" ], [ \\\"2019\\\", \\\"2019\\\" ], [ \\\"2018\\\", \\\"2018\\\" ], [ \\\"2017\\\", \\\"2017\\\" ], [ \\\"2016\\\", \\\"2016\\\" ], [ \\\"2015\\\", \\\"2015\\\" ], [ \\\"2014\\\", \\\"2014\\\" ], [ \\\"2013\\\", \\\"2013\\\" ], [ 
\\\"2012\\\", \\\"2012\\\" ], [ \\\"2011\\\", \\\"2011\\\" ], [ \\\"2010\\\", \\\"2010\\\" ], [ \\\"2009\\\", \\\"2009\\\" ], [ \\\"2008\\\", \\\"2008\\\" ], [ \\\"2007\\\", \\\"2007\\\" ], [ \\\"2006\\\", \\\"2006\\\" ], [ \\\"2005\\\", \\\"2005\\\" ], [ \\\"2004\\\", \\\"2004\\\" ], [ \\\"2003\\\", \\\"2003\\\" ], [ \\\"2002\\\", \\\"2002\\\" ], [ \\\"2001\\\", \\\"2001\\\" ], [ \\\"2000\\\", \\\"2000\\\" ] ], [ \\\"Latest\\\", \\\"\\\" ], false ], [ \\\"categories\\\", [ [ \\\"Feature Stories\\\", 165 ], [ \\\"Press Releases\\\", 19 ], [ \\\"Spotlights\\\", 184 ], [ \\\"Status Reports\\\", 204 ] ], [ \\\"All Categories\\\", \\\"\\\" ], false ] ]\",\"conditions\":null,\"scope_in_title\":true,\"options\":{\"blank_scope\":\"Latest\"},\"results_in_title\":false}'>\n </div>\n </article>\n </div>\n </div>\n <section class=\"module suggested_features\">\n <div class=\"grid_layout\">\n <header>\n <h2 class=\"module_title\">\n You Might Also Like\n </h2>\n </header>\n <section>\n <script>\n $(document).ready(function(){\n $(\".features\").slick({\n dots: false,\n infinite: true,\n speed: 300,\n slide: '.features .slide',\n slidesToShow: 3,\n slidesToScroll: 3,\n lazyLoad: 'ondemand',\n centerMode: false,\n arrows: true,\n appendArrows: '.features .slick-nav',\n appendDots: \".features .slick-nav\",\n responsive: [{\"breakpoint\":953,\"settings\":{\"slidesToShow\":2,\"slidesToScroll\":2,\"centerMode\":false}},{\"breakpoint\":480,\"settings\":{\"slidesToShow\":1,\"slidesToScroll\":1,\"centerMode\":true,\"arrows\":false,\"centerPadding\":\"25px\"}}]\n });\n });\n </script>\n <div class=\"features\">\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8749/nasa-readies-perseverance-mars-rovers-earthly-twin/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n Did you know NASA's next Mars rover has a nearly identical sibling on Earth for testing? 
Even better, it's about to roll for the first time through a replica Martian landscape.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"NASA Readies Perseverance Mars Rover's Earthly Twin \" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8749_PIA23964-320.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8749/nasa-readies-perseverance-mars-rovers-earthly-twin/\">\n NASA Readies Perseverance Mars Rover's Earthly Twin\n </a>\n </div>\n </div>\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8716/nasa-to-broadcast-mars-2020-perseverance-launch-prelaunch-activities/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n Starting July 27, news activities will cover everything from mission engineering and science to returning samples from Mars to, of course, the launch itself.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"NASA to Broadcast Mars 2020 Perseverance Launch, Prelaunch Activities\" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8716_PIA23499-320x240.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8716/nasa-to-broadcast-mars-2020-perseverance-launch-prelaunch-activities/\">\n NASA to Broadcast Mars 2020 Perseverance Launch, Prelaunch Activities\n </a>\n </div>\n </div>\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8695/the-launch-is-approaching-for-nasas-next-mars-rover-perseverance/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n The Red Planet's surface has been visited by eight NASA spacecraft. 
The ninth will be the first that includes a roundtrip ticket in its flight plan.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"The Launch Is Approaching for NASA's Next Mars Rover, Perseverance\" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8695_24732_PIA23499-226.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8695/the-launch-is-approaching-for-nasas-next-mars-rover-perseverance/\">\n The Launch Is Approaching for NASA's Next Mars Rover, Perseverance\n </a>\n </div>\n </div>\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8692/nasa-to-hold-mars-2020-perseverance-rover-launch-briefing/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n Learn more about the agency's next Red Planet mission during a live event on June 17.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"NASA to Hold Mars 2020 Perseverance Rover Launch Briefing\" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8692_PIA23920-320x240.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8692/nasa-to-hold-mars-2020-perseverance-rover-launch-briefing/\">\n NASA to Hold Mars 2020 Perseverance Rover Launch Briefing\n </a>\n </div>\n </div>\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8659/alabama-high-school-student-names-nasas-mars-helicopter/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n Vaneeza Rupani's essay was chosen as the name for the small spacecraft, which will mark NASA's first attempt at powered flight on another planet.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"Alabama High School Student Names NASA's Mars Helicopter\" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8659_1-PIA23883-MAIN-320x240.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8659/alabama-high-school-student-names-nasas-mars-helicopter/\">\n Alabama High School Student Names NASA's Mars Helicopter\n </a>\n </div>\n </div>\n <div class=\"slide\">\n <div class=\"image_and_description_container\">\n <a href=\"/news/8645/mars-helicopter-attached-to-nasas-perseverance-rover/\">\n <div class=\"rollover_description\">\n <div class=\"rollover_description_inner\">\n The team also fueled the rover's sky crane to get ready for this summer's history-making launch.\n </div>\n <div class=\"overlay_arrow\">\n <img alt=\"More\" src=\"/assets/overlay-arrow.png\"/>\n </div>\n </div>\n <img alt=\"Mars Helicopter Attached to NASA's Perseverance Rover\" class=\"img-lazy\" data-lazy=\"/system/news_items/list_view_images/8645_PIA23824-RoverWithHelicopter-32x24.jpg\" src=\"/assets/loading_320x240.png\"/>\n </a>\n </div>\n <div class=\"content_title\">\n <a href=\"/news/8645/mars-helicopter-attached-to-nasas-perseverance-rover/\">\n Mars Helicopter Attached to NASA's Perseverance Rover\n </a>\n </div>\n </div>\n <div class=\"grid_layout\">\n <div class=\"slick-nav_container\">\n <div class=\"slick-nav\">\n </div>\n </div>\n </div>\n </div>\n </section>\n </div>\n </section>\n </div>\n <footer id=\"site_footer\">\n <div 
class=\"grid_layout\">\n <section class=\"upper_footer\">\n <div class=\"share_newsletter_container\">\n <div class=\"newsletter\">\n <h2>\n Get the Mars Newsletter\n </h2>\n <form action=\"/newsletter-subscribe\">\n <input id=\"email\" name=\"email\" placeholder=\"enter email address\" type=\"email\" value=\"\"/>\n <input data-disable-with=\"\" name=\"commit\" type=\"submit\" value=\"\"/>\n </form>\n </div>\n <div class=\"share\">\n <h2>\n Follow the Journey\n </h2>\n <div class=\"social_icons\">\n <!-- AddThis Button BEGIN -->\n <div class=\"addthis_toolbox addthis_default_style addthis_32x32_style\">\n <a addthis:userid=\"MarsCuriosity\" class=\"addthis_button_twitter_follow icon\">\n <img alt=\"twitter\" src=\"/assets/[email protected]\"/>\n </a>\n <a addthis:userid=\"MarsCuriosity\" class=\"addthis_button_facebook_follow icon\">\n <img alt=\"facebook\" src=\"/assets/[email protected]\"/>\n </a>\n <a addthis:userid=\"nasa\" class=\"addthis_button_instagram_follow icon\">\n <img alt=\"instagram\" src=\"/assets/[email protected]\"/>\n </a>\n <a addthis:url=\"https://mars.nasa.gov/rss/api/?feed=news&amp;category=all&amp;feedtype=rss\" class=\"addthis_button_rss_follow icon\">\n <img alt=\"rss\" src=\"/assets/[email protected]\"/>\n </a>\n </div>\n </div>\n <script src=\"//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-5a690e4c1320e328\">\n </script>\n </div>\n </div>\n <div class=\"gradient_line\">\n </div>\n </section>\n <section class=\"sitemap\">\n <div class=\"sitemap_directory\" id=\"sitemap_directory\">\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#red_planet\">\n The Red Planet\n </a>\n </h3>\n <ul>\n <li>\n <div class=\"global_subnav_container\">\n <ul class=\"subnav\">\n <li>\n <a href=\"/#red_planet/0\" target=\"_self\">\n Dashboard\n </a>\n </li>\n <li>\n <a href=\"/#red_planet/1\" target=\"_self\">\n Science Goals\n </a>\n </li>\n <li>\n <a href=\"/#red_planet/2\" target=\"_self\">\n The Planet\n </a>\n </li>\n <li>\n <a href=\"/#red_planet/3\" target=\"_self\">\n Atmosphere\n </a>\n </li>\n <li>\n <a href=\"/#red_planet/4\" target=\"_self\">\n Astrobiology\n </a>\n </li>\n <li>\n <a href=\"/#red_planet/5\" target=\"_self\">\n Past, Present, Future, Timeline\n </a>\n </li>\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#mars_exploration_program\">\n The Program\n </a>\n </h3>\n <ul>\n <li>\n <div class=\"global_subnav_container\">\n <ul class=\"subnav\">\n <li>\n <a href=\"/#mars_exploration_program/0\" target=\"_self\">\n Mission Statement\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/1\" target=\"_self\">\n About the Program\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/2\" target=\"_self\">\n Organization\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/3\" target=\"_self\">\n Why Mars?\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/4\" target=\"_self\">\n Research Programs\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/5\" target=\"_self\">\n Planetary Resources\n </a>\n </li>\n <li>\n <a href=\"/#mars_exploration_program/6\" target=\"_self\">\n Technologies\n </a>\n </li>\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#news_and_events\">\n News &amp; Events\n </a>\n </h3>\n <ul>\n <li>\n <div 
class=\"global_subnav_container\">\n <ul class=\"subnav\">\n <li class=\"current\">\n <a href=\"/news\" target=\"_self\">\n News\n </a>\n </li>\n <li>\n <a href=\"/events\" target=\"_self\">\n Events\n </a>\n </li>\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#multimedia\">\n Multimedia\n </a>\n </h3>\n <ul>\n <li>\n <div class=\"global_subnav_container\">\n <ul class=\"subnav\">\n <li>\n <a href=\"/multimedia/images/\" target=\"_self\">\n Images\n </a>\n </li>\n <li>\n <a href=\"/multimedia/videos/\" target=\"_self\">\n Videos\n </a>\n </li>\n <li>\n <a href=\"/multimedia/more-resources/\" target=\"_self\">\n More Resources\n </a>\n </li>\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#missions_gallery_subnav\">\n Missions\n </a>\n </h3>\n <ul>\n <li>\n <div class=\"global_subnav_container\">\n <ul class=\"subnav\">\n <li>\n <a href=\"/mars-exploration/missions/?category=167\" target=\"_self\">\n Past\n </a>\n </li>\n <li>\n <a href=\"/mars-exploration/missions/?category=170\" target=\"_self\">\n Present\n </a>\n </li>\n <li>\n <a href=\"/mars-exploration/missions/?category=171\" target=\"_self\">\n Future\n </a>\n </li>\n <li>\n <a href=\"/mars-exploration/partners\" target=\"_self\">\n International Partners\n </a>\n </li>\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n <div class=\"sitemap_block\">\n <div class=\"footer_sitemap_item\">\n <h3 class=\"sitemap_title\">\n <a href=\"/#more\">\n More\n </a>\n </h3>\n <ul>\n <li>\n <div class=\"global_subnav_container\">\n <ul class=\"subnav\">\n </ul>\n </div>\n </li>\n </ul>\n </div>\n </div>\n </div>\n <div class=\"gradient_line\">\n </div>\n </section>\n <section class=\"lower_footer\">\n <div class=\"nav_container\">\n <nav>\n <ul>\n <li>\n <a href=\"http://science.nasa.gov/\" target=\"_blank\">\n NASA Science Mission Directorate\n </a>\n </li>\n <li>\n <a href=\"https://www.jpl.nasa.gov/copyrights.php\" target=\"_blank\">\n Privacy\n </a>\n </li>\n <li>\n <a href=\"http://www.jpl.nasa.gov/imagepolicy/\" target=\"_blank\">\n Image Policy\n </a>\n </li>\n <li>\n <a href=\"https://mars.nasa.gov/feedback/\" target=\"_self\">\n Feedback\n </a>\n </li>\n </ul>\n </nav>\n </div>\n <div class=\"credits\">\n <div class=\"footer_brands_top\">\n <p>\n Managed by the Mars Exploration Program and the Jet Propulsion Laboratory for NASA’s Science Mission Directorate\n </p>\n </div>\n <!-- .footer_brands -->\n <!-- %a.jpl{href: \"\", target: \"_blank\"}Institution -->\n <!-- -->\n <!-- %a.caltech{href: \"\", target: \"_blank\"}Institution -->\n <!-- .staff -->\n <!-- %p -->\n <!-- - get_staff_for_category(get_field_from_admin_config(:web_staff_category_id)) -->\n <!-- - @staff.each_with_index do |staff, idx| -->\n <!-- - unless staff.is_in_footer == 0 -->\n <!-- = staff.title + \": \" -->\n <!-- - if staff.contact_link =~ /@/ -->\n <!-- = mail_to staff.contact_link, staff.name, :subject => \"[#{@site_title}]\" -->\n <!-- - elsif staff.contact_link.present? 
-->\n <!-- = link_to staff.name, staff.contact_link -->\n <!-- - else -->\n <!-- = staff.name -->\n <!-- - unless (idx + 1 == @staff.size) -->\n <!-- %br -->\n </div>\n </section>\n </div>\n </footer>\n </div>\n </div>\n <script id=\"_fed_an_ua_tag\" src=\"https://dap.digitalgov.gov/Universal-Federated-Analytics-Min.js?agency=NASA&amp;subagency=JPL-Mars-MEPJPL&amp;pua=UA-9453474-9,UA-118212757-11&amp;dclink=true&amp;sp=searchbox&amp;exts=tif,tiff,wav\" type=\"text/javascript\">\n </script>\n </body>\n</html>\n\n" ], [ "#find all <div class=xx>, then take only the latest result + title, paragraph text\ntitle = soup.find_all('div',class_='content_title')[0].text\n# print(title)\nparagraph = soup.find_all('div',class_='rollover_description_inner')[0].text\n# print(paragraph)", "_____no_output_____" ], [ "from splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager", "_____no_output_____" ], [ "#open Splinter\nexecutable_path = {\"executable_path\": ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\nurl = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\nbrowser.visit(url)", "[WDM] - Current google-chrome version is 85.0.4183\n[WDM] - Get LATEST driver version for 85.0.4183\n" ], [ "html = browser.html\nsoup = BeautifulSoup(response.text,'html.parser')\n\n### I can't get this line to work but I'm pretty sure it's accurate\n# grab image text (see following line) and remove unnecessary text before adding to final url\nimage_url = soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1]\n# background-image: url(' /spaceimages/images/wallpaper/PIA14293-1920x1200.jpg ');\nbase_url = 'https://www.jpl.nasa.gov'\nfeatured_image_url = base_url + image_url\nprint(featured_image_url)", "_____no_output_____" ], [ "url = 'https://space-facts.com/mars/'\n#grab table\ntable = pd.read_html(url)\n# table\n#drop non-mars info\nmars_table_df = table[0]\nmars_table_html = mars_table_df.to_html(classes='table table-striped')\nprint(mars_table_html)", "<table border=\"1\" class=\"dataframe table table-striped\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>0</th>\n <th>1</th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>0</th>\n <td>Equatorial Diameter:</td>\n <td>6,792 km</td>\n </tr>\n <tr>\n <th>1</th>\n <td>Polar Diameter:</td>\n <td>6,752 km</td>\n </tr>\n <tr>\n <th>2</th>\n <td>Mass:</td>\n <td>6.39 × 10^23 kg (0.11 Earths)</td>\n </tr>\n <tr>\n <th>3</th>\n <td>Moons:</td>\n <td>2 (Phobos &amp; Deimos)</td>\n </tr>\n <tr>\n <th>4</th>\n <td>Orbit Distance:</td>\n <td>227,943,824 km (1.38 AU)</td>\n </tr>\n <tr>\n <th>5</th>\n <td>Orbit Period:</td>\n <td>687 days (1.9 years)</td>\n </tr>\n <tr>\n <th>6</th>\n <td>Surface Temperature:</td>\n <td>-87 to -5 °C</td>\n </tr>\n <tr>\n <th>7</th>\n <td>First Record:</td>\n <td>2nd millennium BC</td>\n </tr>\n <tr>\n <th>8</th>\n <td>Recorded By:</td>\n <td>Egyptian astronomers</td>\n </tr>\n </tbody>\n</table>\n" ], [ "executable_path = {\"executable_path\": ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)\nurl = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\nbrowser.visit(url)\nhtml = browser.html\nsoup = BeautifulSoup(html, 'html.parser')\n\nhemispheres = soup.find_all('div', class_='item')\nhemispheres_list = []\nhemispheres\nbase_url = 'https://astrogeology.usgs.gov'\n\nfor data in hemispheres:\n title = data.find('h3').text\n #link to 
hemisphere page\n hemisphere_url = data.find('a', class_='itemLink product-item')['href']\n browser.visit(base_url + hemisphere_url)\n \n page2_html = browser.html\n soup = BeautifulSoup(page2_html, 'html.parser')\n image_url = soup.find('img',class_='wide-image')['src']\n full_image_url = base_url + image_url\n \n hemispheres_list.append({\"title\":title,\"img_url\":full_image_url})\n\nhemispheres_list", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0dfd1bfce81f0bcc33449c91a279b3d3ecf1e55
165,653
ipynb
Jupyter Notebook
analysis/us-treasury-rates.ipynb
pymoment/us-macro
6f48c3f96129ce140506725d9327730a072532e2
[ "MIT" ]
5
2021-01-10T00:34:34.000Z
2022-03-10T01:13:31.000Z
analysis/us-treasury-rates.ipynb
pymoment/us-macro
6f48c3f96129ce140506725d9327730a072532e2
[ "MIT" ]
1
2022-03-22T22:21:45.000Z
2022-03-22T22:21:45.000Z
analysis/us-treasury-rates.ipynb
pymoment/us-macro
6f48c3f96129ce140506725d9327730a072532e2
[ "MIT" ]
null
null
null
188.670843
29,226
0.494377
[ [ [ "# US Treasury Interest Rates / Yield Curve Data\n\n---\n\nA look at the US Treasury yield curve, according to interest rates published by the US Treasury.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport altair as alt\nimport numpy as np", "_____no_output_____" ], [ "url = 'https://www.treasury.gov/resource-center/data-chart-center/interest-rates/pages/TextView.aspx?data=yieldYear&year={year}'\n\ndef fetchRates(year):\n df = pd.read_html(url.format(year=year), skiprows=0, attrs={ \"class\": \"t-chart\" })[0]\n\n df['Date'] = pd.to_datetime(df.Date)\n \n return df.set_index('Date').resample('1m').last().reset_index()\n\nfetchTsRates = lambda years: pd.concat(map(fetchRates, years))\n\n#fetchRates(2019).head()", "_____no_output_____" ] ], [ [ "## How do the interest rates look for the past 4 years (by instrument)?", "_____no_output_____" ] ], [ [ "years = range(2016, 2022)\nfields = ['Date', '3 mo', '1 yr', '2 yr', '7 yr', '10 yr']\n\ndfm = fetchTsRates(years)[fields].melt(id_vars='Date', var_name='Maturity')\n\nalt.Chart(dfm).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='')),\n alt.Y('value:Q',\n axis=alt.Axis(title='Interest Rate [%]'),\n scale=alt.Scale(domain=[np.floor(dfm['value'].apply(float).min()), np.ceil(dfm['value'].apply(float).max())])),\n alt.Color('Maturity:N', sort=fields[1:]),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. Treasury Yields from {y1} to {y2}'.format(y1=min(years), y2=max(years)),\n height=450,\n width=700,\n background='white'\n)", "_____no_output_____" ] ], [ [ "### Same chart as above, just a different mix of instruments", "_____no_output_____" ] ], [ [ "years = range(2016, 2022)\nfields = ['Date', '6 mo', '2 yr', '3 yr', '10 yr', '30 yr']\n\ndfm = fetchTsRates(years)[fields].melt(id_vars='Date', var_name='Maturity')\n\nc = alt.Chart(dfm).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='')),\n alt.Y('value:Q',\n axis=alt.Axis(title='Interest Rate [%]'),\n scale=alt.Scale(domain=[np.floor(dfm['value'].apply(float).min()), np.ceil(dfm['value'].apply(float).max())])),\n alt.Color('Maturity:N', sort=fields[1:]),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. Treasury Yields from {y1} to {y2}'.format(y1=min(years), y2=max(years)),\n height=450,\n width=700,\n background='white'\n)\n\nc.save('us-treasury-rates.png')\nc.display()", "_____no_output_____" ] ], [ [ "## How did that chart look for the 4 years before 2008?", "_____no_output_____" ] ], [ [ "years = range(2004, 2010)\nfields = ['Date', '6 mo', '2 yr', '3 yr', '10 yr', '30 yr']\n\ndfm2 = fetchTsRates(years)[fields].melt(id_vars='Date', var_name='Maturity')\n\nalt.Chart(dfm2).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='', format='%b %Y')),\n alt.Y('value:Q',\n axis=alt.Axis(title='Interest Rate [%]'),\n scale=alt.Scale(domain=[np.floor(dfm2['value'].apply(float).min()), np.ceil(dfm2['value'].apply(float).max())])),\n alt.Color('Maturity:N', sort=fields[1:]),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. 
Treasury Yields from {y1} to {y2}'.format(y1=min(years), y2=max(years)),\n height=450,\n width=700,\n background='white'\n)", "_____no_output_____" ], [ "year = 2019\n\nalt.Chart(fetchRates(year).melt(id_vars='Date', var_name='Maturity')).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='')),\n alt.Y('value:Q', axis=alt.Axis(title='Interest Rate [%]'), scale=alt.Scale(zero=False)),\n alt.Color('Maturity:N',\n sort=['1 mo', '2 mo', '3 mo', '6 mo', '1 yr', '2 yr', '3 yr', '5 yr', '7 yr', '10 yr', '20 yr', '30 yr']),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. Treasury Yields for {year}'.format(year=year),\n height=450,\n width=700\n).interactive()", "_____no_output_____" ], [ "year = 2007\n\nalt.Chart(fetchRates(year).melt(id_vars='Date', var_name='Maturity')).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='')),\n alt.Y('value:Q', axis=alt.Axis(title='Interest Rate [%]'), scale=alt.Scale(zero=False)),\n alt.Color('Maturity:N',\n sort=['1 mo', '2 mo', '3 mo', '6 mo', '1 yr', '2 yr', '3 yr', '5 yr', '7 yr', '10 yr', '20 yr', '30 yr']),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. Treasury Yields for {year}'.format(year=year),\n height=450,\n width=700\n).interactive()", "_____no_output_____" ], [ "year = 1996\n\nalt.Chart(fetchRates(year).melt(id_vars='Date', var_name='Maturity')).mark_line().encode(\n alt.X('Date:T', axis=alt.Axis(title='')),\n alt.Y('value:Q', axis=alt.Axis(title='Interest Rate [%]'), scale=alt.Scale(zero=False)),\n alt.Color('Maturity:N'),\n tooltip=[alt.Tooltip('Date:T', format='%b %Y'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='U.S. Treasury Yields for {year}'.format(year=year),\n height=450,\n width=700\n).interactive()", "_____no_output_____" ] ], [ [ "## Visualizing the \"yield curve\" of US Treasuries", "_____no_output_____" ] ], [ [ "years = range(2004, 2009)\ninstruments = {\n 0.25: '3 Month T-bill',\n 0.5: '6 Month T-bill',\n 2: '2 Year Note',\n 10: '10 Year Note',\n 30: '30 Year Bond'\n}\nfieldsToYears = {'3 mo': 0.25, '6 mo': 0.5, '2 yr': 2, '10 yr': 10, '30 yr': 30}\nfields = [i for i in fieldsToYears.keys()]\n\ndfm2 = fetchTsRates(years)[fields + ['Date']].melt(id_vars='Date', var_name='Maturity')\n\ndfm2[\"Year\"] = dfm2.Date.apply(lambda v: v.year)\n\nalt.Chart(dfm2.groupby([\"Maturity\", \"Year\"]).agg({ \"value\": \"mean\" }).reset_index()).mark_line().encode(\n alt.X('Maturity:O', axis=alt.Axis(title='Maturity', labelAngle=0), sort=fields),\n alt.Y('value:Q', axis=alt.Axis(title='Interest Rate [%]')),\n alt.Color('Year:N'),\n tooltip=[alt.Tooltip('Year:N'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='Yearly Average U.S. Treasury Yield comparison [{y1} to {y2}]'.format(y1=min(years), y2=max(years)),\n height=450,\n width=700\n)", "_____no_output_____" ], [ "years = range(2016, 2022)\ninstruments = {\n 0.25: '3 Month T-bill',\n 0.5: '6 Month T-bill',\n 2: '2 Year Note',\n 10: '10 Year Note',\n 30: '30 Year Bond'\n}\nfieldsToYears = {'3 mo': 0.25, '6 mo': 0.5, '2 yr': 2, '10 yr': 10, '30 yr': 30}\nfields = [i for i in fieldsToYears.keys()]\n\ndfm2 = fetchTsRates(years)[fields + ['Date']].melt(id_vars='Date', var_name='Maturity')\n\ndfm2[\"Year\"] = dfm2.Date.apply(lambda v: v.year)\n\nalt.Chart(dfm2.groupby([\"Maturity\", \"Year\"]).agg({ \"value\": \"mean\" }).reset_index()).mark_line().encode(\n alt.X('Maturity:O', axis=alt.Axis(title='Maturity', labelAngle=0), sort=fields),\n alt.Y('value:Q', axis=alt.Axis(title='Interest Rate [%]')),\n alt.Color('Year:N'),\n tooltip=[alt.Tooltip('Year:N'), alt.Tooltip('Maturity:N'), alt.Tooltip('value:Q')]\n).properties(\n title='Yearly Average U.S. Treasury Yield comparison [{y1} to {y2}]'.format(y1=min(years), y2=max(years)),\n height=450,\n width=700\n)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0dfdd910b9d2835eb882c9d125a2382fa1facb5
5,567
ipynb
Jupyter Notebook
plot_dbscan.ipynb
roobooot/dbscan
ceb99004d66a6665e206e067cb00c087b6eee99b
[ "MIT" ]
null
null
null
plot_dbscan.ipynb
roobooot/dbscan
ceb99004d66a6665e206e067cb00c087b6eee99b
[ "MIT" ]
null
null
null
plot_dbscan.ipynb
roobooot/dbscan
ceb99004d66a6665e206e067cb00c087b6eee99b
[ "MIT" ]
null
null
null
38.130137
991
0.557392
[ [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "\n# Demo of DBSCAN clustering algorithm\n\n\nFinds core samples of high density and expands clusters from them.\n\n\n", "_____no_output_____" ] ], [ [ "print(__doc__)\n\nimport numpy as np\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\n\n\n# #############################################################################\n# Generate sample data\ncenters = [[1, 1], [-1, -1], [1, -1]]\nX, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,\n random_state=0)\n\nX = StandardScaler().fit_transform(X)\n\n# #############################################################################\n# Compute DBSCAN\ndb = DBSCAN(eps=0.3, min_samples=10).fit(X)\ncore_samples_mask = np.zeros_like(db.labels_, dtype=bool)\ncore_samples_mask[db.core_sample_indices_] = True\nlabels = db.labels_\n\n# Number of clusters in labels, ignoring noise if present.\nn_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n\nprint('Estimated number of clusters: %d' % n_clusters_)\nprint(\"Homogeneity: %0.3f\" % metrics.homogeneity_score(labels_true, labels))\nprint(\"Completeness: %0.3f\" % metrics.completeness_score(labels_true, labels))\nprint(\"V-measure: %0.3f\" % metrics.v_measure_score(labels_true, labels))\nprint(\"Adjusted Rand Index: %0.3f\"\n % metrics.adjusted_rand_score(labels_true, labels))\nprint(\"Adjusted Mutual Information: %0.3f\"\n % metrics.adjusted_mutual_info_score(labels_true, labels))\nprint(\"Silhouette Coefficient: %0.3f\"\n % metrics.silhouette_score(X, labels))\n\n# #############################################################################\n# Plot result\nimport matplotlib.pyplot as plt\n\n# Black removed and is used for noise instead.\nunique_labels = set(labels)\ncolors = [plt.cm.Spectral(each)\n for each in np.linspace(0, 1, len(unique_labels))]\nfor k, col in zip(unique_labels, colors):\n if k == -1:\n # Black used for noise.\n col = [0, 0, 0, 1]\n\n class_member_mask = (labels == k)\n\n xy = X[class_member_mask & core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=14)\n\n xy = X[class_member_mask & ~core_samples_mask]\n plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=6)\n\nplt.title('Estimated number of clusters: %d' % n_clusters_)\nplt.show()", "Automatically created module for IPython interactive environment\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ] ]
d0dfddee4c2a7d32b03ce57070df1411489128ed
34,851
ipynb
Jupyter Notebook
notebooks/fileio/wradlib_radar_formats.ipynb
wradlib/wradlib-notebooks
4362c796abfd62e739f38f7e2de479173a2448a2
[ "MIT" ]
10
2019-01-31T09:15:29.000Z
2022-03-10T09:43:23.000Z
notebooks/fileio/wradlib_radar_formats.ipynb
wradlib/wradlib-notebooks
4362c796abfd62e739f38f7e2de479173a2448a2
[ "MIT" ]
18
2018-02-28T06:58:21.000Z
2021-09-08T07:40:32.000Z
notebooks/fileio/wradlib_radar_formats.ipynb
wradlib/wradlib-notebooks
4362c796abfd62e739f38f7e2de479173a2448a2
[ "MIT" ]
10
2018-02-28T14:05:24.000Z
2022-02-17T10:27:26.000Z
34.955868
1,374
0.630054
[ [ [ "This notebook is part of the $\\omega radlib$ documentation: https://docs.wradlib.org.\n\nCopyright (c) $\\omega radlib$ developers.\nDistributed under the MIT License. See LICENSE.txt for more info.", "_____no_output_____" ], [ "# Supported radar data formats", "_____no_output_____" ], [ "The binary encoding of many radar products is a major obstacle for many potential radar users. Often, decoder software is not easily available. In case formats are documented, the implementation of decoders is a major programming effort. This tutorial provides an overview of the data formats currently supported by $\\omega radlib$. We seek to continuously enhance the range of supported formats, so this document is only a snapshot. If you need a specific file format to be supported by $\\omega radlib$, please [raise an issue](https://github.com/wradlib/wradlib/issues/new) of type *enhancement*. You can provide support by adding documents which help to decode the format, e.g. format reference documents or software code in other languages for decoding the format.\n\nAt the moment, *supported format* means that the radar format can be read and further processed by wradlib. Normally, wradlib will return a numpy array of data values and a dictionary of metadata - if the file contains any.\n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n\nDue to recent developments in major data science packages (eg. [xarray](https://xarray.pydata.org)) wradlib supports as of version 1.10 reading of ``ODIM``, ``GAMIC`` and ``CfRadial`` (1 and 2) datasets into an `xarray` based data structure. Output to ``ODIM_H5`` and ``CfRadial2`` like data files as well as standard netCDF4 data files is easily possible.\n \n</div>", "_____no_output_____" ], [ "In the following, we will provide an overview of file formats which can be currently read by $\\omega radlib$. \n\nReading weather radar files is done via the [wradlib.io](https://docs.wradlib.org/en/latest/io.html) module. There you will find a complete function reference. ", "_____no_output_____" ] ], [ [ "import wradlib as wrl\nimport warnings\n#warnings.filterwarnings('ignore')\nimport matplotlib.pyplot as pl\nimport numpy as np\ntry:\n get_ipython().magic(\"matplotlib inline\")\nexcept:\n pl.ion()", "_____no_output_____" ] ], [ [ "## German Weather Service: DX format", "_____no_output_____" ], [ "The German Weather Service uses the DX file format to encode local radar sweeps. DX data are in polar coordinates. The naming convention is as follows: <pre>raa00-dx_&lt;location-id&gt;-&lt;YYMMDDHHMM&gt;-&lt;location-abreviation&gt;---bin</pre> or <pre>raa00-dx_&lt;location-id&gt;-&lt;YYYYMMDDHHMM&gt;-&lt;location-abreviation&gt;---bin</pre>\n[Read and plot DX radar data from DWD](wradlib_reading_dx.ipynb) provides an extensive introduction into working with DX data. For now, we would just like to know how to read the data:", "_____no_output_____" ] ], [ [ "fpath = 'dx/raa00-dx_10908-0806021655-fbg---bin.gz'\nf = wrl.util.get_wradlib_data_file(fpath)\ndata, metadata = wrl.io.read_dx(f)", "_____no_output_____" ] ], [ [ "Here, ``data`` is a two dimensional array of shape (number of azimuth angles, number of range gates). 
This means that the number of rows of the array corresponds to the number of azimuth angles of the radar sweep while the number of columns corresponds to the number of range gates per ray.", "_____no_output_____" ] ], [ [ "print(data.shape)\nprint(metadata.keys())", "_____no_output_____" ], [ "fig = pl.figure(figsize=(10, 10))\nax, im = wrl.vis.plot_ppi(data, fig=fig, proj='cg')", "_____no_output_____" ] ], [ [ "## German Weather Service: RADOLAN (quantitative) composite", "_____no_output_____" ], [ "The quantitative composite format of the DWD (German Weather Service) was established in the course of the [RADOLAN project](https://www.dwd.de/DE/leistungen/radolan/radolan.html). Most quantitative composite products from the DWD are distributed in this format, e.g. the R-series (RX, RY, RH, RW, ...), the S-series (SQ, SH, SF, ...), and the E-series (European quantitative composite, e.g. EZ, EH, EB). Please see the [composite format description](https://www.dwd.de/DE/leistungen/radolan/radolan_info/radolan_radvor_op_komposit_format_pdf.pdf?__blob=publicationFile&v=5) for a full reference and a full table of products (unfortunately only in German language). An extensive section covering many RADOLAN aspects is here: [RADOLAN](../radolan.ipynb)\n\nCurrently, the RADOLAN composites have a spatial resolution of 1km x 1km, with the national composites (R- and S-series) being 900 x 900 grids, and the European composites 1500 x 1400 grids. The projection is [polar-stereographic](../radolan/radolan_grid.ipynb#Polar-Stereographic-Projection). The products can be read by the following function:\n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n \nSince $\\omega radlib$ version 1.10 a ``RADOLAN`` reader [wradlib.io.radolan.open_radolan_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.radolan.open_radolan_dataset.html) based on [Xarray](https://xarray.pydata.org) is available. Please read the more indepth notebook [wradlib_radolan_backend](wradlib_radolan_backend.ipynb).\n \n</div>\n", "_____no_output_____" ] ], [ [ "fpath = 'radolan/misc/raa01-rw_10000-1408102050-dwd---bin.gz'\nf = wrl.util.get_wradlib_data_file(fpath)\ndata, metadata = wrl.io.read_radolan_composite(f)", "_____no_output_____" ] ], [ [ "Here, ``data`` is a two dimensional integer array of shape (number of rows, number of columns). Different product types might need different levels of postprocessing, e.g. if the product contains rain rates or accumulations, you will normally have to divide the data by a factor of 10. ``metadata`` is again a dictionary which provides metadata from the file's header section, e.g. using the keys *producttype*, *datetime*, *intervalseconds*, *nodataflag*. ", "_____no_output_____" ] ], [ [ "print(data.shape)\nprint(metadata.keys())", "_____no_output_____" ] ], [ [ "Masking the NoData (or missing) values can be done by:", "_____no_output_____" ] ], [ [ "maskeddata = np.ma.masked_equal(data, \n metadata[\"nodataflag\"])", "_____no_output_____" ], [ "# get coordinates\nradolan_grid_xy = wrl.georef.get_radolan_grid(900, 900)\nx = radolan_grid_xy[:, :, 0]\ny = radolan_grid_xy[:, :, 1]\n\n# create quick plot\nfig = pl.figure(figsize=(10, 8))\npl.pcolormesh(x, y, maskeddata)", "_____no_output_____" ] ], [ [ "## HDF5", "_____no_output_____" ], [ "### OPERA HDF5 (ODIM_H5)", "_____no_output_____" ], [ "[HDF5](https://www.hdfgroup.org/solutions/hdf5/) is a data model, library, and file format for storing and managing data. 
The [OPERA program](https://www.eumetnet.eu/activities/observations-programme/current-activities/opera/) developed a convention (or information model) on how to store and exchange radar data in hdf5 format. It is based on the work of [COST Action 717](https://e-services.cost.eu/files/domain_files/METEO/Action_717/final_report/final_report-717.pdf) and is used e.g. in real-time operations in the Nordic European countries. The OPERA Data and Information Model (ODIM) is documented e.g. in this [report](https://www.eol.ucar.edu/system/files/OPERA_2008_03_WP2.1b_ODIM_H5_v2.1.pdf). Make use of these documents in order to understand the organization of OPERA hdf5 files!\n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n \nSince $\\omega radlib$ version 1.10 an ``Odim_H5`` reader [wradlib.io.open_odim_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.hdf.open_odim_dataset.html) based on [Xarray](https://xarray.pydata.org) is available. Please read the more indepth notebook [wradlib_odim_backend](wradlib_odim_backend.ipynb).\n\nFormer `xarray`-based implementations will be deprecated in future versions.\n \n</div>\n\nThe hierarchical nature of HDF5 can be described as being similar to directories, files, and links on a hard-drive. Actual metadata are stored as so-called *attributes*, and these attributes are organized together in so-called *groups*. Binary data are stored as so-called *datasets*. As for ODIM_H5, the ``root`` (or top level) group contains three groups of metadata: these are called ``what`` (object, information model version, and date/time information), ``where`` (geographical information), and ``how`` (quality and optional/recommended metadata). For a very simple product, e.g. a CAPPI, the data is organized in a group called ``dataset1`` which contains another group called ``data1`` where the actual binary data are found in ``data``. In analogy with a file system on a hard-disk, the HDF5 file containing this simple product is organized like this:\n\n```\n /\n /what\n /where\n /how\n /dataset1\n /dataset1/data1\n /dataset1/data1/data\n```\n\nThe philosophy behind the $\\omega radlib$ interface to OPERA's data model is very straightforward: $\\omega radlib$ simply translates the complete file structure to *one* dictionary and returns this dictionary to the user. Thus, the potential complexity of the stored data is kept and it is left to the user how to proceed with this data. The keys of the output dictionary are strings that correspond to the \"directory trees\" shown above. Each key ending with ``/data`` points to a Dataset (i.e. a numpy array of data). Each key ending with ``/what``, ``/where`` or ``/how`` points to another dictionary of metadata. The entire output can be obtained by:", "_____no_output_____" ] ], [ [ "fpath = 'hdf5/knmi_polar_volume.h5'\nf = wrl.util.get_wradlib_data_file(fpath)\nfcontent = wrl.io.read_opera_hdf5(f)", "_____no_output_____" ] ], [ [ "The user should inspect the output obtained from his or her hdf5 file in order to see how to access those items which should be further processed. 
In order to get a readable overview of the output dictionary, one can use the pretty printing module:", "_____no_output_____" ] ], [ [ "# which keywords can be used to access the content?\nprint(fcontent.keys())\n# print the binary data of the first dataset\n# (numpy arrays will not be entirely printed)\nprint(fcontent['dataset1/data1/data'])", "_____no_output_____" ] ], [ [ "Please note that in order to experiment with such datasets, you can download hdf5 sample data from the [OPERA](https://www.eumetnet.eu/activities/observations-programme/current-activities/opera/) website or use the example data provided with the [wradlib-data](https://github.com/wradlib/wradlib-data/) repository.", "_____no_output_____" ] ], [ [ "fig = pl.figure(figsize=(10, 10))\nim = wrl.vis.plot_ppi(fcontent['dataset1/data1/data'], fig=fig, proj='cg')", "_____no_output_____" ] ], [ [ "### GAMIC HDF5", "_____no_output_____" ], [ "GAMIC refers to the commercial [GAMIC Enigma MURAN software](https://www.gamic.com) which exports data in hdf5 format. The concept is quite similar to the above [OPERA HDF5 (ODIM_H5)](#OPERA-HDF5-(ODIM_H5)) format. \n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n \nSince $\\omega radlib$ version 1.10 a ``GAMIC`` reader [wradlib.io.hdf.open_gamic_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.hdf.open_gamic_dataset.html) based on [Xarray](https://xarray.pydata.org) is available. Please read the more indepth notebook [wradlib_gamic_backend](wradlib_gamic_backend.ipynb).\n\nFormer `xarray`-based implementations will be deprecated in future versions.\n \n</div>\n\nSuch a file (typical ending: *.mvol*) can be read by:", "_____no_output_____" ] ], [ [ "fpath = 'hdf5/2014-08-10--182000.ppi.mvol'\nf = wrl.util.get_wradlib_data_file(fpath)\ndata, metadata = wrl.io.read_gamic_hdf5(f)", "_____no_output_____" ] ], [ [ "While metadata represents the usual dictionary of metadata, the data variable is a dictionary which might contain several numpy arrays with the keywords of the dictionary indicating different moments.", "_____no_output_____" ] ], [ [ "print(metadata.keys())\nprint(metadata['VOL'])\nprint(metadata['SCAN0'].keys())", "_____no_output_____" ], [ "print(data['SCAN0'].keys())\nprint(data['SCAN0']['PHIDP'].keys())\nprint(data['SCAN0']['PHIDP']['data'].shape)", "_____no_output_____" ], [ "fig = pl.figure(figsize=(10, 10))\nim = wrl.vis.plot_ppi(data['SCAN0']['ZH']['data'], fig=fig, proj='cg')", "_____no_output_____" ] ], [ [ "### Generic HDF5", "_____no_output_____" ], [ "This is a generic hdf5 reader, which will read any hdf5 structure.", "_____no_output_____" ] ], [ [ "fpath = 'hdf5/2014-08-10--182000.ppi.mvol'\nf = wrl.util.get_wradlib_data_file(fpath)\nfcontent = wrl.io.read_generic_hdf5(f)", "_____no_output_____" ], [ "print(fcontent.keys())", "_____no_output_____" ], [ "print(fcontent['where'])\nprint(fcontent['how'])\nprint(fcontent['scan0/moment_3'].keys())\nprint(fcontent['scan0/moment_3']['attrs'])\nprint(fcontent['scan0/moment_3']['data'].shape)\n", "_____no_output_____" ], [ "fig = pl.figure(figsize=(10, 10))\nim = wrl.vis.plot_ppi(fcontent['scan0/moment_3']['data'], fig=fig, proj='cg')", "_____no_output_____" ] ], [ [ "## NetCDF", "_____no_output_____" ], [ "The NetCDF format also claims to be self-describing. However, as for all such formats, the developers of netCDF also admit that \"[...] 
the mere use of netCDF is not sufficient to make data self-describing and meaningful to both humans and machines [...]\" (see [here](https://www.unidata.ucar.edu/software/netcdf/documentation/historic/netcdf/Conventions.html). Different radar operators or data distributors will use different naming conventions and data hierarchies (i.e. \"data models\") that the reading program might need to know about.\n\n$\\omega radlib$ provides two solutions to address this challenge. The first one ignores the concept of data models and just pulls all data and metadata from a NetCDF file ([wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html). The second is designed for a specific data model used by the EDGE software ([wradlib.io.read_edge_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_edge_netcdf.html)).\n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n\nSince $\\omega radlib$ version 1.10 a ``Cf/Radial1`` reader [wradlib.io.xarray.open_cfradial1_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.open_cfradial1_dataset.html) and a ``Cf/Radial2`` reader [wradlib.io.xarray.open_cfradial2_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.open_cfradial2_dataset.html) for CF versions 1.X and 2 based on [Xarray](https://xarray.pydata.org/en/stable/) are available. Please read the more indepth notebooks [wradlib_cfradial1_backend](wradlib_cfradial1_backend.ipynb) and [wradlib_cfradial2_backend](wradlib_cfradial2_backend.ipynb).\n\n</div>", "_____no_output_____" ], [ "### Generic NetCDF reader (includes CfRadial)", "_____no_output_____" ], [ "$\\omega radlib$ provides a function that will virtually read any NetCDF file irrespective of the data model: [wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html). It is built upon Python's [netcdf4](https://unidata.github.io/netcdf4-python/) library. [wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html) will return only one object, a dictionary, that contains all the contents of the NetCDF file corresponding to the original file structure. This includes all the metadata, as well as the so called \"dimensions\" (describing the dimensions of the actual data arrays) and the \"variables\" which will contains the actual data. Users can use this dictionary at will in order to query data and metadata; however, they should make sure to consider the documentation of the corresponding data model. [wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html) has been shown to work with a lot of different data models, most notably **CfRadial** (see [here](https://ncar.github.io/CfRadial/) for details). 
A typical call to [wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html) would look like:", "_____no_output_____" ] ], [ [ "fpath = 'netcdf/example_cfradial_ppi.nc'\nf = wrl.util.get_wradlib_data_file(fpath)\noutdict = wrl.io.read_generic_netcdf(f)\nfor key in outdict.keys():\n print(key)", "_____no_output_____" ] ], [ [ "Please see [this example notebook](wradlib_generic_netcdf_example.ipynb) to get started.", "_____no_output_____" ], [ "### EDGE NetCDF", "_____no_output_____" ], [ "EDGE is a commercial software for radar control and data analysis provided by the Enterprise Electronics Corporation. It allows for netCDF data export. The resulting files can be read by [wradlib.io.read_generic_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_generic_netcdf.html), but $\\omega radlib$ also provides a specific function, [wradlib.io.read_edge_netcdf()](https://docs.wradlib.org/en/latest/generated/wradlib.io.netcdf.read_edge_netcdf.html) to return metadata and data as separate objects:", "_____no_output_____" ] ], [ [ "fpath = 'netcdf/edge_netcdf.nc'\nf = wrl.util.get_wradlib_data_file(fpath)\ndata, metadata = wrl.io.read_edge_netcdf(f)\nprint(data.shape)\nprint(metadata.keys())", "_____no_output_____" ] ], [ [ "## Gematronik Rainbow", "_____no_output_____" ], [ "Rainbow refers to the commercial [RAINBOW®5 APPLICATION SOFTWARE](https://www.leonardogermany.com/en/products/rainbow-5) which exports data in an XML flavour which, due to binary data blobs, violates the XML standard. Gematronik provided Python code for implementing this reader in $\\omega radlib$, which is very much appreciated.\n\nThe philosophy behind the $\\omega radlib$ interface to Gematronik's data model is very straightforward: $\\omega radlib$ simply translates the complete xml file structure to *one* dictionary and returns this dictionary to the user. Thus, the potential complexity of the stored data is kept and it is left to the user how to proceed with this data. The keys of the output dictionary are strings that correspond to the \"xml nodes\" and \"xml attributes\". Each ``data`` key points to a Dataset (i.e. a numpy array of data). Such a file (typical ending: *.vol* or *.azi*) can be read by:", "_____no_output_____" ] ], [ [ "fpath = 'rainbow/2013070308340000dBuZ.azi'\nf = wrl.util.get_wradlib_data_file(fpath)\nfcontent = wrl.io.read_rainbow(f)", "_____no_output_____" ] ], [ [ "The user should inspect the output obtained from his or her Rainbow file in order to see how to access those items which should be further processed. In order to get a readable overview of the output dictionary, one can use the pretty printing module:", "_____no_output_____" ] ], [ [ "# which keywords can be used to access the content?\nprint(fcontent.keys())\n# print the sensor metadata stored below the 'volume' node\n# (numpy arrays will not be entirely printed)\nprint(fcontent['volume']['sensorinfo'])", "_____no_output_____" ] ], [ [ "You can check this [example notebook](wradlib_load_rainbow_example.ipynb) for getting a first impression.", "_____no_output_____" ], [ "## Vaisala Sigmet IRIS ", "_____no_output_____" ], [ "[IRIS](https://www.vaisala.com/en/products/instruments-sensors-and-other-measurement-devices/weather-radar-products/iris-focus) refers to the commercial Vaisala Sigmet **I**nteractive **R**adar **I**nformation **S**ystem. 
The Vaisala Sigmet Digital Receivers export data in a [well documented](ftp://ftp.sigmet.com/outgoing/manuals/IRIS_Programmers_Manual.pdf) binary format.\n\nThe philosophy behind the $\\omega radlib$ interface to the IRIS data model is very straightforward: $\\omega radlib$ simply translates the complete binary file structure to *one* dictionary and returns this dictionary to the user. Thus, the potential complexity of the stored data is kept and it is left to the user how to proceed with this data. The keys of the output dictionary are strings that correspond to the Sigmet Data Structures. \n\n\n<div class=\"alert alert-warning\">\n\n**Note** <br>\n \nSince $\\omega radlib$ version 1.12 an ``IRIS`` reader [wradlib.io.iris.open_iris_dataset()](https://docs.wradlib.org/en/latest/generated/wradlib.io.iris.open_iris_dataset.html) based on [Xarray](https://xarray.pydata.org) is available. Please read the more indepth notebook [wradlib_iris_backend](wradlib_iris_backend.ipynb).\n\nAt the same time the reader has changed with respect to performance. So the ray metadata is only read once per sweep and is only included once in the output of `read_iris`. Currently we keep backwards compatibility, but this behaviour is deprecated and will be changed in a future version. See the two examples below.\n \n</div>\n\n\nSuch a file (typical ending: *.RAWXXXX) can be read by:", "_____no_output_____" ] ], [ [ "fpath = 'sigmet/cor-main131125105503.RAW2049'\nf = wrl.util.get_wradlib_data_file(fpath)\nfcontent = wrl.io.read_iris(f, keep_old_sweep_data=False)", "_____no_output_____" ], [ "# which keywords can be used to access the content?\nprint(fcontent.keys())\n# print the entire content including values of data and \n# metadata of the first sweep\n# (numpy arrays will not be entirely printed)\nprint(fcontent['data'][1].keys())\nprint()\nprint(fcontent['data'][1]['ingest_data_hdrs'].keys())\nprint(fcontent['data'][1]['ingest_data_hdrs']['DB_DBZ'])\nprint()\nprint(fcontent['data'][1]['sweep_data'].keys())\nprint(fcontent['data'][1]['sweep_data']['DB_DBZ'])", "_____no_output_____" ], [ "fig = pl.figure(figsize=(10, 10))\nswp = fcontent['data'][1]['sweep_data']\nax, im = wrl.vis.plot_ppi(swp[\"DB_DBZ\"], fig=fig, proj='cg')", "_____no_output_____" ], [ "fpath = 'sigmet/cor-main131125105503.RAW2049'\nf = wrl.util.get_wradlib_data_file(fpath)\nfcontent = wrl.io.read_iris(f, keep_old_sweep_data=True)", "_____no_output_____" ], [ "# which keywords can be used to access the content?\nprint(fcontent.keys())\n# print the entire content including values of data and \n# metadata of the first sweep\n# (numpy arrays will not be entirely printed)\nprint(fcontent['data'][1].keys())\nprint()\nprint(fcontent['data'][1]['ingest_data_hdrs'].keys())\nprint(fcontent['data'][1]['ingest_data_hdrs']['DB_DBZ'])\nprint()\nprint(fcontent['data'][1]['sweep_data'].keys())\nprint(fcontent['data'][1]['sweep_data']['DB_DBZ'])", "_____no_output_____" ], [ "fig = pl.figure(figsize=(10, 10))\nswp = fcontent['data'][1]['sweep_data']\nax, im = wrl.vis.plot_ppi(swp[\"DB_DBZ\"][\"data\"], fig=fig, proj='cg')", "_____no_output_____" ] ], [ [ "## OPERA BUFR", "_____no_output_____" ], [ "**WARNING** $\\omega radlib$ does currently not support the BUFR format!\n\nThe Binary Universal Form for the Representation of meteorological data (BUFR) is a binary data format maintained by the World Meteorological Organization (WMO).\n\nThe BUFR format was adopted by [OPERA](https://www.eumetnet.eu/activities/observations-programme/current-activities/opera/) for 
the representation of weather radar data.\nA BUFR file consists of a set of *descriptors* which contain all the relevant metadata and a data section. \nThe *descriptors* are identified as a tuple of three integers. The meaning of these tupels is described in the so-called BUFR tables. There are generic BUFR tables provided by the WMO, but it is also possible to define so called *local tables* - which was done by the OPERA consortium for the purpose of radar data representation.\n \nIf you want to use BUFR files together with $\\omega radlib$, we recommend that you check out the [OPERA webpage](https://www.eumetnet.eu/activities/observations-programme/current-activities/opera/) where you will find software for BUFR decoding. In particular, you might want to check out [this tool](https://eumetnet.eu/wp-content/uploads/2017/04/bufr_opera_mf.zip) which seems to support the conversion of OPERA BUFR files to ODIM_H5 (which is supported by $\\omega radlib$). However, you have to build it yourself.\n\nIt would be great if someone could add a tutorial on how to use OPERA BUFR software together with $\\omega radlib$!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
d0dff23ee017f8bdb3b8fd2af99542c944db0900
39,331
ipynb
Jupyter Notebook
notebooks/3_LSTM_Model.ipynb
rafiparvez/ICUReadmissionPrediction
fe2c2baafe6b9e730042929b9cb295528a921d9a
[ "MIT" ]
null
null
null
notebooks/3_LSTM_Model.ipynb
rafiparvez/ICUReadmissionPrediction
fe2c2baafe6b9e730042929b9cb295528a921d9a
[ "MIT" ]
null
null
null
notebooks/3_LSTM_Model.ipynb
rafiparvez/ICUReadmissionPrediction
fe2c2baafe6b9e730042929b9cb295528a921d9a
[ "MIT" ]
1
2021-11-10T04:35:43.000Z
2021-11-10T04:35:43.000Z
87.208426
18,374
0.779461
[ [ [ "import os\nimport pandas as pd\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM,Dropout\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.optimizers import RMSprop,Adam\nfrom keras.layers import Dense, Input, TimeDistributed, Masking\nfrom keras.models import Model\nimport sys\n\nrandom.seed(49297)\n\nmimic3_path=\"/home/rafiparvez1706/mimic\"\nvariable_map_file='../resources/itemid_to_variable_map.csv'\nvariable_ranges_file='../resources/variable_ranges.csv'\nchannel_info_file ='../resources/channel_info.json'\noutput_path =\"../data/root\"", "_____no_output_____" ], [ "train_dir = os.path.join(output_path, 'train')\ntrain_id_dirs_orig = [os.path.join(train_dir, subdir) for subdir in os.listdir(train_dir)]", "_____no_output_____" ], [ "valid_dir = os.path.join(output_path, 'valid')\nvalid_id_dirs_orig = [os.path.join(valid_dir, subdir) for subdir in os.listdir(valid_dir)]", "_____no_output_____" ], [ "maxlen=0\nlens=[]\ntrain_id_dirs=train_id_dirs_orig[0:1000]\nfor cnt, folder in enumerate(train_id_dirs):\n Xfname = os.path.join(folder,'cleaned_timeseries.csv')\n df_X = pd.read_csv(Xfname)\n X_train=df_X.loc[:, df_X.columns != 'Hours'].values\n lens.append(X_train.shape[0])\n maxlen=max(maxlen,X_train.shape[0])\n \n \n#maxlen=500\nprint(maxlen)", "_____no_output_____" ], [ "maxlen=200", "_____no_output_____" ] ], [ [ "## Preparing Sequential Data", "_____no_output_____" ] ], [ [ "def transform_data(folder,maxlen):\n Xfname = os.path.join(folder,'cleaned_timeseries.csv')\n df_X = pd.read_csv(Xfname)\n X_train=df_X.loc[:, df_X.columns != 'Hours'].values\n \n X_train = sequence.pad_sequences(X_train.T, dtype='float32', maxlen=maxlen, padding='post', truncating='post')\n X_train=X_train.T\n X_train= X_train.reshape(1, X_train.shape[0],X_train.shape[1])\n \n \n X_train=X_train.astype(np.float32)\n \n yfname = os.path.join(folder,'stays.csv')\n \n df_y = pd.read_csv(yfname)\n IsReadmitted = df_y.IsReadmitted.values[0].astype(np.float32)\n y_train=np.empty(len(df_X))\n y_train.fill(IsReadmitted)\n y_train = y_train.astype(np.float32)\n y_train = [y_train]\n y_train = sequence.pad_sequences(y_train, dtype='float32', maxlen=maxlen, padding='post', truncating='post')\n y_train=y_train.reshape(y_train.shape[0],y_train.shape[1],1) \n \n return X_train,y_train", "_____no_output_____" ], [ "X_train,y_train=transform_data(train_id_dirs[0], maxlen)\nX_train.shape", "_____no_output_____" ], [ "y_train.shape", "_____no_output_____" ] ], [ [ "## Defining Model", "_____no_output_____" ] ], [ [ "model = Sequential()\nnb_samples=len(train_id_dirs)\ninput_dim=58\n\nmodel.add(LSTM(output_dim=256, input_shape=(maxlen, 58), return_sequences=True))\nmodel.add(Dropout(0.10))\n\nmodel.add(LSTM(output_dim=128, return_sequences=True))\nmodel.add(Dropout(0.10))\n\nmodel.add(TimeDistributed(Dense(1, activation='sigmoid')))\n\noptimizer=Adam(lr=0.001, beta_1=0.5)\nmodel.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])\n\n# print layer shapes and model parameters\nmodel.summary()", "/home/rafiparvez1706/anaconda3/lib/python3.5/site-packages/ipykernel/__main__.py:5: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(return_sequences=True, units=256, input_shape=(200, 58))`\n" ], [ "train_id_dirs=random.sample(train_id_dirs_orig, 5000)\nTotal_count = len(train_id_dirs)", 
"_____no_output_____" ], [ "def generate_batches(files, batch_size):\n counter = 0\n while True:\n counter = (counter + 1) % len(files)\n folder = files[counter]\n X_train,y_train=transform_data(folder, maxlen) \n\n for cbatch in range(0, X_train.shape[0], batch_size):\n yield (X_train[cbatch:(cbatch + batch_size),:,:], y_train[cbatch:(cbatch + batch_size)])\n \n \n for cbatch in range(0, X_train.shape[0], batch_size):\n yield (X_train[cbatch:(cbatch + batch_size),:,:], y_train[cbatch:(cbatch + batch_size)])\n\n\n#train_files = [train_bundle_loc + \"bundle_\" + cb.__str__() for cb in range(nb_train_bundles)]\nbatch_size=8\nsamples_per_epoch=len(train_id_dirs)\nnum_epoch=4\ngen = generate_batches(files=train_id_dirs, batch_size=batch_size)\n\nhistory = model.fit_generator(gen, samples_per_epoch=samples_per_epoch, nb_epoch=num_epoch,verbose=1)", "Epoch 1/1\n\r 1/5000 [..............................] - ETA: 985s - loss: 0.0152 - acc: 1.0000" ], [ "model.save_weights(\"lstm_model.h5\")", "_____no_output_____" ] ], [ [ "## Model Evaluation", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf = pd.DataFrame(train_id_dirs, columns=[\"trains\"])\ndf.to_csv('train_id_dirs_3.csv', index=False)", "_____no_output_____" ], [ "cv_id_dirs=random.sample(valid_id_dirs_orig, 2000)\nlen(cv_id_dirs)\n\nlist_pred=[]\nlist_lbl=[]\n\nfor cv_folder in cv_id_dirs:\n X_cv, y_cv = transform_data(cv_folder, maxlen)\n preds = model.predict(X_cv)\n #print(preds[0][:20])\n label = y_cv[:, 0, :].squeeze();\n prediction = preds[:, -1, :].squeeze()\n #print(label,prediction)\n list_lbl.append(label)\n list_pred.append(prediction)", "_____no_output_____" ], [ "from sklearn.metrics import roc_curve, auc\n# compute ROC curve for predictions\nrnn_roc = roc_curve(list_lbl,list_pred)\n\n# compute the area under the curve of prediction ROC\nrnn_auc = auc(rnn_roc[0], rnn_roc[1])\n\nplt.figure(figsize=(7, 5))\nline_kwargs = {'linewidth': 4, 'alpha': 0.8}\nplt.plot(rnn_roc[0], rnn_roc[1], label='LSTM AUROC: %0.3f' % rnn_auc, color='#6AA84F', **line_kwargs)\nplt.legend(loc='lower right', fontsize=20)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0dff3742ff452805b49c22f84697cbaa4afde3c
4,397
ipynb
Jupyter Notebook
Practical_Statistics/Probability/Probability.ipynb
christineton/Udacity_Data_Analysis_Nanodegree
57d479e508d1740fc66295d00e0eaba3d9b38696
[ "MIT" ]
null
null
null
Practical_Statistics/Probability/Probability.ipynb
christineton/Udacity_Data_Analysis_Nanodegree
57d479e508d1740fc66295d00e0eaba3d9b38696
[ "MIT" ]
null
null
null
Practical_Statistics/Probability/Probability.ipynb
christineton/Udacity_Data_Analysis_Nanodegree
57d479e508d1740fc66295d00e0eaba3d9b38696
[ "MIT" ]
null
null
null
20.643192
79
0.507846
[ [ [ "# Coin Flips and Die Rolls", "_____no_output_____" ] ], [ [ "# import numpy\nimport numpy as np", "_____no_output_____" ] ], [ [ "### 1. Two fair coin flips produce exactly two heads", "_____no_output_____" ] ], [ [ "# simulate 1 million tests of two fair coin flips\ntests = np.random.randint(2, size=(int(1e6), 2))\n\n# sums of all tests\ntest_sums = tests.sum(axis=1)\n\n# proportion of tests that produced exactly two heads\n(test_sums == 0).mean()", "_____no_output_____" ] ], [ [ "### 2. Three fair coin flips produce exactly one head", "_____no_output_____" ] ], [ [ "# simulate 1 million tests of three fair coin flips\ntests = np.random.randint(2, size=(int(1e6), 3))\n\n# sums of all tests\ntest_sums = tests.sum(axis=1)\n\n# proportion of tests that produced exactly one head\n(test_sums == 2).mean()", "_____no_output_____" ] ], [ [ "### 3. Three bias coin flips with P(H) = 0.6 produce exactly one head", "_____no_output_____" ] ], [ [ "# simulate 1 million tests of three bias coin flips\n# hint: use np.random.choice()\ntests = np.random.choice([0, 1], size=(int(1e6), 3), p=[0.6, 0.4])\n\n# sums of all tests\ntest_sums = tests.sum(axis=1)\n\n# proportion of tests that produced exactly one head\n(test_sums == 2).mean()", "_____no_output_____" ] ], [ [ "### 4. A die rolls an even number", "_____no_output_____" ] ], [ [ "# simulate 1 million tests of one die roll\ntests = np.random.choice(np.arange(1, 7), size=int(1e6))\n\n# proportion of tests that produced an even number\n(tests % 2 == 0).mean()", "_____no_output_____" ] ], [ [ "### 5. Two dice roll a double", "_____no_output_____" ] ], [ [ "# simulate the first million die rolls\nfirst = np.random.choice(np.arange(6), size=int(1e6))\n\n# simulate the second million die rolls\nsecond = np.random.choice(np.arange(6), size=int(1e6))\n\n# proportion of tests where the 1st and 2nd die rolled the same number\n(first == second).mean()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0e0001aebb6a27a8a73db512e65f52f98ab784f
17,119
ipynb
Jupyter Notebook
catVsDogv2.ipynb
mukul54/A-Simple-Cat-vs-Dog-Classifier-
112bde011bf332a2c67bfa3c26565ac995fcd635
[ "MIT" ]
5
2019-06-09T15:11:52.000Z
2020-02-21T12:30:57.000Z
catVsDogv2.ipynb
mukul54/A-Simple-Cat-vs-Dog-Classifier-
112bde011bf332a2c67bfa3c26565ac995fcd635
[ "MIT" ]
null
null
null
catVsDogv2.ipynb
mukul54/A-Simple-Cat-vs-Dog-Classifier-
112bde011bf332a2c67bfa3c26565ac995fcd635
[ "MIT" ]
2
2020-03-21T14:51:14.000Z
2020-11-16T00:35:55.000Z
71.329167
2,497
0.60208
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport torch\nimport matplotlib.pyplot as plt\nimport torchvision\nimport csv\nimport glob\nimport cv2\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom random import shuffle\nimport glob\n#from torchsummary import summary\n\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n%matplotlib inline\n# Any results you write to the current directory are saved as output.", "['sample_submission.csv', 'train', 'test']\n" ], [ "\n#classes = ('plane', 'car', 'bird', 'cat',\n# 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')", "_____no_output_____" ], [ "class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(3, 50, 5)\n self.pool1 = nn.MaxPool2d(2, 2)\n \n self.conv2 = nn.Conv2d(50, 100, 7)\n self.pool2 = nn.MaxPool2d(2,2)\n \n self.fc1 = nn.Linear(100 * 12 * 12, 120)\n self.fc2 = nn.Linear(120, 100)\n self.fc3 = nn.Linear(100, 2)\n\n def forward(self, x):\n x = self.pool1(F.relu(self.conv1(x)))\n x = self.pool2(F.relu(self.conv2(x)))\n x = x.view(-1, 100 * 12 * 12)\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = F.sigmoid(self.fc3(x))\n return x\n\nnet = Net()\nprint(net)", "Net(\n (conv1): Conv2d(3, 50, kernel_size=(5, 5), stride=(1, 1))\n (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (conv2): Conv2d(50, 100, kernel_size=(7, 7), stride=(1, 1))\n (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n (fc1): Linear(in_features=14400, out_features=120, bias=True)\n (fc2): Linear(in_features=120, out_features=100, bias=True)\n (fc3): Linear(in_features=100, out_features=2, bias=True)\n)\n" ], [ "'''\n#alternate way to create a list of file name and labels\n\nimport numpy as np\nimport os\nPATH = '../input/'\nfnames = np.array([f'train/{f}' for f in sorted(os.listdir(f'{PATH}train'))])\nlabels = np.array([(0 if 'cat' in fname else 1) for fname in fnames])\nprint(fnames[0:100] , labels[0:100])\n'''", "_____no_output_____" ], [ " shuffle_data = True # shuffle the addresses before saving\n cat_dog_train_path = '../input/train/*.jpg'\n # read addresses and labels from the 'train' folder\n addrs = glob.glob(cat_dog_train_path)\n labels = [ [1,0] if 'cat' in addr else [0,1] for addr in addrs] # 1 = Cat, 0 = Dog\n # to shuffle data\n if shuffle_data:\n c = list(zip(addrs, labels))\n shuffle(c)\n addrs, labels = zip(*c)\n print(labels[0:10])\n \n # Divide the hata into 60% train, 20% validation, and 20% test\n train_addrs = addrs[0:int(0.6*len(addrs))]\n train_labels = labels[0:int(0.6*len(labels))]\n #train_addrs.size\n \n val_addrs = addrs[int(0.6*len(addrs)):int(0.8*len(addrs))]\n val_labels = labels[int(0.6*len(addrs)):int(0.8*len(addrs))]\n \n test_addrs = addrs[int(0.8*len(addrs)):]\n test_labels = labels[int(0.8*len(labels)):]\n \n ", "([0, 1], [1, 0], [1, 0], [0, 1], [0, 1], [1, 0], [0, 1], [0, 1], [1, 0], [1, 0])\n" ], [ " # loop over train addresses\n train_data = []\n for i in range(len(train_addrs[:1000])):\n # print 
how many images are saved every 1000 images\n if i % 1000 == 0 and i > 1:\n print ('Train data: {}/{}'.format(i, len(train_addrs)))\n # read an image and resize to (64, 64)\n # cv2 loads images as BGR, so convert to RGB\n addr = train_addrs[i]\n img = cv2.imread(addr)\n img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n train_data.append([np.array(img), np.array(train_labels[i])])\n shuffle(train_data)\n np.save('train_data.npy', train_data)\n \n \n \n \n \n # loop over test addresses\n #creating test data\n test_data = []\n for i in range(len(test_addrs[:1000])):\n # print how many images are saved every 1000 images\n if i % 1000 == 0 and i > 1:\n print ('Test data: {}/{}'.format(i, len(test_addrs)))\n # read an image and resize to (64, 64)\n # cv2 loads images as BGR, so convert to RGB\n addr = test_addrs[i]\n img = cv2.imread(addr)\n img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n test_data.append([np.array(img), np.array(test_labels[i])])\n shuffle(test_data)\n np.save('test_data.npy', test_data)\n \n \n \n # loop over val addresses\n val_data = []\n for i in range(len(val_addrs[:1000])):\n # print how many images are saved every 1000 images\n if i % 1000 == 0 and i > 1:\n print ('Val data: {}/{}'.format(i, len(val_addrs)))\n # read an image and resize to (64, 64)\n # cv2 loads images as BGR, so convert to RGB\n addr = val_addrs[i]\n img = cv2.imread(addr)\n img = cv2.resize(img, (64, 64), interpolation=cv2.INTER_CUBIC)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n val_data.append([np.array(img), np.array(val_labels[i])])\n shuffle(val_data)\n np.save('val_data.npy', val_data)\n #print(val_data[1])", "_____no_output_____" ], [ "from torch.autograd import Variable\nX = np.array([i[0] for i in train_data]).reshape(-1,64,64,3)\nX = Variable(torch.Tensor(X))\nX = X.reshape(-1,64,64,3)\nX = X.permute(0,3,1,2)\nprint(X.shape)\n#Y = Variable(torch.Tensor(Y))\n\nY = np.array([i[1] for i in train_data])\ntarget = Variable(torch.Tensor(Y))\ntarget = target.type(torch.LongTensor)\n\nprint(target.shape)\n#print(target)", "torch.Size([1000, 3, 64, 64])\ntorch.Size([1000, 2])\n" ], [ "criterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(net.parameters(), lr = 0.0001, momentum = 0.9)", "_____no_output_____" ], [ "for epoch in range(100):\n running_loss = 0.0\n optimizer.zero_grad() #zero the parameter gradients\n output = net(X)\n \n loss = criterion(output, torch.max(target, 1)[1])\n \n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n print(epoch, ':', running_loss)", "/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. 
Use torch.sigmoid instead.\")\n" ], [ "test = np.array([i[0] for i in test_data]).reshape(-1,64,64,3)\ntest = Variable(torch.Tensor(test))\ntest = test.reshape(-1,64,64,3)\ntest = test.permute(0,3,1,2)\nprint(test.shape)\n#Y = Variable(torch.Tensor(Y))\n\ntlabels = np.array([i[1] for i in test_data])\ntlabels = Variable(torch.Tensor(tlabels))\ntlabels = tlabels.type(torch.long)\n\nprint(tlabels.shape)\nprint(tlabels)", "torch.Size([1000, 3, 64, 64])\ntorch.Size([1000, 2])\ntensor([[1, 0],\n [0, 1],\n [0, 1],\n ...,\n [0, 1],\n [0, 1],\n [1, 0]])\n" ], [ "correct = 0\ntotal = 0\nwith torch.no_grad():\n for data in zip(X,target):\n images, labels = data\n images = images.reshape(1,3,64,64)\n outputs = net(images)\n _, predicted = torch.max(outputs, 1)\n #total += labels.size(0)\n if((predicted == 0 and labels[0] == 1) or (predicted == 1 and labels[1]==1) ):\n correct+=1\n #correct += (predicted == labels).sum().item()\n #print(outputs,labels)\ntotal = X.shape[0]\nprint('Train accuracy of the network on the' + str(total) + 'train images: %f %%' % (\n 100 * (correct*1.0) / total) )\nprint(correct, total)", "/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n" ], [ "correct = 0\ntotal = 0\nwith torch.no_grad():\n for data in zip(test,tlabels):\n images, labels = data\n images = images.reshape(1,3,64,64)\n outputs = net(images)\n _, predicted = torch.max(outputs, 1)\n #total += labels.size(0)\n if((predicted == 0 and labels[0] == 1) or (predicted == 1 and labels[1]==1) ):\n correct += 1\n \ntotal = test.shape[0]\nprint('Test accuracy of the network on the ' + str(total) + ' test images: %f %%' % (\n 100 * (correct*1.0) / total) )\nprint(correct, total)", "/opt/conda/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0e005a2f7df223eecc61fa775cb0fd9cef6e642
18,866
ipynb
Jupyter Notebook
MKTD-Pytorch - 3 - Model training.ipynb
ggdupont/mktd-pytorch
c6e064ee2092a19571330915fcee801249763ad2
[ "MIT" ]
3
2019-03-29T09:48:45.000Z
2019-03-29T09:49:10.000Z
MKTD-Pytorch - 3 - Model training.ipynb
ggdupont/mktd-pytorch
c6e064ee2092a19571330915fcee801249763ad2
[ "MIT" ]
null
null
null
MKTD-Pytorch - 3 - Model training.ipynb
ggdupont/mktd-pytorch
c6e064ee2092a19571330915fcee801249763ad2
[ "MIT" ]
null
null
null
58.228395
10,496
0.762165
[ [ [ "# Exercices\nWith each exercice will teach you one aspect of deep learning. The process of machine learning can be decompose in 7 steps :\n\n* Data preparation\n* Model definition\n* Model training\n* Model evaluation\n* Hyperparameter tuning\n* Prediction", "_____no_output_____" ], [ "## 3 - Model training\n\n- 3.1 Metrics : evaluate model\n- 3.2 Loss function (mean square error, cross entropy)\n- 3.3 Optimizer function (stochastic gradient descent)\n- 3.4 Batch size, epoch number", "_____no_output_____" ], [ "### Load dataset", "_____no_output_____" ] ], [ [ "import torchvision.datasets as dset\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\ndata_path = './data'\n\n#trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])\ntrans = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor()])\n\n# if not exist, download mnist dataset\ntrain_set = dset.MNIST(root=data_path, train=True, transform=trans, download=True)\ntest_set = dset.MNIST(root=data_path, train=False, transform=trans, download=True)\n\nbatch = 4\n\ndata_train_loader = DataLoader(train_set, batch_size=batch, shuffle=True, num_workers=8)\ndata_test_loader = DataLoader(test_set, batch_size=batch, num_workers=8)\n\nclasses = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')", "_____no_output_____" ] ], [ [ "### Define the network architecture", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__()\n # 1 input image channel, 6 output channels, 5x5 square convolution\n # kernel\n self.conv1 = nn.Conv2d(1, 6, 5)\n self.conv2 = nn.Conv2d(6, 16, 5)\n # an affine operation: y = Wx + b\n self.fc1 = nn.Linear(16 * 5 * 5, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 10)\n\n def forward(self, x):\n # Max pooling over a (2, 2) window\n x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))\n # If the size is a square you can only specify a single number\n x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n x = x.view(-1, self.num_flat_features(x))\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features\n\n\nleNet = Net()\nprint(leNet)", "Net(\n (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))\n (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n (fc1): Linear(in_features=400, out_features=120, bias=True)\n (fc2): Linear(in_features=120, out_features=84, bias=True)\n (fc3): Linear(in_features=84, out_features=10, bias=True)\n)\n" ] ], [ [ "### Define loss criterion and optimizer", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\ncriterion = nn.MSELoss()\noptimizer = optim.SGD(leNet.parameters(), lr=0.01)", "_____no_output_____" ] ], [ [ "### Training loop", "_____no_output_____" ] ], [ [ "for epoch in range(3): # loop over the dataset multiple times\n leNet.train()\n \n running_loss = 0.0\n for i, (images, labels) in enumerate(data_train_loader):\n optimizer.zero_grad()\n\n output = leNet(images)\n \n # align vectors labels <=> outputs\n label_vect = torch.zeros(4, 10, dtype=torch.float)\n for j in range(0, len(labels)):\n label_vect[j, labels[j]] = 1.0 \n loss = criterion(output, label_vect)\n \n loss.backward()\n optimizer.step()\n\n # print 
statistics\n running_loss += loss.item()\n \n print('[{:d}] loss: {:.5f}'.format(epoch + 1, running_loss / (batch*len(data_train_loader))))\n \nprint('Finished Training')", "[1] loss: 0.01518\n[2] loss: 0.00453\n[3] loss: 0.00311\nFinished Training\n" ] ], [ [ "### Test the model", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\ndef imshow(images, labels):\n npimg = images.numpy()\n plt.imshow(np.transpose(npimg, (1, 2, 0)))\n plt.title(\"Ground Truth: {}\".format(labels))", "_____no_output_____" ], [ "import torchvision\n\ndataiter = iter(data_test_loader)\nimages, labels = dataiter.next()\n\n# print images\nimshow(torchvision.utils.make_grid(images), labels)\n", "_____no_output_____" ], [ "outputs = leNet(images)\n\n_, predicted = torch.max(outputs, 1)\n\nprint('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4)))", "Predicted: 7 2 1 0\n" ] ], [ [ "### Saving leNet", "_____no_output_____" ] ], [ [ "torch.save({\n 'epoch': 1,\n 'model_state_dict': leNet.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss,\n }, 'checkpoint-MKTD-pytorch-3.last')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d0e0361d41ef11f2ea5e3df832787a5adc871df0
32,346
ipynb
Jupyter Notebook
notebooks/statistics_Phase_2_vgsc_kdr_associations.ipynb
alimanfoo/ag1000g-phase2-vgsc-report
d2c2337a1ca443c1fbb7ac7ebbc8033e1fa3af12
[ "MIT" ]
2
2021-10-30T21:42:36.000Z
2021-10-30T21:45:43.000Z
notebooks/statistics_Phase_2_vgsc_kdr_associations.ipynb
alimanfoo/ag1000g-phase2-vgsc-report
d2c2337a1ca443c1fbb7ac7ebbc8033e1fa3af12
[ "MIT" ]
69
2019-07-31T14:31:07.000Z
2020-12-08T12:04:48.000Z
notebooks/statistics_Phase_2_vgsc_kdr_associations.ipynb
alimanfoo/ag1000g-phase2-vgsc-report
d2c2337a1ca443c1fbb7ac7ebbc8033e1fa3af12
[ "MIT" ]
4
2020-03-12T14:09:40.000Z
2022-03-19T16:26:16.000Z
115.521429
12,416
0.856366
[ [ [ "%run setup.ipynb\nfrom scipy.stats import dirichlet\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "import traceback\nimport logging\n\nlogger = logging.getLogger('ag1000g-phase2')\nlogger.setLevel(logging.DEBUG)\n\n# create console handler with a higher log level\nch = logging.StreamHandler()\nch.setLevel(logging.INFO)\n\n# create formatter and add it to the handlers\nformatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\n# add the handlers to logger\nlogger.addHandler(ch)", "_____no_output_____" ], [ "#generate counts\nFpos = 2422652\nSpos = 2422651\n\ncallset = phase2_ar1.callset_pass_biallelic['2L']\ng = allel.GenotypeChunkedArray(callset['calldata']['genotype'])\npos = allel.SortedIndex(callset['variants']['POS'])\ndf_meta = pd.read_csv('../phase2.AR1/samples/samples.meta.txt', sep='\\t')\n\nFb = pos.values == Fpos\nSb = pos.values == Spos\n\ndef het_pop(pop):\n FSb = Fb + Sb\n popbool = np.array(df_meta.population == pop)\n popg = g.compress(popbool, axis=1)\n popgr = popg.compress(FSb, axis=0)\n a = np.asarray(popgr.to_n_alt())\n return a", "_____no_output_____" ], [ "gagam = het_pop('GAgam')\ncagam = het_pop('CMgam')\nnp.save('../data/gabon_n_alt.npy', gagam)\nnp.save('../data/cameroon_n_alt.npy', cagam)", "_____no_output_____" ], [ "def run_fs_het_analysis(path, ns=1_000_000):\n\n ac = np.load(path).T\n logger.info(f\"Loaded {path}\")\n \n # assuming col1 is F, col2 is S\n assert ac.sum(axis=1).max() == 2\n\n tot_alleles = ac.shape[0] * 2\n \n n_samples = ac.shape[0]\n logger.info(f\"{n_samples} samples found\")\n\n wt_alleles = tot_alleles - ac.sum()\n wt_alleles\n\n f_alleles = ac[:, 0].sum()\n s_alleles = ac[:, 1].sum()\n\n alpha = [1 + wt_alleles, 1 + f_alleles, 1 + s_alleles]\n \n logger.info(f\"Dirichlet alpha set to {alpha}\")\n \n diric = dirichlet(alpha)\n wt, f, s = diric.mean()\n logger.info(\n f\"Mean of dirichlet- wt: {wt:.2f}, f:{f:.2f}, s:{s:.2f}\")\n \n # this is what we observed\n is_het = (ac[:, 0] == ac[:, 1]) & (ac.sum(axis=1) == 2)\n tot_fs_hets = is_het.sum()\n \n logger.info(\n f\"In the AC data we observe {tot_fs_hets} F-S hets\")\n\n logger.info(f\"Beginning monte carlo analysis, n={ns}\")\n \n # draw 1m dirichlet observations of allele frequency\n v = np.random.dirichlet(alpha, size=ns)\n\n # for each of the 1m, sample n_samples, \n # and count how many \"F/S\" hets we observe\n o = np.zeros(ns, dtype=\"int\")\n for i in range(v.shape[0]):\n x = np.random.multinomial(2, v[i], size=n_samples)\n o[i] = np.sum((x[:, 1] == 1) & (x[:, 2] == 1))\n\n fig, ax = plt.subplots(figsize=(4, 4))\n bins = np.arange(0, max(o.max(), tot_fs_hets) + 5, 1)\n count, bins, patches = ax.hist(\n o, bins=bins, density=True)\n\n ymin, ymax = ax.get_ylim()\n ax.vlines([tot_fs_hets], ymin=ymin, ymax=ymax)\n sns.despine(ax=ax)\n \n grt = tot_fs_hets >= o\n les = tot_fs_hets <= o\n \n logger.info(\n \"{:.3f} of simulated values are greater than or equal to the observed\".format(\n 1 - np.mean(grt)))\n \n logger.info(\n \"{:.3f} of simulated values are less than or equal to the observed\".format(\n 1 - np.mean(les)))", "_____no_output_____" ], [ "run_fs_het_analysis(\"../data/gabon_n_alt.npy\")", "2019-11-21 11:38:15,961 - ag1000g-phase2 - INFO - Loaded ../data/gabon_n_alt.npy\n2019-11-21 11:38:15,963 - ag1000g-phase2 - INFO - 69 samples found\n2019-11-21 11:38:15,964 - ag1000g-phase2 - INFO - Dirichlet alpha set to [1, 94, 46]\n2019-11-21 11:38:15,965 - 
ag1000g-phase2 - INFO - Mean of dirichlet- wt: 0.01, f:0.67, s:0.33\n2019-11-21 11:38:15,966 - ag1000g-phase2 - INFO - In the AC data we observe 41 F-S hets\n2019-11-21 11:38:15,967 - ag1000g-phase2 - INFO - Beginning monte carlo analysis, n=1000000\n2019-11-21 11:38:54,452 - ag1000g-phase2 - INFO - 0.005 of simulated values are greater than or equal to the observed\n2019-11-21 11:38:54,454 - ag1000g-phase2 - INFO - 0.991 of simulated values are less than or equal to the observed\n" ], [ "run_fs_het_analysis(\"../data/cameroon_n_alt.npy\")", "2019-11-21 11:38:54,686 - ag1000g-phase2 - INFO - Loaded ../data/cameroon_n_alt.npy\n2019-11-21 11:38:54,688 - ag1000g-phase2 - INFO - 297 samples found\n2019-11-21 11:38:54,689 - ag1000g-phase2 - INFO - Dirichlet alpha set to [190, 94, 313]\n2019-11-21 11:38:54,690 - ag1000g-phase2 - INFO - Mean of dirichlet- wt: 0.32, f:0.16, s:0.52\n2019-11-21 11:38:54,691 - ag1000g-phase2 - INFO - In the AC data we observe 50 F-S hets\n2019-11-21 11:38:54,691 - ag1000g-phase2 - INFO - Beginning monte carlo analysis, n=1000000\n2019-11-21 11:40:10,898 - ag1000g-phase2 - INFO - 0.410 of simulated values are greater than or equal to the observed\n2019-11-21 11:40:10,899 - ag1000g-phase2 - INFO - 0.539 of simulated values are less than or equal to the observed\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0e041c19dfd7e7927bb776b72c2c4693b5853a4
6,047
ipynb
Jupyter Notebook
FeatureCollection/column_statistics_by_group.ipynb
pberezina/earthengine-py-notebooks
4cbe3c52bcc9ed3f1337bf097aa5799442991a5e
[ "MIT" ]
1
2020-03-20T19:39:34.000Z
2020-03-20T19:39:34.000Z
FeatureCollection/column_statistics_by_group.ipynb
pberezina/earthengine-py-notebooks
4cbe3c52bcc9ed3f1337bf097aa5799442991a5e
[ "MIT" ]
null
null
null
FeatureCollection/column_statistics_by_group.ipynb
pberezina/earthengine-py-notebooks
4cbe3c52bcc9ed3f1337bf097aa5799442991a5e
[ "MIT" ]
null
null
null
35.570588
435
0.552175
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/column_statistics_by_group.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/column_statistics_by_group.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=FeatureCollection/column_statistics_by_group.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/column_statistics_by_group.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.\nThe following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.", "_____no_output_____" ] ], [ [ "import subprocess\n\ntry:\n import geehydro\nexcept ImportError:\n print('geehydro package not installed. Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geehydro'])", "_____no_output_____" ] ], [ [ "Import libraries", "_____no_output_____" ] ], [ [ "import ee\nimport folium\nimport geehydro", "_____no_output_____" ] ], [ [ "Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. ", "_____no_output_____" ] ], [ [ "try:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize()", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThis step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function. 
\nThe optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.", "_____no_output_____" ] ], [ [ "Map = folium.Map(location=[40, -100], zoom_start=4)\nMap.setOptions('HYBRID')", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Load a collection of US census blocks.\nblocks = ee.FeatureCollection('TIGER/2010/Blocks')\n\n# Compute sums of the specified properties, grouped by state code.\nsums = blocks \\\n .filter(ee.Filter.And(\n ee.Filter.neq('pop10', {}),\n ee.Filter.neq('housing10', {}))) \\\n .reduceColumns(**{\n 'selectors': ['pop10', 'housing10', 'statefp10'],\n 'reducer': ee.Reducer.sum().repeat(2).group(**{\n 'groupField': 2,\n 'groupName': 'state-code',\n })\n})\n\n# Print the resultant Dictionary.\nprint(sums.getInfo())\n\n", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)\nMap", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0e042ecefa087d723633e3bdbab1ca5cc920c2e
4,071
ipynb
Jupyter Notebook
ipynb/Mozambique.ipynb
RobertRosca/oscovida.github.io
d609949076e3f881e38ec674ecbf0887e9a2ec25
[ "CC-BY-4.0" ]
null
null
null
ipynb/Mozambique.ipynb
RobertRosca/oscovida.github.io
d609949076e3f881e38ec674ecbf0887e9a2ec25
[ "CC-BY-4.0" ]
null
null
null
ipynb/Mozambique.ipynb
RobertRosca/oscovida.github.io
d609949076e3f881e38ec674ecbf0887e9a2ec25
[ "CC-BY-4.0" ]
null
null
null
28.468531
164
0.509948
[ [ [ "# Mozambique\n\n* Homepage of project: https://oscovida.github.io\n* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Mozambique.ipynb)", "_____no_output_____" ] ], [ [ "import datetime\nimport time\n\nstart = datetime.datetime.now()\nprint(f\"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}\")", "_____no_output_____" ], [ "%config InlineBackend.figure_formats = ['svg']\nfrom oscovida import *", "_____no_output_____" ], [ "overview(\"Mozambique\");", "_____no_output_____" ], [ "# load the data\ncases, deaths, region_label = get_country_data(\"Mozambique\")\n\n# compose into one table\ntable = compose_dataframe_summary(cases, deaths)\n\n# show tables with up to 500 rows\npd.set_option(\"max_rows\", 500)\n\n# display the table\ntable", "_____no_output_____" ] ], [ [ "# Explore the data in your web browser\n\n- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Mozambique.ipynb)\n- and wait (~1 to 2 minutes)\n- Then press SHIFT+RETURN to advance code cell to code cell\n- See http://jupyter.org for more details on how to use Jupyter Notebook", "_____no_output_____" ], [ "# Acknowledgements:\n\n- Johns Hopkins University provides data for countries\n- Robert Koch Institute provides data for within Germany\n- Open source and scientific computing community for the data tools\n- Github for hosting repository and html files\n- Project Jupyter for the Notebook and binder service\n- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))\n\n--------------------", "_____no_output_____" ] ], [ [ "print(f\"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and \"\n f\"deaths at {fetch_deaths_last_execution()}.\")", "_____no_output_____" ], [ "# to force a fresh download of data, run \"clear_cache()\"", "_____no_output_____" ], [ "print(f\"Notebook execution took: {datetime.datetime.now()-start}\")\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
d0e04fd4058845a28ca6b33ada84154488b24fd7
301,885
ipynb
Jupyter Notebook
notebooks/Blog_JournalPlots.ipynb
kristianeschenburg/kristianeschenburg.netlify.app
d022d3a37c2ad5253844ceda552c17eed27f1c09
[ "MIT" ]
null
null
null
notebooks/Blog_JournalPlots.ipynb
kristianeschenburg/kristianeschenburg.netlify.app
d022d3a37c2ad5253844ceda552c17eed27f1c09
[ "MIT" ]
null
null
null
notebooks/Blog_JournalPlots.ipynb
kristianeschenburg/kristianeschenburg.netlify.app
d022d3a37c2ad5253844ceda552c17eed27f1c09
[ "MIT" ]
null
null
null
509.940878
257,548
0.933137
[ [ [ "# Making journal-quality figures using Matplotlib", "_____no_output_____" ] ], [ [ "import numpy as np\n\nimport pandas as pd\nimport seaborn as sns\nsns.set(font_scale=1.4)\n\n%matplotlib inline\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom matplotlib.patches import ConnectionPatch\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import text", "_____no_output_____" ] ], [ [ "Let's begin by loading some data. We have two data frames that I've saved previously. They're columns are the exact same, except for those labeled ```r2``` -- the values of these columns depend on whether a previously fit linear transformation was univariate (1d) or multivariate (2d).\n\nWe'll start by loading the data, dropping unnecessary columns, renaming the columns of interest, and merging the two DataFrames.", "_____no_output_____" ] ], [ [ "data_dir = '/Users/kristianeschenburg/Desktop/Data/'\ndim1_file = '%sConnectopy/Templated/FieldModeling/L.1D.ConnectopyMaps.Merged.csv' % (data_dir)\ndim2_file = '%sConnectopy/Templated/FieldModeling/L.2D.ConnectopyMaps.Merged.csv' % (data_dir)\n\ndim1_data = pd.read_csv(dim1_file, index_col=0)\ndim1_data = dim1_data.rename(columns={'r2': 'r2_1d'})\ndim1_data = dim1_data.drop(columns={'w_signal', 'signal', 'w_corr', 'distance', 'diameter'})\n\ndim2_data = pd.read_csv(dim2_file, index_col=0)\ndim2_data = dim2_data.rename(columns={'r2': 'r2_2d'})\ndim2_data = dim2_data.drop(columns={'w_signal', 'signal', 'w_corr', 'distance', 'diameter'})\n\n\ndf = pd.merge(dim1_data, dim2_data, on=['source', 'target', 'subject', 'scale', 'sigma', 'cost', 'corr', 'dnorm'])\ndf = df[df['source'] != df['target']]\n\nmerged_GA = df.groupby(['source', 'target'], as_index=False).mean()\nfrom scipy.stats import ttest_ind\n\nttest_map = {treg: {'tstat': None, 'pval': None} for treg in df.source.unique()}\n\ntstats = []\npvals = []\n\nfor treg in df.source.unique():\n \n T = T = ttest_ind(merged_GA[merged_GA['source'] == treg]['r2_2d'],\n merged_GA[merged_GA['source'] == treg]['r2_1d'])\n \n ttest_map[treg]['tstat'] = T[0]\n ttest_map[treg]['pval'] = T[1]\n\n tstats.append(T[0])\n pvals.append(T[1])\n\nd = pd.DataFrame({'region': df.source.unique(),\n 't': tstats, \n 'p': pvals})", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "r2_diff = df.groupby(['source', 'target']).mean()[['r2_1d', 'r2_2d']].unstack()", "_____no_output_____" ] ], [ [ "Let's define the space over which we want to plot out data.", "_____no_output_____" ] ], [ [ "fig = plt.figure(constrained_layout=False, figsize=(18, 12))\ngs = fig.add_gridspec(nrows=3, ncols=3, hspace=0.3)\n\nax1 = fig.add_subplot(gs[0, :-1]);\nax1.set_title('[0, :-1]', fontsize=20)\n\nax2 = fig.add_subplot(gs[1:, :-1]);\nax2.set_title('[1:, :-1]', fontsize=20);\n\nax3 = fig.add_subplot(gs[0, 2]);\nax3.set_title('[0, 2]', fontsize=20);\n\nax4 = fig.add_subplot(gs[1, 2]);\nax4.set_title('[1, 2]', fontsize=20);\n\nax5 = fig.add_subplot(gs[2, 2]);\nax5.set_title('[2, 2]', fontsize=20);", "_____no_output_____" ], [ "df", "_____no_output_____" ], [ "fig = plt.figure(figsize=(20, 20))\ngs = fig.add_gridspec(nrows=3, ncols=3, hspace=1, wspace=0.15)\n\nax1 = fig.add_subplot(gs[0, :-1]);\ng = sns.barplot('region', 't', data=d, alpha=0.75, );\nax1.tick_params(rotation=90, labelsize=10)\nax1.axhline(y=2.035, c='k', linestyle='--');\nax1.axhline(y=-2.035, c='k', linestyle='--');\nax1.axhline(y=0, c='k');\nax1.grid(True)\nax1.set_ylabel('t', fontsize=20)\nax1.set_xlabel('Source Region', 
fontsize=20)\nax1.set_title('Linearity as function of dimension\\n2D > 1D', fontsize=20)\n\nax2 = fig.add_subplot(gs[1:, :-1]);\ng = sns.heatmap(r2_diff['r2_2d'] - r2_diff['r2_1d'], cmap='seismic')\nax2.set_xlabel('Target Region', fontsize=15)\nax2.set_ylabel('Source Region', fontsize=15)\nax2.tick_params(labelsize=12)\nax2.set_title(r'2d-$R^{2}$ > 1d-$R^{2}$', fontsize=20);\n\n\nax3 = fig.add_subplot(gs[0, 2]);\nax3.set_title('[0, 2]', fontsize=20);\nax3.scatter(df['dnorm'], np.log(df['r2_1d']), marker='.');\n\nax4 = fig.add_subplot(gs[1, 2]);\nax4.set_title('[1, 2]', fontsize=20);\nax4.scatter(df['dnorm'], np.log(df['r2_2d']), marker='.');\n\nax5 = fig.add_subplot(gs[2, 2]);\nax5.set_title('[2, 2]', fontsize=20);\nax5.scatter(np.log(df['corr']), np.log(df['r2_2d']), c='k', marker='.', alpha=0.5);\nax5.scatter(np.log(df['corr'])+10, np.log(df['r2_1d']), c='r', marker='.', alpha=0.5);\n", "/Users/kristianeschenburg/.local/lib/python3.7/site-packages/pandas-0.25.3-py3.7-macosx-10.7-x86_64.egg/pandas/core/series.py:856: RuntimeWarning: invalid value encountered in log\n result = getattr(ufunc, method)(*inputs, **kwargs)\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0e0503120bd3e04d820c3785618df742b482aa6
14,876
ipynb
Jupyter Notebook
Tutorials/Advanced_NN/9_NLP_Deep_Diving/9_BERT_Introduction.ipynb
lev1khachatryan/ASDS_CV
c9f0c0412002e929bcb7cc2fc6e5392977a9fa76
[ "MIT" ]
5
2019-12-13T16:26:10.000Z
2020-01-10T07:44:05.000Z
Tutorials/Advanced_NN/9_NLP_Deep_Diving/9_BERT_Introduction.ipynb
lev1khachatryan/ASDS_CV
c9f0c0412002e929bcb7cc2fc6e5392977a9fa76
[ "MIT" ]
1
2020-01-07T16:48:21.000Z
2020-03-18T18:43:37.000Z
Tutorials/Advanced_NN/9_NLP_Deep_Diving/9_BERT_Introduction.ipynb
lev1khachatryan/ASDS_CV
c9f0c0412002e929bcb7cc2fc6e5392977a9fa76
[ "MIT" ]
null
null
null
40.314363
634
0.634579
[ [ [ "# <div align=\"center\">BERT (Bidirectional Encoder Representations from Transformers) Explained: State of the art language model for NLP</div>\n---------------------------------------------------------------------\n\n<img src='asset/9_6/main.png'>\n\nyou can Find me on Github:\n> ###### [ GitHub](https://github.com/lev1khachatryan)", "_____no_output_____" ], [ "BERT (Bidirectional Encoder Representations from Transformers) is a recent paper published by researchers at Google AI Language. It has caused a stir in the Machine Learning community by presenting state-of-the-art results in a wide variety of NLP tasks, including Question Answering (SQuAD v1.1), Natural Language Inference (MNLI), and others.\n", "_____no_output_____" ], [ "BERT’s key technical innovation is applying the bidirectional training of Transformer, a popular attention model, to language modelling. This is in contrast to previous efforts which looked at a text sequence either from left to right or combined left-to-right and right-to-left training. The paper’s results show that a language model which is bidirectionally trained can have a deeper sense of language context and flow than single-direction language models. In the paper, the researchers detail a novel technique named ***Masked LM (MLM)*** which allows bidirectional training in models in which it was previously impossible.", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">Background</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "In the field of computer vision, researchers have repeatedly shown the value of transfer learning — pre-training a neural network model on a known task, for instance ImageNet, and then performing fine-tuning — using the trained neural network as the basis of a new purpose-specific model. In recent years, researchers have been showing that a similar technique can be useful in many natural language tasks.\n\nA different approach, which is also popular in NLP tasks and exemplified in the recent ELMo paper, is feature-based training. In this approach, a pre-trained neural network produces word embeddings which are then used as features in NLP models.", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">How BERT works</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "BERT makes use of ***Transformer***, an attention mechanism that learns contextual relations between words (or sub-words) in a text. In its vanilla form, ***Transformer includes two separate mechanisms*** — an ***encoder*** that reads the text input and a ***decoder*** that produces a prediction for the task. Since BERT’s goal is to generate a language model, only the encoder mechanism is necessary. The detailed workings of Transformer are described in a paper by Google.\n\n***As opposed to directional models, which read the text input sequentially (left-to-right or right-to-left), the Transformer encoder reads the entire sequence of words at once. Therefore it is considered bidirectional, though it would be more accurate to say that it’s non-directional. This characteristic allows the model to learn the context of a word based on all of its surroundings (left and right of the word).***\n\nThe chart below is a high-level description of the Transformer encoder. The input is a sequence of tokens, which are first embedded into vectors and then processed in the neural network. 
The output is a sequence of vectors of size H, in which each vector corresponds to an input token with the same index.\n\nWhen training language models, there is a challenge of defining a prediction goal. Many models predict the next word in a sequence (e.g. “The child came home from ... ”), a directional approach which inherently limits context learning. To overcome this challenge, BERT uses two training strategies:\n\n", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">Masked LM (MLM)</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "Before feeding word sequences into BERT, 15% of the words in each sequence are replaced with a ***MASK*** token. The model then attempts to predict the original value of the masked words, based on the context provided by the other, non-masked, words in the sequence. In technical terms, the prediction of the output words requires:", "_____no_output_____" ], [ "* Adding a classification layer on top of the encoder output.\n\n\n* Multiplying the output vectors by the embedding matrix, transforming them into the vocabulary dimension.\n\n\n* Calculating the probability of each word in the vocabulary with softmax.", "_____no_output_____" ], [ "<img src='asset/9_6/1.png'>", "_____no_output_____" ], [ "The BERT loss function takes into consideration only the prediction of the masked values and ignores the prediction of the non-masked words. As a consequence, the model converges slower than directional models, a characteristic which is offset by its increased context awareness.", "_____no_output_____" ], [ "***Note:*** In practice, the BERT implementation is slightly more elaborate and doesn’t replace all of the 15% masked words:\n\nTraining the language model in BERT is done by predicting 15% of the tokens in the input, that were randomly picked. These tokens are pre-processed as follows – 80% are replaced with a **MASK** token, 10% with a random word, and 10% use the original word. The intuition that led the authors to pick this approach is as follows:\n\n* If we used [MASK] 100% of the time the model wouldn’t necessarily produce good token representations for non-masked words. The non-masked tokens were still used for context, but the model was optimized for predicting masked words.\n\n\n* If we used [MASK] 90% of the time and random words 10% of the time, this would teach the model that the observed word is never correct.\n\n\n* If we used [MASK] 90% of the time and kept the same word 10% of the time, then the model could just trivially copy the non-contextual embedding\n", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">Next Sentence Prediction (NSP)</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "In the BERT training process, the model receives pairs of sentences as input and learns to predict if the second sentence in the pair is the subsequent sentence in the original document. During training, 50% of the inputs are a pair in which the second sentence is the subsequent sentence in the original document, while in the other 50% a random sentence from the corpus is chosen as the second sentence. 
The assumption is that the random sentence will be disconnected from the first sentence.", "_____no_output_____" ], [ "To help the model distinguish between the two sentences in training, the input is processed in the following way before entering the model:\n\n* A [CLS] token is inserted at the beginning of the first sentence and a [SEP] token is inserted at the end of each sentence.\n\n\n* A sentence embedding indicating Sentence A or Sentence B is added to each token. Sentence embeddings are similar in concept to token embeddings with a vocabulary of 2.\n\n\n* A positional embedding is added to each token to indicate its position in the sequence. The concept and implementation of positional embedding are presented in the Transformer paper.", "_____no_output_____" ], [ "<img src='asset/9_6/2.png'>", "_____no_output_____" ], [ "To predict if the second sentence is indeed connected to the first, the following steps are performed:\n\n* The entire input sequence goes through the Transformer model.\n\n\n* The output of the [CLS] token is transformed into a 2×1 shaped vector, using a simple classification layer (learned matrices of weights and biases).\n\n\n* Calculating the probability of IsNextSequence with softmax.", "_____no_output_____" ], [ "When training the BERT model, Masked LM and Next Sentence Prediction are trained together, with the goal of minimizing the combined loss function of the two strategies.", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">How to use BERT (Fine-tuning)</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "Using BERT for a specific task is relatively straightforward:\n\nBERT can be used for a wide variety of language tasks, while only adding a small layer to the core model:\n\n* Classification tasks such as sentiment analysis are done similarly to Next Sentence classification, by adding a classification layer on top of the Transformer output for the [CLS] token.\n\n\n* In Question Answering tasks (e.g. SQuAD v1.1), the software receives a question regarding a text sequence and is required to mark the answer in the sequence. Using BERT, a Q&A model can be trained by learning two extra vectors that mark the beginning and the end of the answer.\n\n\n* In Named Entity Recognition (NER), the software receives a text sequence and is required to mark the various types of entities (Person, Organization, Date, etc) that appear in the text. Using BERT, a NER model can be trained by feeding the output vector of each token into a classification layer that predicts the NER label.", "_____no_output_____" ], [ "In the fine-tuning training, most hyper-parameters stay the same as in BERT training, and the paper gives specific guidance (Section 3.5) on the hyper-parameters that require tuning. The BERT team has used this technique to achieve state-of-the-art results on a wide variety of challenging natural language tasks, detailed in Section 4 of the paper.", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ "# <div align=\"center\">Takeaways</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "* Model size matters, even at huge scale. BERT_large, with 345 million parameters, is the largest model of its kind. 
It is demonstrably superior on small-scale tasks to BERT_base, which uses the same architecture with “only” 110 million parameters.\n\n\n* With enough training data, more training steps == higher accuracy. For instance, on the MNLI task, the BERT_base accuracy improves by 1.0% when trained on 1M steps (128,000 words batch size) compared to 500K steps with the same batch size.\n\n\n* BERT’s bidirectional approach (MLM) converges slower than left-to-right approaches (because only 15% of words are predicted in each batch) but bidirectional training still outperforms left-to-right training after a small number of pre-training steps.", "_____no_output_____" ], [ "<img src='asset/9_6/3.png'>", "_____no_output_____" ], [ "# <div align=\"center\">Conclusion</div>\n---------------------------------------------------------------------", "_____no_output_____" ], [ "BERT is undoubtedly a breakthrough in the use of Machine Learning for Natural Language Processing. The fact that it’s approachable and allows fast fine-tuning will likely allow a wide range of practical applications in the future. In this summary, we attempted to describe the main ideas of the paper while not drowning in excessive technical details. For those wishing for a deeper dive, we highly recommend reading the full article and ancillary articles referenced in it. Another useful reference is the BERT source code and models, which cover 103 languages and were generously released as open source by the research team.", "_____no_output_____" ], [ " ", "_____no_output_____" ], [ " ", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0e06ba5a30bf2c128947da739d0203f15983a91
42,220
ipynb
Jupyter Notebook
Iterations/integrated_model/CustomisedModel.ipynb
BasemSuleiman/Intelligent_Failure_Prediction
772318d9242f7c3f476a1c8f5c1aaeb3459b39f6
[ "MIT" ]
null
null
null
Iterations/integrated_model/CustomisedModel.ipynb
BasemSuleiman/Intelligent_Failure_Prediction
772318d9242f7c3f476a1c8f5c1aaeb3459b39f6
[ "MIT" ]
null
null
null
Iterations/integrated_model/CustomisedModel.ipynb
BasemSuleiman/Intelligent_Failure_Prediction
772318d9242f7c3f476a1c8f5c1aaeb3459b39f6
[ "MIT" ]
null
null
null
54.477419
119
0.437068
[ [ [ "# Customised Kernel of the ACBC (only trained with the score tensor generated from Integrated model.ipynb)", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report\n\n", "_____no_output_____" ], [ "class MyModel:\n def __init__(self, w, lr):\n self.w = w\n self.size = w.shape[0]\n self.lr = lr\n \n def predict(self, x):\n length = x.shape[0]\n y_hat = np.zeros((length))\n for i in range(length):\n xi = x[i]\n w_masked = np.array([0 if np.isnan(xi[j]) else self.w[j] for j in range(self.size)])\n x_masked = np.array([0 if np.isnan(xij) else xij for xij in xi])\n w_sum = np.sum(w_masked)\n y_hat[i] = 0 if np.sum(x_masked * w_masked) / w_sum < 0.5 else 1\n return y_hat\n \n def GD_kernel(self, x,y):\n x_checking = x[np.isnan(x)]\n if x_checking.shape[0] == x.shape[0]:\n return 0, np.repeat(0, 36)\n \n W = self.w\n w_masked = np.array([0 if np.isnan(x[i]) else W[i] for i in range(self.size)])\n x_masked = np.array([0 if np.isnan(xi) else xi for xi in x])\n wx = w_masked * x_masked\n mask = np.array([0 if np.isnan(xi) else 1 for xi in x])\n sum_w = np.sum(w_masked)\n sum_wx = np.sum(wx)\n p = sum_wx / sum_w\n d_p = (x_masked*sum_w - sum_wx)/(sum_w**2) * mask\n loss = (y*(1-p) + (1-y)*p)\n gradients = -2*y*d_p + d_p\n gradients = gradients * mask\n\n label = 0.0 if p <0.5 else 1.0\n label = 1 if label == y else 0\n return label, gradients\n \n def train(self,x,y,epochs):\n length = x.shape[0]\n prev_w = self.w\n for i in range(epochs):\n avg_loss = 0\n gradients = np.zeros((length, self.size))\n for j in range(length):\n loss, g = self.GD_kernel(x[j], y[j])\n if loss == 0:\n gradients[i] = g\n avg_loss += loss\n gradient = np.sum(gradients, axis = 0)\n self.w = self.w - gradient*self.lr\n print(self.w)\n print('acc in epoch',i,':',float(avg_loss)/float(length))\n \n ", "_____no_output_____" ], [ "from sklearn import metrics\nfrom sklearn.metrics import accuracy_score\nimport pandas as pd\n\nvehicle_types = ['ZVe44', 'ZV573', 'ZV63d', 'ZVfd4', 'ZVa9c', 'ZVa78', 'ZV252']\n\ndef test_report(vehicle_type, train, test):\n print(train.shape)\n print('summary of test accuracy for vehicle type:', vehicle_type)\n model = MyModel(np.ones((36)),0.0002)\n model.train(train[:, 0:36], train[:,36],10)\n scores = model.predict(test[:,0:36])\n print(scores.shape[0])\n print(scores[scores==1].shape[0])\n #scores = np.mean(test[:,0:36],axis=1)\n print(scores.shape)\n #scores = np.array([1 if s >= 0.5 else 0 for s in scores])\n acc = accuracy_score(test[:,36], scores)\n \n print(classification_report(test[:,36], scores, digits=4))\n print('acc:', acc)\n fpr, tpr, thresholds = metrics.roc_curve(test[:,36], scores, pos_label=1)\n print('AUC:',metrics.auc(fpr, tpr))\n correct = int(acc * test.shape[0])\n print(correct,'/',test.shape[0])\n return correct\n\navg_acc = 0\nfor i in range(1):\n path = '../data/final/feature_tensors/'\n print(path)\n train_tensor = dict()\n test_tensor = dict()\n for vehicle_type in vehicle_types:\n train_tensor[vehicle_type] =pd.read_csv(path+vehicle_type+'_train.csv',sep=',',header=None).to_numpy()\n test_tensor[vehicle_type] = pd.read_csv(path+vehicle_type+'_test.csv', sep=',',header=None).to_numpy()\n\n\n\n correct = 0\n nums = 0\n for vehicle_type in vehicle_types:\n nums += test_tensor[vehicle_type].shape[0]\n correct += test_report(vehicle_type, train_tensor[vehicle_type], test_tensor[vehicle_type])\n avg_acc += (correct / nums)\n print('Test acc:', correct / nums)\nprint('average accuracy:', avg_acc)", "../data/final/feature_tensors/\n(13883, 
37)\nsummary of test accuracy for vehicle type: ZVe44\n[0.98184169 0.93394808 1.00284979 1.00076328 1.000039 1.00071977\n 1.00022133 1. 1. 1. 1. 1.\n 1.02592945 1.02498896 1.00055318 0.99992169 0.9956929 0.90854824\n 1.00225583 1.00054577 1.00909531 1.01358105 0.99991108 1.\n 1.00016383 1. 1. 1. 1.00365677 1.00831526\n 1. 1. 1.04193225 1.03358318 1.00185059 1.00034376]\nacc in epoch 0 : 0.8490960167110855\n[0.95991923 0.86657597 1.0058694 1.00152845 1.00005426 1.00144009\n 1.00044406 1. 1. 1. 1. 1.\n 1.05088569 1.04807887 1.00114443 0.99984095 0.98756701 0.81656168\n 1.00450809 1.00108748 1.01818945 1.02703891 0.99981897 1.\n 1.00032633 1. 1. 1. 1.00695338 1.01660051\n 1. 1. 1.08139497 1.06461344 1.00381617 1.00068912]\nacc in epoch 1 : 0.8545703378232371\n[0.9336635 0.80056431 1.00869708 1.00238806 1.00006831 1.00239342\n 1.00066829 1. 1. 1. 1. 1.\n 1.07385229 1.06902008 1.00159745 0.99975753 0.97580894 0.72607359\n 1.00640858 1.00170272 1.02688424 1.03878102 0.99972341 1.\n 1.00048775 1. 1. 1. 1.00993169 1.02445376\n 1. 1. 1.11870654 1.09399294 1.00553682 1.00103615]\nacc in epoch 2 : 0.8592523229849456\n[0.90194078 0.73677032 1.01127167 1.00323666 1.00008108 1.00293967\n 1.00066829 1. 1. 1. 1. 1.\n 1.09606897 1.08879919 1.00205084 0.99967124 0.96097275 0.63815009\n 1.00808243 1.00229889 1.03536614 1.04970621 0.99962406 1.\n 1.00064837 1. 1. 1. 1.0128722 1.03202335\n 1. 1. 1.15400985 1.11886374 1.00702945 1.00139928]\nacc in epoch 3 : 0.8637902470647555\n[0.86768657 0.67372264 1.01423955 1.00394728 1.00009251 1.00353189\n 1.00066829 1. 1. 1. 1. 1.\n 1.1148501 1.10611429 1.00239131 0.99958188 0.94488678 0.55216061\n 1.00983179 1.00279319 1.04348169 1.05918516 0.99952076 1.\n 1.00080849 1. 1. 1. 1.0157931 1.03980157\n 1. 1. 1.18795436 1.1398077 1.00860227 1.00168009]\nacc in epoch 4 : 0.8684002016855147\n[0.82915465 0.61588134 1.0169294 1.00465831 1.00014382 1.00418529\n 1.00066829 1. 1. 1. 1. 1.\n 1.13183973 1.11972037 1.00273205 0.99948929 0.92733653 0.4698398\n 1.01132232 1.00327922 1.05131434 1.06717313 0.99941332 1.\n 1.00096826 1. 1. 1. 1.01884252 1.04660222\n 1. 1. 1.22032951 1.15737502 1.0101649 1.00196107]\nacc in epoch 5 : 0.8735864006338687\n[0.78827924 0.56019073 1.01878629 1.00536898 1.00019538 1.00484786\n 1.00066829 1. 1. 1. 1. 1.\n 1.14728062 1.13001163 1.00307297 0.99939329 0.90714126 0.39181991\n 1.01276745 1.00375595 1.05793294 1.07521561 0.99930156 1.\n 1.00112787 1. 1. 1. 1.02194499 1.05381136\n 1. 1. 1.25158602 1.17226106 1.01090767 1.00224188]\nacc in epoch 6 : 0.8776921414679825\n[0.74287821 0.5111858 1.0204054 1.00607868 1.00024718 1.00510594\n 1.00066829 1. 1. 1. 1. 1.\n 1.1591155 1.14084359 1.0034141 0.99929371 0.88114339 0.32093254\n 1.0138218 1.00423879 1.06432212 1.08409217 0.99918527 1.\n 1.00128758 1. 1. 1. 1.02462636 1.05967514\n 1. 1. 1.28051658 1.1862198 1.01139896 1.00255623]\nacc in epoch 7 : 0.8820139739249442\n[0.69433226 0.46796794 1.02167281 1.00678654 1.00029919 1.0046654\n 1.00066829 1. 1. 1. 1. 1.\n 1.16998389 1.14899666 1.00375539 0.99919051 0.85239 0.26194764\n 1.01461789 1.00471092 1.07057049 1.09327734 0.99906439 1.\n 1.00144763 1. 1. 1. 1.02725003 1.06496299\n 1. 1. 1.30663359 1.1976994 1.01179956 1.00287103]\nacc in epoch 8 : 0.8863358063819059\n[0.6454547 0.42864608 1.02307438 1.00749191 1.00035139 1.00422281\n 1.00066829 1. 1. 1. 1. 1.\n 1.17743107 1.15423302 1.00409679 0.99908376 0.82398179 0.21480737\n 1.01551554 1.00517181 1.0763532 1.10210636 0.99893921 1.\n 1.00160827 1. 1. 1. 1.02975663 1.06954178\n 1. 1. 
1.33093633 1.20701432 1.01232872 1.00318593]\nacc in epoch 9 : 0.8898653028884247\n3471\n1773\n(3471,)\n precision recall f1-score support\n\n 0.0 0.7026 0.6892 0.6958 1731\n 1.0 0.6966 0.7098 0.7031 1740\n\n accuracy 0.6995 3471\n macro avg 0.6996 0.6995 0.6995 3471\nweighted avg 0.6996 0.6995 0.6995 3471\n\nacc: 0.6995102276001153\nAUC: 0.6994835554493116\n2428 / 3471\n(49193, 37)\nsummary of test accuracy for vehicle type: ZV573\n[0.86012274 0.95490553 0.99901643 1.01511051 1.00375191 1.00285352\n 0.99997816 1. 1. 1. 1. 1.\n 1.04664251 1.02407376 1.02383142 1.00283817 0.99132008 0.90605804\n 0.95888372 1.00825677 1.02746459 1.02843875 1.00441141 1.00191771\n 1.00018618 1.00001882 1. 1. 1.00802062 1.00816327\n 1.00090666 1.00046383 1.01793082 1.06025054 1.01448304 1.00896551]\nacc in epoch 0 : 0.8195881527859655\n[0.72030864 0.90668813 0.99442403 1.02859224 1.0073538 1.00558181\n 0.99995626 1. 1. 1. 1. 1.\n 1.09071459 1.0447903 1.04755093 1.00561213 0.97854865 0.80937675\n 0.91585711 1.01579791 1.05391267 1.0556597 1.00868134 1.0037323\n 1.00037715 1.00003768 1. 1. 1.0159873 1.01660418\n 1.00191488 1.00100652 1.03253465 1.11098939 1.02812277 1.01683503]\nacc in epoch 1 : 0.8236944280690343\n[0.58189716 0.85811148 0.98749576 1.04135916 1.01141561 1.00829542\n 0.99993433 1. 1. 1. 1. 1.\n 1.12986304 1.06077861 1.07023494 1.00838428 0.96305764 0.71293273\n 0.87081713 1.02231658 1.07923185 1.08017623 1.01275062 1.005542\n 1.00057334 1.00005658 1. 1. 1.02421589 1.02613114\n 1.00298928 1.00154843 1.04269295 1.15084569 1.04065522 1.02400324]\nacc in epoch 2 : 0.8280852966885532\n[0.4570312 0.81090257 0.97734613 1.05251268 1.016312 1.0115169\n 0.99991238 1. 1.00015005 1. 1. 1.\n 1.16528189 1.07188371 1.0921468 1.01106423 0.9440615 0.62222925\n 0.82378831 1.02757863 1.10410717 1.10288432 1.01685001 1.00743942\n 1.0007615 1.00005658 1. 1. 1.03252048 1.03492403\n 1.0038834 1.00198838 1.04618883 1.17728247 1.05111609 1.03002478]\nacc in epoch 3 : 0.8317646819669465\n[0.33741296 0.75954529 0.96538124 1.06281537 1.02112828 1.01452946\n 0.99989042 1. 1.00030728 1. 1. 1.\n 1.19608007 1.07808956 1.11210849 1.01349532 0.92480017 0.53592081\n 0.7763311 1.03162894 1.12799776 1.12574837 1.02102343 1.00881571\n 1.00095499 1.00005658 1. 1. 1.04053723 1.04279957\n 1.00490733 1.00242808 1.04475497 1.19806164 1.06155 1.03477601]\nacc in epoch 4 : 0.8356880043908687\n[0.22486378 0.70918648 0.95398471 1.0711424 1.0260327 1.01731508\n 0.99986848 1. 1.00047198 1. 1. 1.\n 1.22200723 1.07826422 1.1297959 1.01618569 0.90617579 0.46749787\n 0.72949469 1.03390889 1.1507794 1.14624528 1.02558287 1.01027889\n 1.00115368 1.00005658 1. 1. 1.04794958 1.04913237\n 1.00595195 1.00284246 1.04032522 1.20670771 1.07272161 1.03873494]\nacc in epoch 5 : 0.8394690301465656\n[0.12756593 0.66640356 0.9424172 1.07804608 1.03055516 1.02007494\n 0.99984658 1. 1.00064387 1. 1. 1.\n 1.24103928 1.07588439 1.14452041 1.01894172 0.88644231 0.41092185\n 0.68359109 1.03564553 1.17224606 1.16493016 1.03013778 1.01170481\n 1.0013567 1.00005658 1. 1. 1.05431338 1.05561375\n 1.00699515 1.00325632 1.03251586 1.2081338 1.08281157 1.04193607]\nacc in epoch 6 : 0.8425385725611367\n[0.058219 0.63042467 0.92649766 1.08426511 1.03349117 1.02166237\n 0.99982472 1. 1.00082206 1. 1. 1.\n 1.25710057 1.06966323 1.15640787 1.02166779 0.87216547 0.36697827\n 0.63821584 1.03678805 1.1917126 1.18186909 1.03467995 1.01300062\n 1.00158287 1.00005658 1. 1. 
1.0601535 1.06205754\n 1.00802616 1.0036695 1.0212274 1.20466324 1.09308055 1.04503215]\nacc in epoch 7 : 0.8446526944890533\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
d0e07476aa52127c00deb185fdb7d89d4523188e
4,735
ipynb
Jupyter Notebook
final_ml_model.ipynb
Build-Week-Med-Cabinet-Two/Data-Science
c029f55c74f6d6b66ca0bd5c8cc98d30cb8857cf
[ "MIT" ]
null
null
null
final_ml_model.ipynb
Build-Week-Med-Cabinet-Two/Data-Science
c029f55c74f6d6b66ca0bd5c8cc98d30cb8857cf
[ "MIT" ]
null
null
null
final_ml_model.ipynb
Build-Week-Med-Cabinet-Two/Data-Science
c029f55c74f6d6b66ca0bd5c8cc98d30cb8857cf
[ "MIT" ]
3
2020-08-19T02:43:28.000Z
2020-08-20T02:20:43.000Z
28.871951
119
0.535586
[ [ [ "\"\"\"\nMain application and routing logic\n\"\"\"\n# Standard imports\nimport os\n\n# Database + Heroku + Postgres\nfrom dotenv import load_dotenv\nfrom flask import Flask, jsonify, request\nfrom flask_sqlalchemy import SQLAlchemy\nimport psycopg2\nfrom .models import DB, Strain\nfrom flask_cors import CORS\n\n# import model\n#from nearest_neighbors_model import predict\n\n\n####################################################\n\n\nimport pickle\nimport pandas as pd\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# changed from relative to to full path\n\n\nstrains = pd.read_csv(\"https://github.com/Build-Week-Med-Cabinet-Two/Data-Science/blob/master/cannabis.csv\")\n\ntransformer = TfidfVectorizer(stop_words=\"english\", min_df=0.025, max_df=0.98, ngram_range=(1,3))\n\ndtm = transformer.fit_transform(canabis['spaCy_tokens'])\ndtm = pd.DataFrame(dtm.todense(), columns=transformer.get_feature_names())\n\nmodel = NearestNeighbors(n_neighbors=10, algorithm='kd_tree')\nmodel.fit(dtm)\ndef predict(request_text):\n transformed = transformer.transform([request_text])\n dense = transformed.todense()\n recommendations = model.kneighbors(dense)[1][0]\n output_array = []\n for recommendation in recommendations:\n strain = strains.iloc[recommendation]\n output = strain.drop(['total_text', 'spaCy_tokens']).to_dict()\n output_array.append(output)\n return output_array\n\n##################################################", "_____no_output_____" ], [ "def create_app():\n \"\"\"Create and configure an instance of the Flask application\"\"\"\n app = Flask(__name__)\n CORS(app)\n # consider using config\n app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv(\"DB_URL\")\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n # load file from .env file\n load_dotenv()\n db_name = os.getenv(\"DB_NAME\")\n db_user = os.getenv(\"DB_USER\")\n db_password = os.getenv(\"DB_PASSWORD\")\n db_host = os.getenv(\"DB_HOST\")\n\n # establish cursor and connection\n connection = psycopg2.connect(dbname=db_name, user=db_user, password=db_password, host=db_host)\n print(\"CONNECTION:\", connection)\n cursor = connection.cursor()\n print(\"CURSOR:\", cursor)\n\n # binding the instance to a very specific Flask app\n # initialize app for use with this database setup\n db = SQLAlchemy(app)\n db.init_app(app)\n # root route\n @app.route('/')\n def root():\n DB.create_all()\n return \"Welcome to Med Cab\"\n @app.route(\"/test\", methods=['POST', 'GET'])\n def predict_strain():\n text = request.get_json(force=True)\n predictions = predict(text)\n return jsonify(predictions)\n \n return app", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d0e07a959f9e3626d07f06d569f579388e6fb41b
108,297
ipynb
Jupyter Notebook
module1-join-and-reshape-data/Mahfuzur_Join_and_Reshape_Data_Assignment.ipynb
mahfuz978/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
4e4f15b1016c3b58e6c705b1ba9a94a90eb0d967
[ "MIT" ]
null
null
null
module1-join-and-reshape-data/Mahfuzur_Join_and_Reshape_Data_Assignment.ipynb
mahfuz978/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
4e4f15b1016c3b58e6c705b1ba9a94a90eb0d967
[ "MIT" ]
null
null
null
module1-join-and-reshape-data/Mahfuzur_Join_and_Reshape_Data_Assignment.ipynb
mahfuz978/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling
4e4f15b1016c3b58e6c705b1ba9a94a90eb0d967
[ "MIT" ]
null
null
null
32.031056
330
0.312095
[ [ [ "<a href=\"https://colab.research.google.com/github/mahfuz978/DS-Unit-1-Sprint-2-Data-Wrangling-and-Storytelling/blob/master/module1-join-and-reshape-data/Mahfuzur_Join_and_Reshape_Data_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "_Lambda School Data Science_\n\n# Join and Reshape datasets\n\nObjectives\n- concatenate data with pandas\n- merge data with pandas\n- understand tidy data formatting\n- melt and pivot data with pandas\n\nLinks\n- [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf)\n- [Tidy Data](https://en.wikipedia.org/wiki/Tidy_data)\n - Combine Data Sets: Standard Joins\n - Tidy Data\n - Reshaping Data\n- Python Data Science Handbook\n - [Chapter 3.6](https://jakevdp.github.io/PythonDataScienceHandbook/03.06-concat-and-append.html), Combining Datasets: Concat and Append\n - [Chapter 3.7](https://jakevdp.github.io/PythonDataScienceHandbook/03.07-merge-and-join.html), Combining Datasets: Merge and Join\n - [Chapter 3.8](https://jakevdp.github.io/PythonDataScienceHandbook/03.08-aggregation-and-grouping.html), Aggregation and Grouping\n - [Chapter 3.9](https://jakevdp.github.io/PythonDataScienceHandbook/03.09-pivot-tables.html), Pivot Tables\n \nReference\n- Pandas Documentation: [Reshaping and Pivot Tables](https://pandas.pydata.org/pandas-docs/stable/reshaping.html)\n- Modern Pandas, Part 5: [Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)", "_____no_output_____" ] ], [ [ "!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz", "--2019-09-11 21:47:30-- https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz\nResolving s3.amazonaws.com (s3.amazonaws.com)... 52.216.228.187\nConnecting to s3.amazonaws.com (s3.amazonaws.com)|52.216.228.187|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 205548478 (196M) [application/x-gzip]\nSaving to: ‘instacart_online_grocery_shopping_2017_05_01.tar.gz’\n\ninstacart_online_gr 100%[===================>] 196.03M 25.2MB/s in 7.2s \n\n2019-09-11 21:47:37 (27.4 MB/s) - ‘instacart_online_grocery_shopping_2017_05_01.tar.gz’ saved [205548478/205548478]\n\n" ], [ "!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz", "instacart_2017_05_01/\ninstacart_2017_05_01/._aisles.csv\ninstacart_2017_05_01/aisles.csv\ninstacart_2017_05_01/._departments.csv\ninstacart_2017_05_01/departments.csv\ninstacart_2017_05_01/._order_products__prior.csv\ninstacart_2017_05_01/order_products__prior.csv\ninstacart_2017_05_01/._order_products__train.csv\ninstacart_2017_05_01/order_products__train.csv\ninstacart_2017_05_01/._orders.csv\ninstacart_2017_05_01/orders.csv\ninstacart_2017_05_01/._products.csv\ninstacart_2017_05_01/products.csv\n" ], [ "%cd instacart_2017_05_01", "/content/instacart_2017_05_01\n" ], [ "!ls -lh *.csv", "-rw-r--r-- 1 502 staff 2.6K May 2 2017 aisles.csv\n-rw-r--r-- 1 502 staff 270 May 2 2017 departments.csv\n-rw-r--r-- 1 502 staff 551M May 2 2017 order_products__prior.csv\n-rw-r--r-- 1 502 staff 24M May 2 2017 order_products__train.csv\n-rw-r--r-- 1 502 staff 104M May 2 2017 orders.csv\n-rw-r--r-- 1 502 staff 2.1M May 2 2017 products.csv\n" ] ], [ [ "# Assignment\n\n## Join Data Practice\n\nThese are the top 10 most frequently ordered products. How many times was each ordered? \n\n1. Banana\n2. 
Bag of Organic Bananas\n3. Organic Strawberries\n4. Organic Baby Spinach \n5. Organic Hass Avocado\n6. Organic Avocado\n7. Large Lemon \n8. Strawberries\n9. Limes \n10. Organic Whole Milk\n\nFirst, write down which columns you need and which dataframes have them.\n\nNext, merge these into a single dataframe.\n\nThen, use pandas functions from the previous lesson to get the counts of the top 10 most frequently ordered products.", "_____no_output_____" ] ], [ [ "\nimport pandas as pd\norder_products__prior = pd.read_csv('order_products__prior.csv')", "_____no_output_____" ], [ "order_products__prior.head()", "_____no_output_____" ], [ "order_products__prior.shape", "_____no_output_____" ], [ "# we need order id and product id", "_____no_output_____" ], [ "order_products__train = pd.read_csv('order_products__train.csv')", "_____no_output_____" ], [ "order_products__train.head()", "_____no_output_____" ], [ "order_products__train.shape", "_____no_output_____" ], [ "# we need order id and product id", "_____no_output_____" ], [ "products = pd.read_csv('products.csv')", "_____no_output_____" ], [ "products.head()", "_____no_output_____" ], [ "# we need product_name & product_id", "_____no_output_____" ], [ "products.shape", "_____no_output_____" ], [ "# we don't need the aisles csv", "_____no_output_____" ], [ "aisles = pd.read_csv('aisles.csv')\naisles.head()", "_____no_output_____" ], [ "# we don't need the departments csv", "_____no_output_____" ], [ "departments = pd.read_csv('departments.csv')\ndepartments.head()", "_____no_output_____" ], [ "orders = pd.read_csv('orders.csv')", "_____no_output_____" ], [ "orders.shape", "_____no_output_____" ], [ "orders.head()", "_____no_output_____" ], [ "# I need order_id and order_number", "_____no_output_____" ], [ "order_products = pd.concat([order_products__train,order_products__prior])", "_____no_output_____" ], [ "order_products.head()", "_____no_output_____" ], [ "# we need order_id and product_id, so\nconcat_1 = order_products.loc[:,['order_id','product_id']]", "_____no_output_____" ], [ "concat_1.head()", "_____no_output_____" ], [ "data_1 = products.loc[:,['product_name','product_id']]", "_____no_output_____" ], [ "data_1.head()", "_____no_output_____" ], [ "data_2 = orders.loc[:,['order_id','order_number']]", "_____no_output_____" ], [ "data_2.head()", "_____no_output_____" ], [ "merge_1 = pd.merge(concat_1,data_1,on='product_id')", "_____no_output_____" ], [ "merge_1.head()", "_____no_output_____" ], [ "Final_merge = pd.merge(merge_1,data_2, on='order_id')", "_____no_output_____" ], [ "Final_merge.head()", "_____no_output_____" ], [ "final = Final_merge['product_name'].value_counts()", "_____no_output_____" ], [ "final.head(10)", "_____no_output_____" ] ], [ [ "## Reshape Data Section\n\n- Replicate the lesson code\n- Complete the code cells we skipped near the beginning of the notebook\n- Table 2 --> Tidy\n- Tidy --> Table 2\n- Load seaborn's `flights` dataset by running the cell below. Then create a pivot table showing the number of passengers by month and year. Use year for the index and month for the columns. 
You've done it right if you get 112 passengers for January 1949 and 432 passengers for December 1960.", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns", "_____no_output_____" ], [ "table1 = pd.DataFrame(\n [[np.nan, 2],\n [16, 11], \n [3, 1]],\n index=['John Smith', 'Jane Doe', 'Mary Johnson'], \n columns=['treatmenta', 'treatmentb'])\n\ntable2 = table1.T", "_____no_output_____" ], [ "table1", "_____no_output_____" ], [ "table2", "_____no_output_____" ], [ "table1 = table1.reset_index()\ntable1", "_____no_output_____" ], [ "tidy = table1.melt(id_vars='index')\ntidy", "_____no_output_____" ], [ "tidy = tidy.rename(columns={\n 'index': 'name', \n 'variable': 'trt', \n 'value': 'result'\n})\n\ntidy", "_____no_output_____" ], [ "tidy.trt=tidy.trt.str.replace('treatment', '')\ntidy", "_____no_output_____" ], [ "tidy.pivot_table(index='name', columns='trt',values='result')", "_____no_output_____" ] ], [ [ "# Tidy ----------> 2", "_____no_output_____" ] ], [ [ "table2", "_____no_output_____" ], [ "table2 = table2.reset_index()", "_____no_output_____" ], [ "table2", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "tidy1 = table2.T.reset_index().melt(id_vars='index').rename(columns= {\n 'index': 'name', \n 'variable': 'trt', \n 'value': 'result'\n})\ntidy1", "_____no_output_____" ], [ "tidy1['trt']= tidy1['trt'].str.replace('treatment','')\ntidy1 = tidy1.set_index('name')\ntidy1", "_____no_output_____" ], [ "tidy1.pivot_table(index = 'name', columns= 'trt', values = 'result' ).T", "_____no_output_____" ], [ "", "_____no_output_____" ], [ "flights = sns.load_dataset('flights')", "_____no_output_____" ], [ "flights.head()", "_____no_output_____" ], [ "wide = flights.pivot_table(index= 'year', columns = 'month', values = 'passengers')\nwide", "_____no_output_____" ] ], [ [ "## Join Data Stretch Challenge\n\nThe [Instacart blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2) has a visualization of \"**Popular products** purchased earliest in the day (green) and latest in the day (red).\" \n\nThe post says,\n\n> \"We can also see the time of day that users purchase specific products.\n\n> Healthier snacks and staples tend to be purchased earlier in the day, whereas ice cream (especially Half Baked and The Tonight Dough) are far more popular when customers are ordering in the evening.\n\n> **In fact, of the top 25 latest ordered products, the first 24 are ice cream! The last one, of course, is a frozen pizza.**\"\n\nYour challenge is to reproduce the list of the top 25 latest ordered popular products.\n\nWe'll define \"popular products\" as products with more than 2,900 orders.\n\n", "_____no_output_____" ] ], [ [ "##### YOUR CODE HERE #####", "_____no_output_____" ] ], [ [ "## Reshape Data Stretch Challenge\n\n_Try whatever sounds most interesting to you!_\n\n- Replicate more of Instacart's visualization showing \"Hour of Day Ordered\" vs \"Percent of Orders by Product\"\n- Replicate parts of the other visualization from [Instacart's blog post](https://tech.instacart.com/3-million-instacart-orders-open-sourced-d40d29ead6f2), showing \"Number of Purchases\" vs \"Percent Reorder Purchases\"\n- Get the most recent order for each user in Instacart's dataset. 
This is a useful baseline when [predicting a user's next order](https://www.kaggle.com/c/instacart-market-basket-analysis)\n- Replicate parts of the blog post linked at the top of this notebook: [Modern Pandas, Part 5: Tidy Data](https://tomaugspurger.github.io/modern-5-tidy.html)", "_____no_output_____" ] ], [ [ "##### YOUR CODE HERE #####", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0e09a09f563546f4bb3b4840a11f7459670fd64
2,804
ipynb
Jupyter Notebook
12-SpecialMethods.ipynb
Omezkan/PythonLessons
3e4861db62149e59e01dde23220c2840854129c5
[ "MIT" ]
null
null
null
12-SpecialMethods.ipynb
Omezkan/PythonLessons
3e4861db62149e59e01dde23220c2840854129c5
[ "MIT" ]
null
null
null
12-SpecialMethods.ipynb
Omezkan/PythonLessons
3e4861db62149e59e01dde23220c2840854129c5
[ "MIT" ]
null
null
null
18.090323
130
0.478959
[ [ [ "class Fruits():\n def __init__(self , name , calories):\n self.name = name\n self.calories = calories \n def __str__(self):\n return f\"{self.name} has {self.calories} calories \"\n \n def __len__(self) :\n return self.calories", "_____no_output_____" ], [ "my_fruit = Fruits(\"banana\" , 200)", "_____no_output_____" ], [ "my_fruit.calories", "_____no_output_____" ], [ "print(my_fruit)", "banana has 200 calories \n" ], [ "my_list = [1,2,3]", "_____no_output_____" ], [ "len(my_list)", "_____no_output_____" ], [ "len(my_fruit)", "_____no_output_____" ], [ "## yukarıda 3 tane özel metot yazdık bunlar python da önceden tanımlanmış olup biz kullandık __init__ , __len__ , __str__\n## bunlar özel metotlar bunlardan birsürü var googledan öğren ", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0e09d2d4de895aec9f15b5feeb0684d5a70278a
32,185
ipynb
Jupyter Notebook
04-tools-prediction-titanic/.ipynb_checkpoints/titanic-checkpoint.ipynb
ByronBay/dataXbwcon
00e523c3204a2e3d4e148e3cb2608ea6f3c27d81
[ "Apache-2.0" ]
5
2019-02-14T10:29:51.000Z
2019-12-24T10:52:07.000Z
04-tools-prediction-titanic/.ipynb_checkpoints/titanic-checkpoint.ipynb
ByronBay/dataXbwcon
00e523c3204a2e3d4e148e3cb2608ea6f3c27d81
[ "Apache-2.0" ]
null
null
null
04-tools-prediction-titanic/.ipynb_checkpoints/titanic-checkpoint.ipynb
ByronBay/dataXbwcon
00e523c3204a2e3d4e148e3cb2608ea6f3c27d81
[ "Apache-2.0" ]
5
2019-02-15T10:25:58.000Z
2020-10-14T10:37:00.000Z
27.368197
550
0.552742
[ [ [ "# Real World Example: \n### AI, Machine Learning & Data Science \n\n---", "_____no_output_____" ], [ "# What is the Value for your Business?\n\n\n- By seeing acutal examples you'll be empowered to ask the right questions (and get fair help from consultants, startups, or data analytics companies)\n- This will help you make the correct decisions for your business\n\n# Demystify\n\nThis is a real world example of how you'd solve a Machine Learning prediciton problem.\n\n**Common Machine Learning Use Cases in Companies:**\n- Discover churn risk of customers\n- Predict optimal price levels (investments / retail)\n- Predict future revenues\n- Build recommendation systems\n- Customer value scoring\n- Fraud detection\n- Customer insights (characteristics)\n- Predict sentiment of text / client feedback\n- Object detecton in images\n- etc etc...", "_____no_output_____" ], [ "## Why Python?\n\nPython is general purpose and can do Software development, Web development, AI. Python has experienced incredible growth over the last couple of years.\n\n<img src='https://zgab33vy595fw5zq-zippykid.netdna-ssl.com/wp-content/uploads/2017/09/growth_major_languages-1-1400x1200.png' width=400px></img>\n\nSource: https://stackoverflow.blog/2017/09/06/incredible-growth-python/", "_____no_output_____" ], [ "# Everything is free!\n\nThe best software today is open source and it's also enterprise-ready. Anyone can download and use them for free (even for business purposes).\n\n**Examples of great, free AI libraries:**\n* Anaconda\n* Google's TensorFlow\n* Scikit-learn\n* Pandas\n* Keras\n* Matplotlib\n* SQL\n* Spark\n* Numpy\n\n## State-of-the-Art algorithms\n\nNo matter what algorithm you want to use (Linear Regression, Random Forests, Neural Networks, or Deep Learning), **all of the latest methods are implemented optimized for Python**.", "_____no_output_____" ], [ "## Big Data\n\nPython code can run on any computer. Therefore, you can scale your computations and utilize for example cloud resources to run big data jobs.\n\n**Great tools for Big Data:**\n- Spark\n- Databricks\n- Hadoop / MapReduce\n- Kafka\n- Amazon EC2\n- Amazon S3\n\n\n# Note on data collection\n\n- Collect all the data you can! (storage is cheap)\n\n---", "_____no_output_____" ], [ "----\n# Real world example of AI: Titanic Analysis\n\nTitanic notebook is open source. All of our material is online. Anyone can developt sophisticated AI programs and solutions.", "_____no_output_____" ], [ "___\n## The difficult part is never to implement the algorithm\n\nThe hard part of a machine learning problem is to get data into the right format so you can solve the problem. We'll illustrate this below.\n___", "_____no_output_____" ], [ "![data-x](http://oi64.tinypic.com/o858n4.jpg)\n\n\n# __Titanic Survivor Analysis__\n\n\n**Sources:** \n* **Training + explanations**: https://www.kaggle.com/c/titanic\n\n___\n___\n", "_____no_output_____" ], [ "\n# Understanding the connections between passanger information and survival rate\n\nThe sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.\n\nOne of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. 
Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others.\n\n### **Our task is to train a machine learning model on a data set consisting of 891 samples of people who were onboard of the Titanic. And then, be able to predict if the passengers survived or not.**", "_____no_output_____" ], [ "# Import packages", "_____no_output_____" ] ], [ [ "# No warnings\nimport warnings\nwarnings.filterwarnings('ignore') # Filter out warnings\n\n# data analysis and wrangling\nimport pandas as pd\nimport numpy as np\n\n# visualization\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB # Gaussian Naive Bays\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.tree import DecisionTreeClassifier\n\nimport xgboost as xgb\n\nfrom plot_distribution import plot_distribution\nplt.rcParams['figure.figsize'] = (9, 5)", "_____no_output_____" ] ], [ [ "### Load Data", "_____no_output_____" ] ], [ [ "df = pd.read_csv('data/train.csv')", "_____no_output_____" ] ], [ [ "<a id='sec3'></a>\n___\n## Part 2: Exploring the Data\n**Data descriptions**\n\n<img src=\"data/Titanic_Variable.png\">", "_____no_output_____" ] ], [ [ "# preview the data\ndf.head(3)", "_____no_output_____" ], [ "# General data statistics\ndf.describe()", "_____no_output_____" ] ], [ [ "### Histograms", "_____no_output_____" ] ], [ [ "df.hist(figsize=(13,10));", "_____no_output_____" ], [ "# Balanced data set?\ny_numbers = df['Survived'].map({0:'Deceased',1:'Survived'}).value_counts()\n\ny_numbers", "_____no_output_____" ], [ "# Imbalanced data set, our classifiers have to outperform 62 % accuracy\n\ny_numbers[1] / y_numbers[0]\n", "_____no_output_____" ] ], [ [ "> #### __Interesting Fact:__ \n\n> Third Class passengers were the first to board, with First and Second Class passengers following up to an hour before departure. \n\n> Third Class passengers were inspected for ailments and physical impairments that might lead to their being refused entry to the United States, while First Class passengers were personally greeted by Captain Smith.", "_____no_output_____" ] ], [ [ "# Analysis of survival rate for the socioeconmic classes?\n\ndf[['Pclass', 'Survived']].groupby(['Pclass'], as_index=True) \\\n .mean().sort_values(by='Survived', ascending=False)", "_____no_output_____" ] ], [ [ "___\n\n> #### __Brief Remarks Regarding the Data__\n\n> * `PassengerId` is a random number (incrementing index) and thus does not contain any valuable information. \n\n> * `Survived, Passenger Class, Age, Siblings Spouses, Parents Children` and `Fare` are numerical values (no need to transform them) -- but, we might want to group them (i.e. create categorical variables). \n\n> * `Sex, Embarked` are categorical features that we need to map to integer values. `Name, Ticket` and `Cabin` might also contain valuable information.\n\n___", "_____no_output_____" ] ], [ [ "df.head(1)", "_____no_output_____" ] ], [ [ "### Dropping Unnecessary data\n__Note:__ It is important to remove variables that convey information already captured by some other variable. 
Doing so removes the correlation, while also diminishing potential overfit.", "_____no_output_____" ] ], [ [ "# Drop columns 'Ticket', 'Cabin', 'Fare' need to do it \n# for both test and training\n\ndf = df.drop(['PassengerId','Ticket', 'Cabin','Fare'], axis=1)", "_____no_output_____" ] ], [ [ "<a id='sec4'></a>\n____\n## Part 3: Transforming the data\n\n### 3.1 _The Title of the person can be used to predict survival_", "_____no_output_____" ] ], [ [ "# List example titles in Name column\ndf.Name", "_____no_output_____" ], [ "# Create column called Title\n\ndf['Title'] = df['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)", "_____no_output_____" ], [ "# Double check that our titles makes sense (by comparing to sex)\n\npd.crosstab(df['Title'], df['Sex'])", "_____no_output_____" ], [ "# Map rare titles to one group\n\ndf['Title'] = df['Title'].\\\n replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr',\\\n 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')\n\ndf['Title'] = df['Title'].replace('Mlle', 'Miss') #Mademoiselle\ndf['Title'] = df['Title'].replace('Ms', 'Miss')\ndf['Title'] = df['Title'].replace('Mme', 'Mrs') #Madame", "_____no_output_____" ], [ "# We now have more logical (contemporary) titles, and fewer groups\n# See if we can get some insights\n\ndf[['Title', 'Survived']].groupby(['Title']).mean()", "_____no_output_____" ], [ "# We can plot the survival chance for each title\n\nsns.countplot(x='Survived', hue=\"Title\", data=df, order=[1,0])\nplt.xticks(range(2),['Survived','Deceased']);", "_____no_output_____" ], [ "# Title dummy mapping: Map titles to binary dummy columns\n\nbinary_encoded = pd.get_dummies(df.Title)\ndf[binary_encoded.columns] = binary_encoded", "_____no_output_____" ], [ "# Remove unique variables for analysis (Title is generally bound to Name, so it's also dropped)\ndf = df.drop(['Name', 'Title'], axis=1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "### Map Gender column to binary (male = 0, female = 1) categories", "_____no_output_____" ] ], [ [ "# convert categorical variable to numeric\n\ndf['Sex'] = df['Sex']. 
\\\n map( {'female': 1, 'male': 0} ).astype(int)\n\ndf.head()", "_____no_output_____" ] ], [ [ "### Handle missing values for age", "_____no_output_____" ] ], [ [ "df.Age = df.Age.fillna(df.Age.median())", "_____no_output_____" ] ], [ [ "### Split age into bands and look at survival rates", "_____no_output_____" ] ], [ [ "# Age bands\ndf['AgeBand'] = pd.cut(df['Age'], 5)\ndf[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)\\\n .mean().sort_values(by='AgeBand', ascending=True)", "_____no_output_____" ] ], [ [ "### Suvival probability against age", "_____no_output_____" ] ], [ [ "# Plot the relative survival rate distributions against Age of passangers\n# subsetted by the gender\n\nplot_distribution( df , var = 'Age' , target = 'Survived' ,\\\n row = 'Sex' )\n\n# Recall: {'male': 0, 'female': 1}", "_____no_output_____" ], [ "# Change Age column to\n# map Age ranges (AgeBands) to ordinal integer numbers\n\ndf.loc[ df['Age'] <= 16, 'Age'] = 0\ndf.loc[(df['Age'] > 16) & (df['Age'] <= 32), 'Age'] = 1\ndf.loc[(df['Age'] > 32) & (df['Age'] <= 48), 'Age'] = 2\ndf.loc[(df['Age'] > 48) & (df['Age'] <= 64), 'Age'] = 3\ndf.loc[ df['Age'] > 64, 'Age']=4\ndf = df.drop(['AgeBand'], axis=1)\n\ndf.head()\n\n# Note we could just run \n# df['Age'] = pd.cut(df['Age'], 5,labels=[0,1,2,3,4])", "_____no_output_____" ] ], [ [ "### Travel Party Size\n\nHow did the number of people the person traveled with impact the chance of survival?", "_____no_output_____" ] ], [ [ "# SibSp = Number of Sibling / Spouses\n# Parch = Parents / Children\n\ndf['FamilySize'] = df['SibSp'] + df['Parch'] + 1\n\n# Survival chance against FamilySize\ndf[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=True) \\\n .mean().sort_values(by='Survived', ascending=False)", "_____no_output_____" ], [ "# Plot it, 1 is survived\n\nsns.countplot(x='Survived', hue=\"FamilySize\", data=df, order=[1,0]);", "_____no_output_____" ], [ "# Create binary variable if the person was alone or not\n\ndf['IsAlone'] = 0\ndf.loc[df['FamilySize'] == 1, 'IsAlone'] = 1\n\ndf[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=True).mean()", "_____no_output_____" ], [ "# We will only use the binary IsAlone feature for further analysis\n\ndf.drop(['Parch', 'SibSp', 'FamilySize'], axis=1, inplace=True)\n\ndf.head()", "_____no_output_____" ] ], [ [ "# Feature construction", "_____no_output_____" ] ], [ [ "# We can also create new features based on intuitive combinations\n# Here is an example when we say that the age times socioclass is a determinant factor\n\ndf['Age*Class'] = df.Age.values * df.Pclass.values\n\ndf.loc[:, ['Age*Class', 'Age', 'Pclass']].head()", "_____no_output_____" ] ], [ [ "## Port the person embarked from\nLet's see how that influences chance of survival", "_____no_output_____" ], [ "<img src= \"data/images/titanic_voyage_map.png\">\n>___\n", "_____no_output_____" ] ], [ [ "# Fill NaN 'Embarked' Values in the dfs\nfreq_port = df['Embarked'].dropna().mode()[0]\ndf['Embarked'] = df['Embarked'].fillna(freq_port)\n ", "_____no_output_____" ], [ "# Plot it, 1 is survived\n\nsns.countplot(x='Survived', hue=\"Embarked\", data=df, order=[1,0]);", "_____no_output_____" ], [ "df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=True) \\\n .mean().sort_values(by='Survived', ascending=False)", "_____no_output_____" ], [ "# Create categorical dummy variables for Embarked values\n\nbinary_encoded = pd.get_dummies(df.Embarked)\ndf[binary_encoded.columns] = binary_encoded\ndf.drop('Embarked', axis=1, inplace=True)\n\ndf.head()", 
"_____no_output_____" ] ], [ [ "### Finished -- Preprocessing Complete!", "_____no_output_____" ] ], [ [ "# All features are approximately on the same scale\n# no need for feature engineering / normalization\n\ndf.head(7)", "_____no_output_____" ] ], [ [ "### Sanity Check: View the correlation between features", "_____no_output_____" ] ], [ [ "# Uncorrelated features are generally more powerful predictors\n\ncolormap = plt.cm.viridis\nplt.figure(figsize=(12,12))\nplt.title('Pearson Correlation of Features', y=1.05, size=15)\nsns.heatmap(df.corr().round(2)\\\n ,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \\\n linecolor='white', annot=True);", "_____no_output_____" ] ], [ [ "<a id='sec5'></a>\n___\n### Machine Learning, Prediction and Artifical Intelligence\nNow we will use Machine Learning algorithms in order to predict if the person survived. \n\n**We will choose the best model from:**\n1. Logistic Regression\n2. K-Nearest Neighbors (KNN) \n3. Support Vector Machines (SVM)\n4. Perceptron\n5. XGBoost\n6. Random Forest\n7. Neural Network (Deep Learning)", "_____no_output_____" ], [ "### Setup Training and Validation Sets", "_____no_output_____" ] ], [ [ "X = df.drop(\"Survived\", axis=1) # Training & Validation data\nY = df[\"Survived\"] # Response / Target Variable\n\nprint(X.shape, Y.shape)", "_____no_output_____" ], [ "# Split training set so that we validate on 20% of the data\n# Note that our algorithms will never have seen the validation \n\nnp.random.seed(1337) # set random seed for reproducibility\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_val, Y_train, Y_val = \\\n train_test_split(X, Y, test_size=0.2)\n\nprint('Training Samples:', X_train.shape, Y_train.shape)\nprint('Validation Samples:', X_val.shape, Y_val.shape)", "_____no_output_____" ] ], [ [ "___\n> ## General ML workflow\n> 1. Create Model Object\n> 2. Train the Model\n> 3. Predict on _unseen_ data\n> 4. Evaluate accuracy.\n\n___", "_____no_output_____" ], [ "## Compare Different Prediciton Models", "_____no_output_____" ], [ "### 1. Logistic Regression", "_____no_output_____" ] ], [ [ "logreg = LogisticRegression() # create\nlogreg.fit(X_train, Y_train) # train\nacc_log_2 = logreg.score(X_val, Y_val) # predict & evaluate\n\nprint('Logistic Regression accuracy:',\\\n str(round(acc_log_2*100,2)),'%')", "_____no_output_____" ] ], [ [ "### 2. K-Nearest Neighbour", "_____no_output_____" ] ], [ [ "knn = KNeighborsClassifier(n_neighbors = 5) # instantiate\nknn.fit(X_train, Y_train) # fit\nacc_knn = knn.score(X_val, Y_val) # predict + evaluate\n\nprint('K-Nearest Neighbors labeling accuracy:', str(round(acc_knn*100,2)),'%') ", "_____no_output_____" ] ], [ [ "### 3. Support Vector Machine", "_____no_output_____" ] ], [ [ "# Support Vector Machines Classifier (non-linear kernel)\nsvc = SVC() # instantiate\nsvc.fit(X_train, Y_train) # fit\nacc_svc = svc.score(X_val, Y_val) # predict + evaluate\n\nprint('Support Vector Machines labeling accuracy:', str(round(acc_svc*100,2)),'%')", "_____no_output_____" ] ], [ [ "### 4. Perceptron", "_____no_output_____" ] ], [ [ "perceptron = Perceptron() # instantiate \nperceptron.fit(X_train, Y_train) # fit\nacc_perceptron = perceptron.score(X_val, Y_val) # predict + evalaute\n\nprint('Perceptron labeling accuracy:', str(round(acc_perceptron*100,2)),'%')", "_____no_output_____" ] ], [ [ "### 5. 
Gradient Boosting", "_____no_output_____" ] ], [ [ "# XGBoost, same API as scikit-learn\ngradboost = xgb.XGBClassifier(n_estimators=1000) # instantiate\ngradboost.fit(X_train, Y_train) # fit\nacc_xgboost = gradboost.score(X_val, Y_val) # predict + evalute\n\nprint('XGBoost labeling accuracy:', str(round(acc_xgboost*100,2)),'%')", "_____no_output_____" ] ], [ [ "### 6. Random Forest", "_____no_output_____" ] ], [ [ "# Random Forest\nrandom_forest = RandomForestClassifier(n_estimators=500) # instantiate\nrandom_forest.fit(X_train, Y_train) # fit\nacc_rf = random_forest.score(X_val, Y_val) # predict + evaluate\n\nprint('Random Forest labeling accuracy:', str(round(acc_rf*100,2)),'%')", "_____no_output_____" ] ], [ [ "### 7. Neural Networks (Deep Learning)", "_____no_output_____" ] ], [ [ "from keras.models import Sequential\nfrom keras.layers import Dense", "_____no_output_____" ], [ "model = Sequential()\nmodel.add( Dense(units=300, activation='relu', input_shape=(13,) ))\nmodel.add( Dense(units=100, activation='relu'))\nmodel.add( Dense(units=50, activation='relu'))\nmodel.add( Dense(units=1, activation='sigmoid') )", "_____no_output_____" ], [ "model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['accuracy'])\nmodel.fit(X_train, Y_train, epochs = 50, batch_size= 50)", "_____no_output_____" ], [ "# # Evaluate the model Accuracy on test set\nprint('Neural Network accuracy:',str(round(model.evaluate(X_val, Y_val, batch_size=50,verbose=False)[1]*100,2)),'%')", "_____no_output_____" ] ], [ [ "### Importance scores in the random forest model", "_____no_output_____" ] ], [ [ "# Look at importnace of features for random forest\n\ndef plot_model_var_imp( model , X , y ):\n imp = pd.DataFrame( \n model.feature_importances_ , \n columns = [ 'Importance' ] , \n index = X.columns \n )\n imp = imp.sort_values( [ 'Importance' ] , ascending = True )\n imp[ : 10 ].plot( kind = 'barh' )\n print ('Training accuracy Random Forest:',model.score( X , y ))\n\nplot_model_var_imp(random_forest, X_train, Y_train)", "_____no_output_____" ] ], [ [ "<a id='sec6'></a>\n___\n\n## Appendix I:\n#### Why are our models maxing out at around 80%?\n", "_____no_output_____" ], [ "#### __John Jacob Astor__\n\n<img src= \"data/images/john-jacob-astor.jpg\"> \n\nJohn Jacob Astor perished in the disaster even though our model predicted he would survive. Astor was the wealthiest person on the Titanic -- his ticket fare was valued at over 35,000 USD in 2016 -- it seems likely that he would have been among of the approximatelly 35 percent of men in first class to survive. However, this was not the case: although his pregnant wife survived, John Jacob Astor’s body was recovered a week later, along with a gold watch, a diamond ring with three stones, and no less than 92,481 USD (2016 value) in cash.\n\n<br >\n\n\n#### __Olaus Jorgensen Abelseth__\n\n<img src= \"data/images/olaus-jorgensen-abelseth.jpg\">\n\nAvelseth was a 25-year-old Norwegian sailor, a man in 3rd class, and not expected to survive by classifier. However, once the ship sank, he survived by swimming for 20 minutes in the frigid North Atlantic water before joining other survivors on a waterlogged collapsible boat.\n\nAbelseth got married three years later, settled down as a farmer in North Dakota, had 4 kids, and died in 1980 at the age of 94.\n\n<br >\n\n### __Key Takeaway__ \n\nAs engineers and business professionals we are trained to answer the question 'what could we do to improve on an 80 percent average'. 
These data points represent real people. Each time our model was wrong we should be glad -- in such misclasifications we will likely find incredible stories of human nature and courage triumphing over extremely difficult odds. \n\n__It is important to never lose sight of the human element when analyzing data that deals with people.__ ", "_____no_output_____" ], [ "<a id='sec7'></a>\n___\n## Appendix II: Resources and references to material we won't cover in detail", "_____no_output_____" ], [ "> * **Gradient Boosting:** http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/\n\n> * **Jupyter Notebook (tutorial):** https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook\n\n> * **K-Nearest Neighbors (KNN):** https://towardsdatascience.com/introduction-to-k-nearest-neighbors-3b534bb11d26\n\n> * **Logistic Regression:** https://towardsdatascience.com/5-reasons-logistic-regression-should-be-the-first-thing-you-learn-when-become-a-data-scientist-fcaae46605c4\n\n> * **Naive Bayes:** http://scikit-learn.org/stable/modules/naive_bayes.html\n\n> * **Perceptron:** http://aass.oru.se/~lilien/ml/seminars/2007_02_01b-Janecek-Perceptron.pdf\n\n> * **Random Forest:** https://medium.com/@williamkoehrsen/random-forest-simple-explanation-377895a60d2d\n\n> * **Support Vector Machines (SVM):** https://towardsdatascience.com/https-medium-com-pupalerushikesh-svm-f4b42800e989\n\n\n<br>\n___\n___", "_____no_output_____" ], [ "![](http://i67.tinypic.com/2jcbwcw.png)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0e0a32b6388737c7761bb8ece8d2301496c8e28
2,146
ipynb
Jupyter Notebook
Array/1028/932. Beautiful Array.ipynb
YuHe0108/Leetcode
90d904dde125dd35ee256a7f383961786f1ada5d
[ "Apache-2.0" ]
1
2020-08-05T11:47:47.000Z
2020-08-05T11:47:47.000Z
Array/1028/932. Beautiful Array.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
Array/1028/932. Beautiful Array.ipynb
YuHe0108/LeetCode
b9e5de69b4e4d794aff89497624f558343e362ad
[ "Apache-2.0" ]
null
null
null
20.056075
74
0.416589
[ [ [ " 对于某些固定的 N,如果数组 A 是整数 1, 2, ..., N 组成的排列,使得:\n\n 对于每个 i < j,都不存在 k 满足 i < k < j 使得 A[k] * 2 = A[i] + A[j]。\n\n 那么数组 A 是漂亮数组。\n\n 给定 N,返回任意漂亮数组 A(保证存在一个)。\n\n示例 1:\n 输入:4\n 输出:[2,1,4,3]\n\n示例 2:\n 输入:5\n 输出:[3,1,2,5,4]\n\n提示:\n 1 <= N <= 1000\n", "_____no_output_____" ] ], [ [ "class Solution:\n def beautifulArray(self, N: int):\n # 将res分成两半,左边都是奇数,右边都是偶数,因为奇数和偶数的平均数,是小数,肯定不在两者之间存在\n res = [1]\n while len(res) < N:\n print([x * 2 - 1 for x in res])\n print([x * 2 for x in res])\n res = [x * 2 - 1 for x in res] + [x * 2 for x in res]\n return [x for x in res if x <= N]", "_____no_output_____" ], [ "solution = Solution()\nsolution.beautifulArray(5)", "[1]\n[2]\n[1, 3]\n[2, 4]\n[1, 5, 3, 7]\n[2, 6, 4, 8]\n" ] ] ]
[ "raw", "code" ]
[ [ "raw" ], [ "code", "code" ] ]
d0e0b2a8e7142e9517431890f5fadf637ab36d08
4,911
ipynb
Jupyter Notebook
ode_methods_ab.ipynb
chapman-phys220-2018f/cw10-jap
d7b564bd2c465064e3d65bea33c2487cf465a39d
[ "MIT" ]
null
null
null
ode_methods_ab.ipynb
chapman-phys220-2018f/cw10-jap
d7b564bd2c465064e3d65bea33c2487cf465a39d
[ "MIT" ]
null
null
null
ode_methods_ab.ipynb
chapman-phys220-2018f/cw10-jap
d7b564bd2c465064e3d65bea33c2487cf465a39d
[ "MIT" ]
null
null
null
33.636986
453
0.611281
[ [ [ "# CW10: Ordinary Differential Equations", "_____no_output_____" ], [ "Notes:\n\n- solution to a differential equation is a function or set of functions\n- Euler's Method serves as the basis for all others\n- The names of each method gives insight to how the functions look/behave graphically", "_____no_output_____" ], [ "Solving a differential equation with initial condition:\n\n$dy/dx = cos(x)$ $y(0) = -1$", "_____no_output_____" ], [ "multiply both sides by $dx$\n$$1dy = cos(x)dx$$\nfind the antiderivative of both sides\n$$\\int 1dy = \\int cos(x)dx$$\n$$y = sin(x)+C; y(0) = -1$$\nplug $y(0) = -1$ into any $y$ values\n$$-1 = sin(0) + C$$\n$$-1 = 0 + C$$\n$$-1 = C$$\nplug $C$ in to achieve particular solution\n$$y = sin(x) - 1$$", "_____no_output_____" ], [ "Euler Method:\n- Euler method is used to obtain solutions numerically. From any point on a curve, you can find an approximate of a nearby point on the curve by moving a short distance along a line tangent to the curve\n- Looking at the taylor expansion, Euler Method solves to an error squared/of order 2.\n - $f(t+\\Delta t)=f(t)+f'(t)\\Delta t+\\mathcal{O}(\\Delta t)^2$\n - IDEA: the curve is initially unknown but the starting point is known. From a differential equation the slope to the curve at the starting point can be computed and the tangent line", "_____no_output_____" ], [ "Leapfrog (Midpoint) Method:\n- The Leapfrog Method takes the initial starting point (similar to Euler) then takes a \"step\" back on the graph ($\\Delta t$) and finds the point along that slope instead\n- Euler's Method finds the next point when the tangent intersectsthe vertical line ($\\Delta t$) which veers away from the curve/larger error. Leapfrog Method uses the tangent at the midpoint which yeilds a more accurate approximation of the curve\n- Looking at the taylor expansion, Leapfrog Method solves to an error cubed/of order 3 which is a smaller error/more accurate compared to Euler Method\n - $f(t+\\Delta t)-f(t-\\Delta t)=2f'(t)\\Delta t+\\mathcal{O}(\\Delta t)^3$", "_____no_output_____" ], [ "Heun's (Trapezoid) Method:\n- Euler Method improves linearly when the step size is decreased. Heun Method improves accuracy quadratically\n- The point estimated by Heun's Method takes both tangent lines provided by the points determined by Euler's Method, then takes the average between the two intersections of the tangent lines to $\\Delta t$ (In the equation below (x-y coordinates), h is equivalent to $\\Delta t$)\n - $y_{n+1} = y_n + (h/2)(f(x_n,y_n)+f(x_n+h, y_n+hf(x_n,y_n)))$", "_____no_output_____" ], [ "2nd-order Runge-Kutta Method:\n- Slope $K_1$ is halved to obtain a midpoint. This midpoint yeilds another slope, approximating point $K_2$. This differs from Heun's method because slopes of points are used rather than tangent lines.", "_____no_output_____" ], [ "4th-order Runge-Kutta Method:\n- $K_1$ is the linear approximation of the difference between $u_{k+1}$ and $u_{k}$. $K_2$ is the linear approximation using $\\Delta t/2$ between $K_1$. $K_3$ uses the slope of $K_2$ stopping at $\\Delta t/2$. $K_4$ is found similarly to $K_3$, using the slope of $K_3$ instead of $K_2$. A more detailed description of the graph is given by the two midpoints versus endpoints. The midpoints allow us to determine any nonlinearity in the function.", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0e0baae854b692afec119b80ca9206942f49914
4,873
ipynb
Jupyter Notebook
notebooks/format_options.ipynb
tbabej/datafiles
c9d1ff8f8b22039a79ceee705019ec6a872de8f7
[ "MIT" ]
null
null
null
notebooks/format_options.ipynb
tbabej/datafiles
c9d1ff8f8b22039a79ceee705019ec6a872de8f7
[ "MIT" ]
null
null
null
notebooks/format_options.ipynb
tbabej/datafiles
c9d1ff8f8b22039a79ceee705019ec6a872de8f7
[ "MIT" ]
null
null
null
18.670498
62
0.438539
[ [ [ "%%sh\n\nrm -f files/format_options.*", "_____no_output_____" ], [ "from dataclasses import dataclass\nfrom typing import List\n\n@dataclass\nclass Nested:\n value: int\n\n@dataclass\nclass Base:\n my_dict: Nested\n my_list: List[Nested]\n my_bool: bool = True\n my_float: float = 1.23\n my_int: int = 42\n my_str: str = \"Hello, world!\"", "_____no_output_____" ] ], [ [ "# JSON", "_____no_output_____" ] ], [ [ "from datafiles import datafile\n\n@datafile('files/format_options.json', defaults=True)\nclass Sample(Base):\n fmt: str = \"JavaScript Object Notation\"\n \nsample = Sample(Nested(0), [Nested(1), Nested(2)])", "_____no_output_____" ], [ "%%sh\n\ncat files/format_options.json", "{\n \"my_dict\": {\n \"value\": 0\n },\n \"my_list\": [\n {\n \"value\": 1\n },\n {\n \"value\": 2\n }\n ],\n \"my_bool\": true,\n \"my_float\": 1.23,\n \"my_int\": 42,\n \"my_str\": \"Hello, world!\"\n}" ] ], [ [ "# TOML", "_____no_output_____" ] ], [ [ "from datafiles import datafile\n\n@datafile('files/format_options.toml', defaults=True)\nclass Sample(Base):\n fmt: str = \"Tom's Obvious Minimal Language\"\n \nsample = Sample(Nested(0), [Nested(1), Nested(2)])", "_____no_output_____" ], [ "%%sh\n\ncat files/format_options.toml", "my_bool = true\nmy_float = 1.23\nmy_int = 42\nmy_str = \"Hello, world!\"\n[[my_list]]\nvalue = 1\n\n[[my_list]]\nvalue = 2\n\n[my_dict]\nvalue = 0\n" ] ], [ [ "# YAML", "_____no_output_____" ] ], [ [ "from datafiles import datafile\n\n@datafile('files/format_options.yml', defaults=True)\nclass Sample(Base):\n fmt: str = \"YAML Ain't Markup Language\"\n \nsample = Sample(Nested(0), [Nested(1), Nested(2)])", "_____no_output_____" ], [ "%%sh\n\ncat files/format_options.yml", "my_dict:\n value: 0\nmy_list:\n - value: 1\n - value: 2\nmy_bool: true\nmy_float: 1.23\nmy_int: 42\nmy_str: Hello, world!\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0e0c0b1eff6a721e466bc92636403a1213db389
48,419
ipynb
Jupyter Notebook
application/app/Chapter4/feature_selection.ipynb
slevgolo/book-python-machine-learning
56c312c8d71397cb3321091f3640bfeeea2cd14d
[ "MIT" ]
null
null
null
application/app/Chapter4/feature_selection.ipynb
slevgolo/book-python-machine-learning
56c312c8d71397cb3321091f3640bfeeea2cd14d
[ "MIT" ]
null
null
null
application/app/Chapter4/feature_selection.ipynb
slevgolo/book-python-machine-learning
56c312c8d71397cb3321091f3640bfeeea2cd14d
[ "MIT" ]
null
null
null
89.664815
17,212
0.785869
[ [ [ "from sklearn.base import clone\nfrom itertools import combinations\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\n\nclass SBS():\n def __init__(self, estimator, k_features, scoring=accuracy_score,\n test_size=0.25, random_state=1):\n self.scoring = scoring\n self.estimator = clone(estimator)\n self.k_features = k_features\n self.test_size = test_size\n self.random_state = random_state\n\n def fit(self, X, y):\n \n X_train, X_test, y_train, y_test = \\\n train_test_split(X, y, test_size=self.test_size,\n random_state=self.random_state)\n\n dim = X_train.shape[1]\n self.indices_ = tuple(range(dim))\n self.subsets_ = [self.indices_]\n score = self._calc_score(X_train, y_train, \n X_test, y_test, self.indices_)\n self.scores_ = [score]\n\n while dim > self.k_features:\n scores = []\n subsets = []\n\n for p in combinations(self.indices_, r=dim - 1):\n score = self._calc_score(X_train, y_train, \n X_test, y_test, p)\n scores.append(score)\n subsets.append(p)\n\n best = np.argmax(scores)\n self.indices_ = subsets[best]\n self.subsets_.append(self.indices_)\n dim -= 1\n\n self.scores_.append(scores[best])\n self.k_score_ = self.scores_[-1]\n\n return self\n\n def transform(self, X):\n return X[:, self.indices_]\n\n def _calc_score(self, X_train, y_train, X_test, y_test, indices):\n self.estimator.fit(X_train[:, indices], y_train)\n y_pred = self.estimator.predict(X_test[:, indices])\n score = self.scoring(y_test, y_pred)\n return score", "_____no_output_____" ], [ "import pandas as pd\nimport os\ndf_wine = pd.read_csv(os.path.join('..', '..', 'data', 'input', 'wine.data'), header=None)\ndf_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols',\n 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue',\n 'OD280/OD315 of diluted wines', 'Proline']\nprint('Class labels', np.unique(df_wine['Class label']))\ndf_wine.head()", "Class labels [1 2 3]\n" ], [ "from sklearn.model_selection import train_test_split\nX, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)\nfrom sklearn.preprocessing import StandardScaler\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std = stdsc.transform(X_test)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=5)\nsbs = SBS(knn, k_features=1)\nsbs.fit(X_train_std, y_train)", "_____no_output_____" ], [ "k_feat = [len(k) for k in sbs.subsets_]\nplt.plot(k_feat, sbs.scores_, marker='o')\nplt.ylim([0.7, 1.02])\nplt.ylabel('Accuracy')\nplt.xlabel('Number of features')\nplt.grid()\nplt.tight_layout()\nplt.show()", "_____no_output_____" ], [ "k3 = list(sbs.subsets_[10])\nprint(df_wine.columns[1:][k3])", "Index(['Alcohol', 'Malic acid', 'OD280/OD315 of diluted wines'], dtype='object')\n" ], [ "df_wine.columns", "_____no_output_____" ], [ "k3", "_____no_output_____" ], [ "knn.fit(X_train_std, y_train)\nprint('Training accuracy: ', knn.score(X_train_std, y_train))\nprint('Test accuracy: ', knn.score(X_test_std, y_test))", "Training accuracy: 0.967741935483871\nTest accuracy: 0.9629629629629629\n" ], [ "knn.fit(X_train_std[:, k3], y_train)\nprint('Training accuracy: ', knn.score(X_train_std[:, k3], y_train))\nprint('Test accuracy: ', knn.score(X_test_std[:, k3], y_test))", "Training 
accuracy: 0.9516129032258065\nTest accuracy: 0.9259259259259259\n" ], [ "from sklearn.ensemble import RandomForestClassifier\nfeat_labels = df_wine.columns[1:]\nforest = RandomForestClassifier(n_estimators=500, random_state=1)\nforest.fit(X_train, y_train)\nimportances = forest.feature_importances_\nindices = np.argsort(importances)[::-1]\nfor f in range(X_train.shape[1]):\n print(\"%2d) %-*s %f\" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))\nplt.title('Feature Importance')\nplt.bar(range(X_train.shape[1]), importances[indices], align='center')\nplt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)\nplt.xlim([-1, X_train.shape[1]])\nplt.tight_layout()\nplt.show()", " 1) Proline 0.185453\n 2) Flavanoids 0.174751\n 3) Color intensity 0.143920\n 4) OD280/OD315 of diluted wines 0.136162\n 5) Alcohol 0.118529\n 6) Hue 0.058739\n 7) Total phenols 0.050872\n 8) Magnesium 0.031357\n 9) Malic acid 0.025648\n10) Proanthocyanins 0.025570\n11) Alcalinity of ash 0.022366\n12) Nonflavanoid phenols 0.013354\n13) Ash 0.013279\n" ], [ "from sklearn.feature_selection import SelectFromModel\nsfm = SelectFromModel(forest, threshold=0.1, prefit=True)\nX_selected = sfm.transform(X_train)\nprint('Number of features that meet this threshold', 'criterion:', X_selected.shape[1])\nfor f in range(X_selected.shape[1]):\n print(\"%2d) %-*s %f\" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))", "Number of features that meet this threshold criterion: 5\n 1) Proline 0.185453\n 2) Flavanoids 0.174751\n 3) Color intensity 0.143920\n 4) OD280/OD315 of diluted wines 0.136162\n 5) Alcohol 0.118529\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0e0c6ce9998de5fd2820ebcb3dfd9c61597f65e
725,013
ipynb
Jupyter Notebook
DSC 530 - Data Exploration and Analysis/Project/.ipynb_checkpoints/DataScience-checkpoint.ipynb
Hakuna-Patata/BU_MSDS_PTW
4759cb2db3e63ae5722bd42771e4d228dfbc733d
[ "MIT" ]
null
null
null
DSC 530 - Data Exploration and Analysis/Project/.ipynb_checkpoints/DataScience-checkpoint.ipynb
Hakuna-Patata/BU_MSDS_PTW
4759cb2db3e63ae5722bd42771e4d228dfbc733d
[ "MIT" ]
null
null
null
DSC 530 - Data Exploration and Analysis/Project/.ipynb_checkpoints/DataScience-checkpoint.ipynb
Hakuna-Patata/BU_MSDS_PTW
4759cb2db3e63ae5722bd42771e4d228dfbc733d
[ "MIT" ]
null
null
null
196.427255
49,968
0.848538
[ [ [ "## Importing Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport seaborn as sns\nfrom matplotlib.offsetbox import (AnchoredOffsetbox, TextArea)\nimport statsmodels.formula.api as smf\nimport requests\nimport sklearn as skl\nfrom sklearn import datasets\nimport scipy.stats as spstats\nfrom scipy.special import inv_boxcox", "_____no_output_____" ] ], [ [ "<br><br>\n\n## Functions", "_____no_output_____" ] ], [ [ "def df_index_slice(df, indices, include=True, sort=False, sort_col=None, desc=True):\n \"\"\"\n ====================================================================================================\n PURPOSE:\n | Given a DataFrame and a list of indices, either exclude or include the rows of thos indices.\n | Then sort the DataFrame on a specific column if desired.\n |\n ==========\n PARAMETERS:\n | df : pandas DataFrame to slice.\n |\n | indices : List of indices to slice the DataFrame by.\n |\n | include : If set to True (default), will include the rows based on the indices provided and will\n | will exclude others. If set to False, will exclude the rows based on the indices provided\n | and will include others.\n |\n | sort : If False (default), no sorting will be done on the sliced DataFrame. If True, will sort.\n |\n | sort_col : If sorting enabled, the column name on which sorting will be determined.\n |\n | desc : If True (default), sliced DataFrame will be sorted descending based on the column specified,\n | else, if False, will be sorted ascending based on the column specified.\n |\n ==========\n OUTPUT:\n | - A sliced DataFrame\n |\n ====================================================================================================\n \"\"\"\n \n if include:\n df = df.iloc[df.index.isin(indices)]\n else:\n df = df.iloc[~df.index.isin(indices)]\n \n if sort:\n sort_switch = False if desc else True\n try:\n df = df.sort_values(by=[sort_col], ascending=sort_switch)\n except:\n print(f\"Unable to sort!\")\n else:\n pass\n \n return df\n\n\ndef invert_transform(value, reversed_transformation=None, pre_xfrm='N', lamb=None, mean=None, std=None, min_val=None, col_sum=None, shift=None):\n \"\"\"\n ====================================================================================================\n PURPOSE:\n | Given a value and the transformation that was used to transform that value, get the inverse\n | transformation of the value. \n |\n ==========\n PARAMETERS:\n | value : Value to inverse transform.\n |\n | reversed_transformation : Select the transformation that you want to inverse.\n | - boxcox\n | - log\n | - recip (aka reciprocal)\n | - reflect\n | - x2\n | - normalize\n | - zscore\n | - exp\n |\n | pre_xfrm: \n | - N : Values were not shifted prior to transformation.\n | - Y : Values were shifted prior to transformation. In this case you'll need to enter the \n | parameters used to shift the values.\n |\n | lamb : Required to inverse boxcox transformation.\n |\n | mean : Required to inverse zscore transformation.\n |\n | std : Required to inverse zscore transformation.\n |\n | min_val : Required to inverse reflect transformation.\n |\n | col_sum : Required to inverse normalization transformation.\n |\n | shift : Amount that values were shifted prior to transformation. 
Required if pre_xfrm parameter \n | set to 'Y' and transformation utilizes shift.\n |\n ==========\n OUTPUT:\n | - The inverse representation of a value that was transformed.\n |\n ====================================================================================================\n \"\"\"\n if reversed_transformation == 'boxcox':\n if lamb is None:\n print('Must specify lambda!')\n else:\n rev_val = inv_boxcox(value, lamb)\n return rev_val\n\n elif reversed_transformation == 'log':\n if pre_xfrm.upper()=='N':\n rev_val = math.exp(value)\n else:\n if shift is None:\n print(f\"Enter amount to shift values or set pre_xfrm='N'\")\n else:\n rev_val = math.exp(value) - shift\n return rev_val\n \n elif reversed_transformation == 'recip':\n if pre_xfrm.upper()=='N':\n rev_val = (1 / value)\n else:\n if shift is None:\n print(f\"Enter amount to shift values or set pre_xfrm='N'\")\n else:\n rev_val = (1 / value) - shift\n return rev_val\n \n elif reversed_transformation == 'reflect':\n if min_val is None:\n print(f\"Must enter minimum value used to reflect original data!\")\n else:\n rev_val = min_val + value\n return rev_val\n \n elif reversed_transformation == 'x2':\n if pre_xfrm.upper()=='N':\n rev_val = math.sqrt(value)\n else:\n if shift is None:\n print(f\"Enter amount to shift values or set pre_xfrm='N'\")\n else:\n rev_val = (math.sqrt(value)) - shift\n \n elif reversed_transformation == 'normalize':\n if pre_xfrm.upper()=='N':\n if col_sum is None:\n print(f\"Must enter the sum used to normalize the original data!\")\n else:\n rev_val = col_sum * value\n else:\n if col_sum is None or shift is None:\n print(f\"Must enter the sum used to normalize and the amount of shift done prior to normalization!\")\n else:\n rev_val = (col_sum * value) - shift\n \n elif reversed_transformation == 'zscore':\n if mean is None or std is None:\n print(f\"Must enter mean and standard deviation used to obtain zscore for the data!\")\n else:\n rev_val = (value * std) + mean\n \n elif reversed_transformation == 'exp':\n rev_val = np.exp(value)\n \n ", "_____no_output_____" ] ], [ [ "<br><br>\n\n## Classes", "_____no_output_____" ] ], [ [ "class SwissDF(object):\n \"\"\"\n ====================================================================================================\n DEVELOPER: Patrick Weatherford\n \n CLASS OVERVIEW:\n This is the Data Science swiss army knife for a pandas DataFrames!! \n \"\"\"\n \n def __init__(self, df):\n \"\"\"\n ====================================================================================================\n PURPOSE:\n | Instantiate SwissDF object.\n |\n ==========\n PARAMETERS:\n | df : A pandas DataFrame or something that can be converted into a pandas DataFrame. 
Will first try\n        | to convert the variable into a DataFrame and, if unsuccessful, will print an error message.\n        |\n        ==========\n        OUTPUT:\n        | SwissDF object created that has an attribute .df for the DataFrame instantiated with the object.\n        |\n        ====================================================================================================\n        \"\"\"\n        try:\n            self.df = pd.DataFrame(df)  # try to convert to pandas DataFrame\n        except Exception:\n            print('Could not convert the input to a pandas.DataFrame!')\n\n    def df_dist_plot(self, graph_type='histplot', hist_color=\"grey\", kde_color=\"black\"):\n        \"\"\"\n        ====================================================================================================\n        PURPOSE:\n        | Take a DataFrame and plot a histogram/KDE plot for each variable in the DataFrame.\n        |\n        ==========\n        PARAMETERS:\n        | graph_type : Type of graph to display.\n        |    - histplot (default) - histogram w/KDE\n        |    - cdf - Cumulative Distribution Function\n        |\n        | hist_color : Color of histogram bars.\n        |\n        | kde_color : Color of kernel density estimation (kde) line.\n        |\n        ==========\n        OUTPUT:\n        | Multiple plot figures with a shape of rows=1, cols=3. The number of plots depends on the number\n        | of variables (aka columns) in the DataFrame.\n        |\n        ====================================================================================================\n        \"\"\"\n        iterations = math.ceil(len(self.df.columns) / 3)\n        num_vars = len(self.df.columns)\n        var_cnt = 0\n\n        for row in range(iterations):\n            fig = plt.figure()\n            fig.set_figwidth(fig.get_figwidth() * 3)\n            for col in range(3):\n                plt_loc = int('13' + str(col + 1))  # subplot codes 131, 132, 133\n                plt.subplot(plt_loc)\n                if var_cnt >= num_vars:\n                    pass  # leave trailing subplots empty on the last row\n                else:\n                    if graph_type == 'histplot':\n                        sns.histplot(self.df.iloc[:, [var_cnt]]\n                                     , kde=True\n                                     , alpha=.5\n                                     , line_kws={\"lw\": 4}\n                                     , facecolor=hist_color\n                                     , edgecolor=None\n                                     ).lines[0].set_color(kde_color)\n\n                    elif graph_type == 'cdf':\n                        sns.ecdfplot(self.df.iloc[:, [var_cnt]])\n\n                var_cnt += 1\n            plt.show()\n\n    def get_outlier_info(self, outlier_method='iqr', exclude_cols=None):\n        \"\"\"\n        ====================================================================================================\n        PURPOSE:\n        | Take the object DataFrame attribute and flag outliers for each variable, recording the index for\n        | each of them in a list.
The outlier list will then be instantiated as an attribute and will also\n        | be used to instantiate an outlier DataFrame which can be used to review the outliers before\n        | taking action on them.\n        |\n        ==========\n        PARAMETERS:\n        | outlier_method : Calculation method to define outliers.\n        |    - iqr (default) : x < q1 - iqr * 1.5 OR x > q3 + iqr * 1.5\n        |    - zscore : If the absolute zscore for the variable is > 3\n        |\n        | exclude_cols : List of column names to exclude from outlier analysis.\n        |\n        ==========\n        OUTPUT:\n        | - object attribute .outlier_indices : list of row indices where any variable has an outlier.\n        | - object attribute .outlier_dict : dictionary of all columns and the outlier row indices for each variable.\n        | - object attribute .outlier_df : DataFrame of all rows where any variable has an outlier.\n        |\n        ====================================================================================================\n        \"\"\"\n        df_copy = self.df.copy()\n\n        if exclude_cols is not None:\n            df_copy = df_copy.drop(columns=exclude_cols)  # leave excluded columns out of the analysis\n\n        indices = []\n        outlier_indices = []\n        outlier_dict = {}\n\n        if outlier_method == 'zscore':\n            for col in df_copy.columns:\n                zscores = (df_copy[col] - df_copy[col].mean()) / df_copy[col].std()\n                index = df_copy.index[zscores.abs() > 3].tolist()\n                indices.extend(index)\n                outlier_dict[col] = index\n\n        elif outlier_method == 'iqr':\n            for col in df_copy.columns:\n                q1 = df_copy[col].quantile(0.25)\n                q3 = df_copy[col].quantile(0.75)\n                iqr = q3 - q1\n                lower_bound = q1 - (iqr * 1.5)\n                upper_bound = q3 + (iqr * 1.5)\n                index = df_copy.index[(df_copy[col] < lower_bound) | (df_copy[col] > upper_bound)].tolist()\n                indices.extend(index)\n                outlier_dict[col] = index\n\n        for i in indices:\n            if i in outlier_indices or i is None or i == '':\n                pass\n            else:\n                outlier_indices.append(i)\n\n        if len(outlier_indices) > 0:\n            self.outlier_indices = outlier_indices\n            self.outlier_dict = outlier_dict\n            self.outlier_df = self.df.iloc[self.df.index.isin(self.outlier_indices)]\n        else:\n            print(\"No outliers found in DataFrame.\")\n\n    def remove_outliers(self, outlier_method='iqr', for_vars='All'):\n        \"\"\"\n        ====================================================================================================\n        PURPOSE:\n        | Remove all variable outlier rows or only outlier rows for specified variables.\n        |\n        ==========\n        PARAMETERS:\n        | outlier_method : Calculation method to define outliers.\n        |    - iqr (default) : x < q1 - iqr * 1.5 OR x > q3 + iqr * 1.5\n        |    - zscore : If the absolute zscore for the variable is > 3\n        |\n        | for_vars : specify the variables whose outlier rows should be removed ('All' by default)\n        |\n        ==========\n        OUTPUT:\n        | Will modify the object DataFrame attribute and remove all variable outlier rows or only outlier\n        | rows for specified variables.\n        |\n        ====================================================================================================\n        \"\"\"\n        df_start_len = len(self.df)\n\n        try:\n            self.outlier_dict  # check that the outlier analysis has already run\n        except AttributeError:\n            self.get_outlier_info(outlier_method=outlier_method)\n\n        if for_vars == 'All':\n            self.df = self.df.iloc[~self.df.index.isin(self.outlier_indices)]\n            df_end_len = len(self.df)\n            print(f'{df_start_len-df_end_len} rows removed!')\n        else:\n            index_holder = []\n            index_filter_list = []\n            for col in for_vars:\n                index_holder.extend(self.outlier_dict[col])\n            for i in index_holder:\n                if i not in index_filter_list:\n                    index_filter_list.append(i)\n\n            self.df = self.df.iloc[~self.df.index.isin(index_filter_list)]\n            df_end_len = len(self.df)\n            print(f'{df_start_len-df_end_len} rows removed!')\n\n    def col_transform(self, transform_type='log', transform_cols=None):\n        \"\"\"\n        ====================================================================================================\n        PURPOSE:\n        | Can be used to transform specific columns into other representations and then add the new\n        | columns onto the existing DataFrame associated with the object.\n        |\n        | Right (positive) skew (from weakest to strongest correction):\n        |    - log\n        |    - recip\n        |\n        | Left (negative) skew (from weakest to strongest correction):\n        |    - reflect (*then must do an appropriate right-skew transformation)\n        |    - x2\n        |    - exp\n        |\n        ==========\n        PARAMETERS:\n        | transform_type : specify the transformation for the new column\n        |    - log : Takes the log of each of the values. Will first check to see if the minimum value\n        |            for the column is <= 0. If so, will add the absolute value of the minimum + 1 to\n        |            ensure no log of 0 or a negative number.\n        |\n        |    - recip : (reciprocal) = 1 / value. Will first check to see if the minimum value for the\n        |              column is <= 1. If so, will add the absolute value of the minimum + 2 to ensure\n        |              no division by 0.\n        |\n        |    - normalize : The data value divided by the sum of the entire column.\n        |\n        |    - zscore : (data value - mean) / standard deviation\n        |\n        |    - reflect : Subtract every value from the minimum value. Then perform an appropriate\n        |                right-skew transformation.\n        |\n        |    - x2 : Square each value of x\n        |\n        |    - exp : e**x\n        |\n        |    - boxcox : scipy.stats.boxcox(x)\n        |\n        | transform_cols : specify the column names to transform in a list format\n        |\n        ==========\n        OUTPUT:\n        | - Modified object DataFrame attribute with new columns that are transformations of existing\n        |   columns specified in the object's DataFrame.\n        |\n        | - Object attribute for parameters used for transformation\n        |\n        ====================================================================================================\n        \"\"\"\n        self.xfrm_params = {}\n\n        if transform_cols is None:\n            print(\"No columns selected!\")\n\n        else:\n            if transform_type == 'log':\n                for col in transform_cols:\n                    new_col_name = col + '_LOG'\n                    if self.df[col].min() > 0:\n                        self.df[new_col_name] = np.log(self.df[col])  # log(value)\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": 'N'\n                            , \"SHIFT\": 0\n                            , \"X_FORM\": \"log(x)\"\n                        }\n                    elif self.df[col].min() <= 0:\n                        self.df[new_col_name] = np.log(self.df[col] + abs(self.df[col].min()) + 1)  # log(value accounting for negative & 0)\n                        self.xfrm_params[new_col_name] = {  # parameters needed to invert column values\n                            \"PRE_XFRM\": 'Y'\n                            , \"SHIFT\": abs(self.df[col].min()) + 1\n                            , \"X_FORM\": \"log(x + abs(min(x)) + 1)\"\n                        }\n                print(f\"{transform_type} of {transform_cols} successfully added to object DataFrame!\")\n\n            elif transform_type == \"recip\":\n                for col in transform_cols:\n                    new_col_name = col + '_RECIP'\n                    if self.df[col].min() > 1:\n                        self.df[new_col_name] = 1 / (self.df[col])  # 1 / value\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": \"N\"\n                            , \"X_FORM\": \"1 / x\"\n                        }\n                    elif self.df[col].min() <= 1:\n                        self.df[new_col_name] = 1 / (self.df[col] + abs(self.df[col].min()) + 2)  # 1 / value accounting for negative, 1, and 0\n                        self.xfrm_params[new_col_name] = {  # parameters needed to invert column values\n                            \"PRE_XFRM\": \"Y\"\n                            , \"SHIFT\": abs(self.df[col].min()) + 2\n                            , \"X_FORM\": \"1 / (x + abs(min(x)) + 2)\"\n                        }\n                print(f\"{transform_type} of {transform_cols} successfully added to object DataFrame!\")\n\n            elif transform_type == 'normalize':\n                for col in transform_cols:\n                    new_col_name = col + '_NORMLZ'\n                    if self.df[col].min() > 0:\n                        self.df[new_col_name] = self.df[col] / sum(self.df[col])  # value / sum of column values\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": \"N\"\n                            , \"SHIFT\": 0\n                            , \"COL_SUM\": sum(self.df[col])\n                            , \"X_FORM\": \"x / (sum(x))\"\n                        }\n                    elif self.df[col].min() <= 0:\n                        shifted = self.df[col] + abs(self.df[col].min()) + 1\n                        self.df[new_col_name] = shifted / sum(shifted)\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": \"Y\"\n                            , \"SHIFT\": abs(self.df[col].min()) + 1\n                            , \"COL_SUM\": sum(shifted)  # sum of the *shifted* column, needed to invert\n                            , \"X_FORM\": \"(x + abs(min(x)) + 1) / (sum( (x + abs(min(x)) + 1) ))\"\n                        }\n                print(f\"{transform_type} of {transform_cols} successfully added to object DataFrame!\")\n\n            elif transform_type == 'zscore':\n                for col in transform_cols:\n                    new_col_name = col + '_Z'\n                    self.df[new_col_name] = (self.df[col] - self.df[col].mean()) / self.df[col].std()\n                    self.xfrm_params[new_col_name] = {\n                        \"MEAN\": self.df[col].mean()\n                        , \"STD\": self.df[col].std()\n                        , \"X_FORM\": \"(x - mean(x)) / std(x)\"\n                    }\n                print(f\"{transform_type} of {transform_cols} successfully added to object DataFrame!\")\n\n            elif transform_type == 'reflect':\n                for col in transform_cols:\n                    new_col_name = col + '_REFLECT'\n                    self.df[new_col_name] = self.df[col].min() - self.df[col]  # min - value\n                    self.xfrm_params[new_col_name] = {\n                        \"ABS_MIN\": abs(self.df[col].min())\n                        , \"MIN\": self.df[col].min()  # needed by invert_transform(..., 'reflect')\n                        , \"X_FORM\": \"min(x) - x\"\n                    }\n                print(f\"{transform_type} of {transform_cols} successfully added to object DataFrame!\")\n\n            elif transform_type == 'x2':\n                for col in transform_cols:\n                    new_col_name = col + '_X2'\n                    if self.df[col].min() > 0:\n                        self.df[new_col_name] = self.df[col]**2\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": \"N\"\n                            , \"SHIFT\": 0\n                            , \"X_FORM\": \"x**2\"\n                        }\n                    elif self.df[col].min() <= 0:\n                        self.df[new_col_name] = (self.df[col] + abs(self.df[col].min()))**2  # square the shifted values\n                        self.xfrm_params[new_col_name] = {\n                            \"PRE_XFRM\": \"Y\"\n                            , \"SHIFT\": abs(self.df[col].min())\n                            , \"X_FORM\": \"( x + abs(min(x)) )**2\"\n                        }\n\n            elif transform_type == 'exp':\n                for col in transform_cols:\n                    new_col_name = col + '_EXP'\n                    self.df[new_col_name] = np.exp(self.df[col])  # exponentiate values\n                    self.xfrm_params[new_col_name] = {\n                        \"PRE_XFRM\": \"N\"\n                        , \"X_FORM\": \"e**x\"\n                    }\n\n            elif transform_type == 'boxcox':\n                for col in transform_cols:\n                    new_col_name = col + '_BOXCOX'\n                    self.df[new_col_name], lamb = spstats.boxcox(self.df[col])\n                    self.xfrm_params[new_col_name] = {\n                        \"PRE_XFRM\": \"N\"\n                        , \"LAMBDA\": lamb\n                        , \"X_FORM\": \"scipy.stats.boxcox(x)\"\n                    }\n\n    def corr_hm(self, method='pearson', cmap='bwr'):\n\n        fig, ax = plt.subplots(figsize=(6, 1))\n        fig.subplots_adjust(bottom=0.6)\n\n        cm = plt.get_cmap(cmap)  # look up the colormap by name instead of eval()\n        norm = mpl.colors.Normalize(vmin=-1, vmax=1)\n\n        cbar = mpl.colorbar.ColorbarBase(ax\n                                         , cmap=cm\n                                         , norm=norm\n                                         , orientation='horizontal')\n        plt.title(\"Correlation\")\n        plt.show()\n\n        corr_hm = self.df.corr(method=method).style.background_gradient(cmap=cm, vmin=-1, vmax=1)\n        return corr_hm", "_____no_output_____" ] ], [ [ "<br><br>\n\n## Testing", "_____no_output_____" ] ],
[ [ "## regression data set\ncal_housing_df = datasets.fetch_california_housing(as_frame=True).data\ncal_housing_df['MEDIAN_PRICE'] = datasets.fetch_california_housing(as_frame=True).target\n\n## classification data set\niris_df = datasets.load_iris(as_frame=True)", "_____no_output_____" ], [ "new_col_names = [\n    \"MEDIAN_INCOME\"
,\"MEDIAN_HOUSE_AGE\"\n ,\"AVG_ROOMS\"\n ,\"AVG_BEDROOMS\"\n ,\"BLOCK_POP\"\n ,\"AVG_HOUSE_OCC\"\n ,\"LAT\"\n ,\"LON\"\n ,\"MEDIAN_PRICE\"\n]\n\ncal_housing_df.columns = new_col_names", "_____no_output_____" ], [ "df = cal_housing_df\n\ndf1 = SwissDF(df)\n\ndf1.df", "_____no_output_____" ], [ "df1.df_dist_plot() # takes forever to run due to crazy outliers", "_____no_output_____" ], [ "df1.df.describe()", "_____no_output_____" ] ], [ [ "<br>\n\nWhat in the world is going on with **[AVG_HOUSE_OCC]**, **[AVG_ROOMS]**, and **[AVG_BEDROOMS]**??", "_____no_output_____" ] ], [ [ "df1.get_outlier_info(outlier_method='iqr')\n\ndf_index_slice(df1.df\n , df1.outlier_dict['AVG_HOUSE_OCC']\n , sort=True, sort_col='AVG_HOUSE_OCC'\n , desc=True).head(30)", "_____no_output_____" ], [ "df_index_slice(df1.df\n , df1.outlier_dict['AVG_ROOMS']\n , sort=True, sort_col='AVG_ROOMS'\n , desc=True).head(30)", "_____no_output_____" ], [ "df_index_slice(df1.df\n , df1.outlier_dict['AVG_BEDROOMS']\n , sort=True, sort_col='AVG_BEDROOMS'\n , desc=True).head(30)", "_____no_output_____" ] ], [ [ "<br><br>\n\nAfter searching the Latitude/Longitude on Google, it looks like a lot of places where the AVG_HOUSE_OCC, AVG_BEDROOMS, and AVG_ROOOMS is high are places like colleges, prisons, communities, resorts, etc. If we are looking specifically at house prices, I think it would be safe to remove these.", "_____no_output_____" ] ], [ [ "df1.remove_outliers(for_vars=['AVG_ROOMS','AVG_BEDROOMS','AVG_HOUSE_OCC'])", "2235 rows removed!\n" ] ], [ [ "<br><br>\n\nNow lets see what the distributions look like.", "_____no_output_____" ] ], [ [ "df1.df_dist_plot()", "_____no_output_____" ] ], [ [ "<br><br>\n\nNow I'm curious about the outliers for **[MEDIAN_HOUSE_AGE]** and **[MEDIAN_PRICE]**\n", "_____no_output_____" ] ], [ [ "df_index_slice(df1.df\n , df1.outlier_dict['MEDIAN_HOUSE_AGE']\n , sort=True, sort_col='MEDIAN_HOUSE_AGE'\n , desc=True).head(30)", "_____no_output_____" ] ], [ [ "No outliers detected for **[MEDIAN_HOUSE_AGE]**\n\n<br><br>", "_____no_output_____" ] ], [ [ "df_index_slice(df1.df\n , df1.outlier_dict['MEDIAN_PRICE']\n , sort=True, sort_col='MEDIAN_PRICE'\n , desc=True).head(30)", "_____no_output_____" ] ], [ [ "After review, the high price homes seem to located in fancy-pants-ville so I think those outliers should stay. \n\n<br><br>\n\nI still think we can clean the data a little further. For the next step, I'll attempt to make some the right-skewed data more normal by taking the boxcox transformation of those columns.", "_____no_output_____" ] ], [ [ "transform_cols = [\n 'MEDIAN_INCOME'\n ,'BLOCK_POP'\n ,'MEDIAN_PRICE'\n]\n\ndf1.col_transform(transform_type='boxcox', transform_cols=transform_cols)", "<ipython-input-42-abafbe1e0817>:384: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.df[new_col_name], lamb = spstats.boxcox(self.df[col])\n" ], [ "df1.df_dist_plot()", "_____no_output_____" ] ], [ [ "I'm liking that **[BLOCK_POP_LOG]**, **[MEDIAN_INCOME_LOG]**, now taking on a more normal distribution. I think I'll use that moving forward. 
\n\nOK, final data set.", "_____no_output_____" ] ], [ [ "df1.df = df1.df.drop(columns=['MEDIAN_INCOME','BLOCK_POP','MEDIAN_PRICE'])", "_____no_output_____" ], [ "df1.df_dist_plot()", "_____no_output_____" ] ], [ [ "<br><br>\n\nExample of how to revert a value in a transformed distribution back to a meaningful value on the original scale.", "_____no_output_____" ] ], [ [ "## first find the parameters used in the transformation for each transformed column\ndf1.xfrm_params", "_____no_output_____" ] ], [ [ "<br><br>\n\nOnce the parameters are found, use them to invert the transformation back to a meaningful representation.", "_____no_output_____" ] ], [ [ "boxcox_mean = df1.df['BLOCK_POP_BOXCOX'].mean()\nreg_mean = invert_transform(boxcox_mean, reversed_transformation='boxcox', lamb=df1.xfrm_params['BLOCK_POP_BOXCOX']['LAMBDA'])\n\nboxcox_mean, reg_mean", "_____no_output_____" ] ], [ [ "<br><br>\n\nCorrelation heatmap", "_____no_output_____" ] ], [ [ "df1.corr_hm()", "_____no_output_____" ] ] ]
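The inversion demo above leans on `scipy.special.inv_boxcox` with the stored `LAMBDA`. A standalone round-trip check (illustrative values, not the housing columns) confirms the forward/inverse pair recovers the original data:

```python
# Hedged sketch: boxcox followed by inv_boxcox with the fitted lambda should
# reproduce the inputs up to floating-point error.
import numpy as np
import scipy.stats as spstats
from scipy.special import inv_boxcox

x = np.array([1.0, 2.0, 5.0, 10.0, 50.0])
xt, lamb = spstats.boxcox(x)   # forward transform; lambda fit by maximum likelihood
x_back = inv_boxcox(xt, lamb)  # inverse transform
assert np.allclose(x, x_back)  # round trip recovers the original values
```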
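The IQR fence used by `get_outlier_info` also works as a small standalone helper. This sketch is an illustration rather than part of the class; the multiplier argument `k` is a hypothetical name, defaulting to the 1.5 used above:

```python
# Hedged sketch: boolean mask that is True wherever a Series value falls
# outside the [q1 - k*iqr, q3 + k*iqr] fence.
import pandas as pd

def iqr_outlier_mask(s: pd.Series, k: float = 1.5) -> pd.Series:
    q1, q3 = s.quantile(0.25), s.quantile(0.75)
    iqr = q3 - q1
    return (s < q1 - k * iqr) | (s > q3 + k * iqr)

# e.g. df1.df[iqr_outlier_mask(df1.df['AVG_ROOMS'])] shows the flagged rows
```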
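To confirm that a Box-Cox (or log) transform actually reduced the right skew, comparing `scipy.stats.skew` before and after is a cheap check. The sample below uses synthetic lognormal data instead of the housing columns, so it is a sketch rather than a result from the notebook:

```python
# Hedged sketch: skewness should drop toward 0 after a Box-Cox transform
# of strongly right-skewed data.
import numpy as np
import scipy.stats as spstats

x = np.random.default_rng(1).lognormal(mean=0.0, sigma=1.0, size=1_000)
xt, _ = spstats.boxcox(x)
print(f"skew before: {spstats.skew(x):.2f}, skew after: {spstats.skew(xt):.2f}")
```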
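As an alternative to the styled DataFrame returned by `corr_hm`, a single annotated seaborn heatmap can show the correlation matrix and the color scale in one figure. This assumes the same `df1` object from the cells above:

```python
# Hedged sketch: one annotated heatmap in place of the separate colorbar
# figure plus styled DataFrame that corr_hm() produces.
import matplotlib.pyplot as plt
import seaborn as sns

plt.figure(figsize=(8, 6))
sns.heatmap(df1.df.corr(), cmap='bwr', vmin=-1, vmax=1, annot=True, fmt='.2f')
plt.title('Correlation')
plt.tight_layout()
plt.show()
```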
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]