diff --git "a/index.html" "b/index.html" --- "a/index.html" +++ "b/index.html" @@ -13117,7 +13117,7 @@ div#notebook {
In this notebook I'm exploring fast.ai's Kaggle notebook on "How does a neural net really work". This relates to Lesson 3 of the fast.ai Deep Learning course. While the video provides a solid explanation, the enigmatic imports and variables can be difficult to comprehend. I'm reimplementing some sections to see if if sticks. In a nutshell, this is what is happening in this notebook:
+In this notebook I'm exploring fast.ai's Kaggle notebook on "How does a neural net really work". This relates to Lessons 3 and 5 of the fast.ai Deep Learning course. While the video provides a solid explanation, the enigmatic imports and variables can be difficult to comprehend. I'm reimplementing some sections to see if it sticks. In a nutshell, this is what is happening in this notebook:
# Installing the dependencies within the notebook to make it easier to run on colab
-%pip install -Uqq fastai==2.7.18 ipywidgets==8.1.5 plotly==5.24.1
+%pip install -Uqq fastai==2.7.18 ipywidgets==8.1.5 plotly==5.24.1 datasets==3.3.2
This section, from the fast.ai course, sets the stage for understanding how neural networks learn "weights".
+We'll plot some points on a graph and use visualizations to see how changing the coefficients makes the function fit those points better.
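+To make the idea concrete before any plotting, here is a tiny sketch (the helper name is mine, not the notebook's): a quadratic is completely described by its three coefficients, and "fitting" means finding coefficient values whose outputs match the data.
+
+# Hypothetical helper: evaluate a quadratic a*x^2 + b*x + c at a few points.
+def quad(a, b, c, x): return a*x**2 + b*x + c
+print([quad(3, 2, 1, x) for x in (-1.0, 0.0, 1.0)])   # [2.0, 1.0, 6.0]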
from fastai.basics import torch, plt
+import numpy as np, pandas as pd
+
+# Make pandas, numpy, and torch use the full screen width when printing
+np.set_printoptions(linewidth=140)
+torch.set_printoptions(linewidth=140, sci_mode=False, edgeitems=7)
+pd.set_option('display.width', 140)
# Set the figure DPI to 90 for better resolution
plt.rc('figure', dpi=90)
@@ -13310,12 +13318,12 @@ div#notebook {
-
-
@@ -13543,9 +13551,9 @@ var element = $('#259b5904-8016-4be9-a0f8-828e0f13c3d6');
-
from fastai.metrics import mae
-def demo_auto_fit(steps=50):
+def demo_auto_fit(steps=20):
x, y = generate_noisy_data(mk_quad(3,2,1))
abc = torch.tensor([1.0,1.0,1.0], requires_grad=True)
@@ -13819,48 +13827,1255 @@ step=16; loss=0.76; abc=tensor([2.7889, 1.5358, 1.4100], requires_grad=True)
step=17; loss=0.74; abc=tensor([2.8330, 1.5484, 1.3900], requires_grad=True)
step=18; loss=0.71; abc=tensor([2.8771, 1.5611, 1.3700], requires_grad=True)
step=19; loss=0.69; abc=tensor([2.9212, 1.5737, 1.3500], requires_grad=True)
-step=20; loss=0.68; abc=tensor([2.9155, 1.5863, 1.3100], requires_grad=True)
-step=21; loss=0.66; abc=tensor([2.9346, 1.5832, 1.2800], requires_grad=True)
-step=22; loss=0.65; abc=tensor([2.9289, 1.5958, 1.2400], requires_grad=True)
-step=23; loss=0.64; abc=tensor([2.9480, 1.5926, 1.2100], requires_grad=True)
-step=24; loss=0.62; abc=tensor([2.9672, 1.5895, 1.1800], requires_grad=True)
-step=25; loss=0.61; abc=tensor([2.9864, 1.5863, 1.1500], requires_grad=True)
-step=26; loss=0.60; abc=tensor([2.9807, 1.6000, 1.1200], requires_grad=True)
-step=27; loss=0.60; abc=tensor([3.0064, 1.5937, 1.1200], requires_grad=True)
-step=28; loss=0.59; abc=tensor([3.0018, 1.6105, 1.1000], requires_grad=True)
-step=29; loss=0.59; abc=tensor([3.0275, 1.6042, 1.1000], requires_grad=True)
-step=30; loss=0.59; abc=tensor([3.0283, 1.6137, 1.0900], requires_grad=True)
-step=31; loss=0.58; abc=tensor([3.0290, 1.6232, 1.0800], requires_grad=True)
-step=32; loss=0.58; abc=tensor([3.0547, 1.6168, 1.0800], requires_grad=True)
-step=33; loss=0.58; abc=tensor([3.0555, 1.6263, 1.0700], requires_grad=True)
-step=34; loss=0.58; abc=tensor([3.0563, 1.6358, 1.0600], requires_grad=True)
-step=35; loss=0.58; abc=tensor([3.0571, 1.6453, 1.0500], requires_grad=True)
-step=36; loss=0.58; abc=tensor([3.0828, 1.6389, 1.0500], requires_grad=True)
-step=37; loss=0.58; abc=tensor([3.0835, 1.6484, 1.0400], requires_grad=True)
-step=38; loss=0.58; abc=tensor([3.0843, 1.6579, 1.0300], requires_grad=True)
-step=39; loss=0.57; abc=tensor([3.0851, 1.6674, 1.0200], requires_grad=True)
-step=40; loss=0.57; abc=tensor([3.1136, 1.6558, 1.0300], requires_grad=True)
-step=41; loss=0.58; abc=tensor([3.0956, 1.6516, 1.0100], requires_grad=True)
-step=42; loss=0.57; abc=tensor([3.0992, 1.6558, 1.0100], requires_grad=True)
-step=43; loss=0.57; abc=tensor([3.1027, 1.6600, 1.0100], requires_grad=True)
-step=44; loss=0.57; abc=tensor([3.1063, 1.6642, 1.0100], requires_grad=True)
-step=45; loss=0.57; abc=tensor([3.0911, 1.6547, 1.0000], requires_grad=True)
-step=46; loss=0.57; abc=tensor([3.0946, 1.6589, 1.0000], requires_grad=True)
-step=47; loss=0.57; abc=tensor([3.0982, 1.6632, 1.0000], requires_grad=True)
-step=48; loss=0.57; abc=tensor([3.1017, 1.6674, 1.0000], requires_grad=True)
-step=49; loss=0.57; abc=tensor([3.1053, 1.6716, 1.0000], requires_grad=True)
-Best abc parameters: tensor([3.1053, 1.6716, 1.0000])
+Best abc parameters: tensor([2.9212, 1.5737, 1.3500])
@@ -13764,7 +13772,7 @@ var element = $('#82feb05b-de4e-42b1-9bc1-96b7dbb58213');
+
@@ -13397,12 +13405,12 @@ Prediction: 4.0, Actual: 4.2, Absolute Difference: 0.200
+
+
+3. The Basics of a Neural Network¶
+
+
+
+
+
+
+3.1 Introducing Non-Linearity with ReLU¶
We've seen that simple functions like quadratics can model some data, but real-world data is rarely so straightforward. Imagine trying to predict something complex, like whether a picture is a cat or a dog, based on many pixel values (our 'dimensions'). A simple quadratic or even a single linear function just won't be flexible enough to capture the intricate patterns in such high-dimensional data.
+To handle this complexity, we need to build more powerful functions. Simply combining linear functions won't solve the problem because any combination of linear functions is still just a linear function! Linear functions can only model linear relationships in the data. Real-world data, like images of cats and dogs, is highly non-linear.
+To introduce non-linearity, we use activation functions. ReLU (Rectified Linear Unit) is a simple yet powerful one: by applying it to the output of a linear function, we can create models that learn complex, non-linear patterns in the data. This non-linearity is what allows neural networks to model intricate relationships that simple linear models cannot. This will lead us to the idea of a ReLU, a simple activation function, and the simplest "neural network" we can build with it.
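+Before moving on to the ReLU code, here is a quick sketch of the point above that combining linear functions only ever gives another linear function (the helper name is mine):
+
+# A linear function of a linear function is still linear: 2*(3x - 0.5) + 1 = 6x.
+def linear(m, b, x): return m*x + b
+composed = lambda x: linear(2.0, 1.0, linear(3.0, -0.5, x))
+print(composed(torch.tensor([0.0, 1.0, 2.0])))   # tensor([ 0.,  6., 12.])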
+
+
+
+
+
+In [12]:
+
+
+def rectified_linear(m,b,x):
+ y = m*x+b
+ return torch.clip(y, 0.)
+
+plot_function(partial(rectified_linear, 1,1))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Combining two ReLUs allows us to create more complex, piecewise linear functions, as illustrated in the interactive plot below. This combination increases the flexibility of our model, enabling it to capture more intricate relationships in the data.
+
+
+
+
+
+In [13]:
+
+
+def double_relu(m1,b1,m2,b2,x):
+ return rectified_linear(m1,b1,x) + rectified_linear(m2,b2,x)
+
+@interact(m1=-1.5, b1=-1.5, m2=1.5, b2=1.5)
+def plot_double_relu(m1, b1, m2, b2):
+ plot_function(partial(double_relu, m1,b1,m2,b2), ylim=(-1,6))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+3.2 Building a Neural Network from Scratch¶
From this point forward, we will be following this notebook: Linear model and neural net from scratch.
+Important ⚠️: For simplicity, I'm skipping all the steps that involve data cleanup and preparation. This of course means that my model will most likely not perform very well.
+We are using the Titanic competition from Kaggle. I have made a copy in my Hugging Face workspace, which, to be honest, I did mostly to experiment with how Datasets work on Hugging Face.
+
+The goal is to create a model to predict whether a passenger Survived, which is provided in our dataset.
+In essence, we will now combine functions like those we've explored above, such as ReLUs, to construct a simple neural network. This network will receive passenger features as input, apply weights (similar to $m$ in our previous examples), and hopefully predict whether the passenger Survived with the lowest possible loss/error.
+
+
+
+
+
+In [14]:
+
+
+from datasets import load_dataset
+# Load only train and test splits.
+dataset = load_dataset("paulopontesm/titanic", data_files={"train": "train.csv", "test": "test.csv"})
+# Access the splits
+train_dataset_df = dataset["train"].to_pandas()
+test_dataset_df = dataset["test"].to_pandas()
+
+train_dataset_df
+
+
+
+
+
+
+
+Out[14]:
+
+
+
+
+
+
+
+
+Since we need numerical data for our model, we'll just use the columns that already contain numbers as predictors.
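+As a quick aside (not from the original notebook), pandas can list those numeric columns directly with select_dtypes; the describe() call below then summarizes them.
+
+# Show which columns pandas already treats as numeric.
+print(train_dataset_df.select_dtypes(include='number').columns.tolist())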
+
+
+
+
+
+In [15]:
+
+
+import numpy as np
+
+train_dataset_df.describe(include=(np.number))
+
+
+
+
+
+
+
+Out[15]:
+
+
+
+
+
+
+
+
+Now that we have numbers for the features, we can create tensors/arrays for our features (also known as independent variables) and target (also known as the dependent variable).
+Even though I mentioned above that I didn't want to do a lot of data transformations, I think we really need to remove the NaNs and normalize the numbers.
+
+
+
+
+
+In [16]:
+
+
+from torch import tensor
+
+# Create a tensor for the target. Also known as the dependent variable or output
+t_dep = tensor(train_dataset_df.Survived)
+
+indep_cols = ['Age', 'SibSp', 'Parch', 'Fare']
+
+# We need to do 2 things before proceeding so that we can use our data.
+# 1. Replace all the nans by the mode of that column
+for col in indep_cols:
+ mode_val = train_dataset_df[col].mode()[0]
+ train_dataset_df[col] = train_dataset_df[col].fillna(mode_val)
+
+# 2. Normalize each column to the 0-1 range so that no single column dominates the others.
+#    We can do this by dividing each entry by the max value in that column.
+for col in indep_cols:
+ max_val = train_dataset_df[col].max()
+ train_dataset_df[col] = train_dataset_df[col] / max_val
+
+# Create a tensor with our predictors. Also known as independent variables, features, or inputs.
+t_indep = tensor(train_dataset_df[indep_cols].values, dtype=torch.float)
+t_indep
+
+
+
+
+
+
+
+Out[16]:
+
+
+
+
+
+
+
+
+Looks good (?)...
+Because we want to calculate accuracy later, for fun, let's also keep a chunk of the training data aside for this. This is called a validation set.
+Note: This other notebook explains the difference between the validation set and the test set. They seem similar, but it looks like it's important not to confuse the two concepts: https://www.kaggle.com/code/jhoward/getting-started-with-nlp-for-absolute-beginners#Test-and-validation-sets
+
+
+
+
+
+In [17]:
+
+
+from fastai.data.transforms import RandomSplitter
+trn_split,val_split=RandomSplitter(seed=42)(train_dataset_df)
+
+train_set_features,validation_set_features = t_indep[trn_split],t_indep[val_split]
+train_set_targets,validation_set_targets = t_dep[trn_split],t_dep[val_split]
+len(train_set_features),len(validation_set_features)
+
+
+
+
+
+
+
+Out[17]:
+
+
+
+
+
+
+
+
+Now we can generate random weights (one m per feature). We're using a linear model, effectively calculating a weighted sum of the features: $f(x) = m_{Age}*x_{Age} + m_{SibSp}*x_{SibSp} + m_{Parch}*x_{Parch} + m_{Fare}*x_{Fare}$. We will adjust these weights to predict passenger survival based on the features.
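+To make that weighted sum concrete, here is a tiny sketch with made-up numbers (not taken from the dataset):
+
+# Hypothetical single passenger: 4 normalized feature values and 4 weights.
+example_features = torch.tensor([0.28, 0.125, 0.0, 0.014])   # made-up Age, SibSp, Parch, Fare
+example_weights  = torch.tensor([0.2, -0.1, 0.4, 0.3])       # made-up m values
+print((example_features * example_weights).sum())            # one prediction for this passenger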
+
+
+
+
+
+In [18]:
+
+
+def generate_random_coefficients(num_coeffs):
+ torch.manual_seed(42)
+ coeffs = torch.rand(num_coeffs)-0.5 # pick random numbers in the range (-0.5,0.5)
+ return coeffs.requires_grad_()
+
+nn_coeffs=generate_random_coefficients(num_coeffs=train_set_features.shape[1])
+nn_coeffs
+
+
+
+
+
+
+
+Out[18]:
+
+
+
+
+
+
+
+In [19]:
+
+
+def calc_preds(coeffs, features): return (features*coeffs).sum(axis=1)
+
+predictions = calc_preds(nn_coeffs, train_set_features)
+predictions.topk(3)
+
+
+
+
+
+
+
+Out[19]:
+
+
+
+
+
+
+
+In [20]:
+
+
+def calc_loss(coeffs, features, targets): return torch.abs(calc_preds(coeffs, features)-targets).mean()
+
+loss = calc_loss(coeffs=nn_coeffs, features=train_set_features, targets=train_set_targets)
+loss
+
+
+
+
+
+
+
+Out[20]:
+
+
+
+
+
+
+
+In [21]:
+
+
+loss.backward()
+nn_coeffs.grad
+
+
+
+
+
+
+
+Out[21]:
+
+
+
+
+
+
+
+In [22]:
+
+
+def one_epoch(coeffs, lr, train_set_features_set, train_set_targets_set):
+ loss = calc_loss(coeffs, train_set_features_set, train_set_targets_set)
+ loss.backward()
+ with torch.no_grad():
+ coeffs.sub_(coeffs.grad * lr)
+ coeffs.grad.zero_()
+ print(f"{loss:.3f}", end="; ")
+
+def train_model(train_set_features_set, train_set_targets_set, epochs=60, lr=4):
+ torch.manual_seed(442)
+ coeffs = generate_random_coefficients(num_coeffs=t_indep.shape[1])
+ for i in range(epochs): one_epoch(coeffs, lr=lr, train_set_features_set=train_set_features_set, train_set_targets_set=train_set_targets_set)
+ return coeffs
+
+final_weights = train_model(train_set_features, train_set_targets)
+
+def show_coeffs(coeffs): return dict(zip(indep_cols, coeffs.requires_grad_(False)))
+
+show_coeffs(nn_coeffs)
+
+
+
+
+
+
+
+
+
+
+
+Out[22]:
+
+
+
+
+
+
+
+
+We have weights, let's do predictions then.
+
+
+
+
+
+In [23]:
+
+
+calc_preds(nn_coeffs, validation_set_features)
+
+
+
+
+
+
+
+Out[23]:
+
+
+
+
+
+
+
+
+It's hard not to notice that we should be predicting a 0 or 1 value, but instead we're getting a lot of negatives.
+For simplicity, let's ignore this and say that everything above 0.5 counts as survived.
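+Concretely, that 0.5 cutoff is just a boolean comparison on the raw predictions (a small sketch; the next cell counts these up):
+
+# Threshold the raw predictions: 1 = predicted survived, 0 = predicted not survived.
+print((calc_preds(nn_coeffs, validation_set_features) > 0.5).int()[:10])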
+
+
+
+
+
+In [24]:
+
+
+preds = calc_preds(nn_coeffs, validation_set_features)
+
+print(f"True count was {torch.sum(preds>0.5)} should have been {torch.sum(validation_set_targets.bool())}")
+print(f"False count was {torch.sum(preds<=0.5)} should have been {len( validation_set_targets.bool()) - torch.sum(validation_set_targets.bool())}")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+With this we can use our validation set and calculate the % of predictions that we get right.
+
+
+
+
+
+In [25]:
+
+
+def calc_accuracy(predictions, validation_set_features, validation_set_targets):
+ # Convert predictions to boolean values (True if > 0.5, False otherwise)
+ bool_predictions = predictions > 0.5
+ # Convert validation dependent variable to boolean values
+ bool_validation_set_targets = validation_set_targets.bool()
+ # Compare boolean predictions with boolean validation dependent variable to find correct predictions
+ correct_predictions = bool_validation_set_targets == bool_predictions
+ # Convert correct predictions (boolean) to float (1.0 for True, 0.0 for False)
+ accuracy_float = correct_predictions.float()
+ # Calculate the mean of the accuracy_float to get the overall accuracy
+ accuracy_val = accuracy_float.mean()
+ return accuracy_val
+
+accuracy_result = calc_accuracy(predictions=preds, validation_set_features=validation_set_features, validation_set_targets=validation_set_targets)
+print(accuracy_result)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Looks like we're doing slightly better than flipping a coin. Is that a success? 🤔❓🤔 I don't think so...
+As seen in the counts above ("True count was 2 should have been 72", "False count was 176 should have been 106"), the model predicts most instances as False (not survived).
+It correctly identifies many of the actual 'not survived' cases, contributing to the accuracy, but incorrectly classifies most 'survived' cases as 'not survived'.
+This bias results in an accuracy that is better than random (50%), but not very high, around 58%.
+The Linear model and neural net from scratch notebook goes much further with this exercise. It uses more techniques to clean up and normalize the data, it also uses the non-numerical columns by transforming them into numerical ones, and then applies a sigmoid which, unlike our linear function, always gives a value between 0 and 1.
+For me, this was enough to get a better overview of what's happening inside a neural network.
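+As a quick illustration of that sigmoid point (a minimal sketch, not code from the original notebooks): torch.sigmoid squashes any real-valued output into the range (0, 1).
+
+# Raw linear outputs, including negatives, all end up strictly between 0 and 1.
+print(torch.sigmoid(torch.tensor([-3.0, -0.2, 0.0, 0.7, 4.0])))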
+
+
+
+
+
+
+3.3 Do Deep Learning¶
In the section above we implemented a simple Neural Network. Now let's explore Deep Learning, which is what truly unlocks the power of Neural Networks.
+Deep Learning involves creating Neural Networks with multiple layers. Instead of a single layer, we stack layers of neurons, allowing the network to learn more complex patterns and representations from the data.
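+For example, with our 4 numeric features and hidden layers of [10, 10], the function below will build weight matrices of shapes (4, 10), (10, 10) and (10, 1). A tiny sketch of that shape bookkeeping:
+
+# Input size, hidden layer sizes, then a single output neuron.
+sizes = [4] + [10, 10] + [1]
+print([(sizes[i], sizes[i+1]) for i in range(len(sizes)-1)])   # [(4, 10), (10, 10), (10, 1)]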
+
+
+
+
+
+In [26]:
+
+
+def generate_random_coefficients_for_deep_learning(n_coeff, num_neurons_per_hidden_layer=[10, 10]):
+ torch.manual_seed(42)
+ # Define the number of neurons for each layer, including input, hidden, and output layers.
+ # The input layer size is n_coeff, hidden layers sizes are from num_neurons_per_hidden_layer, and output layer size is 1.
+ num_neurons = [n_coeff] + num_neurons_per_hidden_layer + [1]
+ layers = []
+ for i in range(len(num_neurons)-1):
+ # Determine the size of the input for the current layer from the previous layer's neuron count
+ layer_input_size = num_neurons[i]
+ # Determine the size of the output for the current layer from the current layer's neuron count
+ layer_output_size = num_neurons[i+1]
+ # Initialize a layer with random weights between -0.5 and 0.5.
+ # torch.rand generates uniform random numbers between 0 and 1, then we shift and scale to get range [-0.5, 0.5].
+ # requires_grad_() is set to True to enable gradient tracking for these tensors, which is needed for backpropagation.
+ layer = (torch.rand(layer_input_size, layer_output_size)-0.5).requires_grad_()
+ layers.append(layer)
+ return layers
+
+dnn_layers_coeffs = generate_random_coefficients_for_deep_learning(n_coeff=train_set_features.shape[1], num_neurons_per_hidden_layer=[10, 10])
+dnn_layers_coeffs
+
+
+
+
+
+
+
+Out[26]:
+
+
+
+
+
+
+
+
+We can test how we do without any training.
+
+
+
+
+
+In [27]:
+
+
+def calc_preds_for_deep_learning(coeffs, features):
+ # @ is matrix multiplication in Python
+ # It was introduced in Python 3.5 as part of [PEP 465](https://peps.python.org/pep-0465/)
+ layer_features = features
+ for layer in coeffs[:-1]:
+ layer_features = layer_features @ layer
+ layer_features = layer_features @ coeffs[-1]
+ return layer_features.squeeze()
+
+def calc_loss_for_deep_learning(coeffs, features, targets): return torch.abs(calc_preds_for_deep_learning(coeffs, features)-targets).mean()
+
+dnn_preds = calc_preds_for_deep_learning(coeffs=dnn_layers_coeffs, features=validation_set_features)
+
+print(f"True count was {torch.sum(dnn_preds>0.5)} should have been {torch.sum(validation_set_targets.bool())}")
+print(f"False count was {torch.sum(dnn_preds<=0.5)} should have been {len( validation_set_targets.bool()) - torch.sum(validation_set_targets.bool())}")
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+And we need to do the gradient descent step for all the coeffs in each layer.
+
+
+
+
+
+In [28]:
+
+
+def one_epoch_for_deep_learning(coeffs, lr, train_set_features_set, train_set_targets_set):
+ loss = calc_loss_for_deep_learning(coeffs, train_set_features_set, train_set_targets_set)
+ loss.backward()
+ with torch.no_grad():
+ for layer in coeffs:
+ layer -= layer.grad * lr
+ layer.grad.zero_()
+
+def train_model_for_deep_learning(train_set_features_set, train_set_targets_set, num_neurons_per_hidden_layer=[10, 10], epochs=60, lr=4):
+ torch.manual_seed(442)
+ coeffs = generate_random_coefficients_for_deep_learning(n_coeff=train_set_features_set.shape[1], num_neurons_per_hidden_layer=num_neurons_per_hidden_layer)
+ for i in range(epochs): one_epoch_for_deep_learning(coeffs, lr=lr, train_set_features_set=train_set_features_set, train_set_targets_set=train_set_targets_set)
+ return coeffs # Returns the trained coefficients, which have the same structure as generate_random_coefficients_for_deep_learning
+
+
+
+
+
+
+
+
+Let's test it then with different combinations of hidden layers and neurons per layer...
+
+
+
+
+
+In [29]:
+
+
+for num_neurons in [[10, 10], [20, 20],[5, 5, 5],[30], [], [2], [50], [2, 2], [50, 50], [5, 10, 5], [2, 2, 2, 2]]:
+ dnn_final_weights = train_model_for_deep_learning(train_set_features, train_set_targets, num_neurons_per_hidden_layer=num_neurons)
+ dnn_preds = calc_preds_for_deep_learning(coeffs=dnn_final_weights, features=validation_set_features)
+ accuracy = calc_accuracy(predictions=dnn_preds, validation_set_features=validation_set_features, validation_set_targets=validation_set_targets)
+
+ print(f"Hidden layers: {num_neurons}")
+ print(f"True count was {torch.sum(dnn_preds>0.5)} should have been {torch.sum(validation_set_targets.bool())}")
+ print(f"False count was {torch.sum(dnn_preds<=0.5)} should have been {len( validation_set_targets.bool()) - torch.sum(validation_set_targets.bool())}")
+ print(f"Accuracy: {accuracy}")
+ print("-" * 20) # Separator for readability
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Not a lot has changed... which makes sense: without an activation function between the layers, stacking matrix multiplications still gives a purely linear model, exactly the limitation we discussed in section 3.1.
+Just for fun, we can see how adding a sigmoid and a ReLU would affect the results... The code is copy-pasted from above, but with a smarter_calc_preds_for_deep_learning.
+
+
+
+
+
+In [30]:
+
+
+import torch.nn.functional as F
+
+def smarter_calc_preds_for_deep_learning(coeffs, features):
+ # @ is matrix multiplication in Python
+ # It was introduced in Python 3.5 as part of [PEP 465](https://peps.python.org/pep-0465/)
+ layer_features = features
+ for layer in coeffs[:-1]:
+ layer_features = F.relu(layer_features @ layer)
+ layer_features = layer_features @ coeffs[-1]
+ return torch.sigmoid(layer_features.squeeze())
+
+def smarter_calc_loss_for_deep_learning(coeffs, features, targets):
+ predictions = smarter_calc_preds_for_deep_learning(coeffs, features)
+ return F.binary_cross_entropy(predictions, targets) # Changed loss to Binary Cross Entropy
+
+def smarter_one_epoch_for_deep_learning(coeffs, lr, train_set_features_set, train_set_targets_set):
+ loss = smarter_calc_loss_for_deep_learning(coeffs, train_set_features_set, train_set_targets_set)
+ loss.backward()
+ with torch.no_grad():
+ for layer in coeffs:
+ layer -= layer.grad * lr
+ layer.grad.zero_()
+
+def smarter_train_model_for_deep_learning(train_set_features_set, train_set_targets_set, num_neurons_per_hidden_layer=[10, 10], epochs=60, lr=4):
+ torch.manual_seed(442)
+ coeffs = generate_random_coefficients_for_deep_learning(n_coeff=train_set_features_set.shape[1], num_neurons_per_hidden_layer=num_neurons_per_hidden_layer)
+ for i in range(epochs): smarter_one_epoch_for_deep_learning(coeffs, lr=lr, train_set_features_set=train_set_features_set, train_set_targets_set=train_set_targets_set)
+ return coeffs # Returns the trained coefficients, which have the same structure as generate_random_coefficients_for_deep_learning
+
+
+
+
+
+
+
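+Since the loss also changed from mean absolute error to binary cross-entropy, here is a tiny sketch of how BCE behaves (made-up numbers, not notebook output):
+
+# A confident wrong prediction is penalized much more heavily than confident right ones.
+p = torch.tensor([0.9, 0.1, 0.9])      # predicted probabilities of survival
+t = torch.tensor([1.0, 0.0, 0.0])      # actual outcomes
+print(F.binary_cross_entropy(p, t))    # the mean is dominated by the last, confidently wrong, prediction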
+In [31]:
+
+
+for num_neurons in [[10, 10], [20, 20],[5, 5, 5],[30], [], [2], [50], [2, 2], [50, 50], [5, 10, 5], [2, 2, 2, 2]]:
+ dnn_final_weights = smarter_train_model_for_deep_learning(train_set_features, train_set_targets.float(), num_neurons_per_hidden_layer=num_neurons)
+ dnn_preds = smarter_calc_preds_for_deep_learning(coeffs=dnn_final_weights, features=validation_set_features)
+ accuracy = calc_accuracy(predictions=dnn_preds, validation_set_features=validation_set_features, validation_set_targets=validation_set_targets)
+
+ print(f"Hidden layers: {num_neurons}")
+ print(f"True count was {torch.sum(dnn_preds>0.5)} should have been {torch.sum(validation_set_targets.bool())}")
+ print(f"False count was {torch.sum(dnn_preds<=0.5)} should have been {len( validation_set_targets.bool()) - torch.sum(validation_set_targets.bool())}")
+ print(f"Accuracy: {accuracy}")
+ print("-" * 20) # Separator for readability
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Interesting. That definitely improved.
+I will stop here for now. However, the next step will likely be to add boolean variables like is_male, is_female, is_class_1, etc. If I understood it correctly and I'm not making any mistakes, that should bring me to around 80% accuracy, like we see in the fast.ai notebook.
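+A hypothetical sketch of that next step (not run here; Sex, Pclass and Embarked are columns in the standard Titanic CSV), using pandas get_dummies to turn the categorical columns into booleans:
+
+# One-hot encode categorical columns and join them to the numeric features.
+dummies = pd.get_dummies(train_dataset_df[['Sex', 'Pclass', 'Embarked']], columns=['Sex', 'Pclass', 'Embarked'])
+extended_df = pd.concat([train_dataset_df[indep_cols], dummies.astype(float)], axis=1)
+t_indep_extended = tensor(extended_df.values, dtype=torch.float)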
+
+
+
+
+
+In [ ]:
+
+
+
+
+
+
+
+