diff --git a/notebooks/examples/single-layer_perceptron.ipynb b/notebooks/examples/single-layer_perceptron.ipynb
index 200fb71606760518eb1ca4fbbcb08c09a0700487..ac1f62d6f9d01467ac8ed04d206e971bc1d71a33 100644
--- a/notebooks/examples/single-layer_perceptron.ipynb
+++ b/notebooks/examples/single-layer_perceptron.ipynb
@@ -5,7 +5,9 @@
"id": "7a974be2",
"metadata": {},
"source": [
- "# Single-Layer Perceptron"
+ "# Single-Layer Perceptron\n",
+ "In this notebook, we will implement a single-layer perceptron.\n",
+ "The goal is to predict an OR gate."
]
},
{
@@ -124,14 +126,15 @@
"source": [
"def predict(inputs, weights):\n",
" '''inputs: matrix[i][:-1], weights: array of weights'''\n",
- "\n",
+ " \n",
" total_activation = 0\n",
" # Iterate through the inputs and related weights and sum them up in total_activation\n",
" for input, weight in zip(inputs, weights):\n",
" total_activation += input * weight\n",
" \n",
" # calculate sigmoid activation to limit the output between 0 and 1\n",
- " return 1/(1+math.exp(-total_activation))"
+ " return 1/(1+math.exp(-total_activation))\n",
+ " "
]
},
{
@@ -189,15 +192,16 @@
"def accuracy(matrix, weights):\n",
" num_correct = 0\n",
" preds = []\n",
- " threshold = 0.4\n",
" \n",
" for i in range(len(matrix)):\n",
" # Get prediction for the current datapoint\n",
" pred = predict(matrix[i][:-1], weights)\n",
" preds.append(pred)\n",
"\n",
- " # Check if the prediction is correct within the threshold\n",
- " if math.isclose(pred, matrix[i][-1], abs_tol = threshold):\n",
+ " # Check if the prediction is correct\n",
+ "        # math.isclose returns True when the two values are within the given tolerance\n",
+ " # abs_tol is the minimum absolute tolerance.\n",
+ " if math.isclose(pred, matrix[i][-1], abs_tol = 0.4):\n",
" num_correct += 1\n",
" \n",
" print('Predictions:', preds)\n",
diff --git a/notebooks/exercises/solution_single-layer_perceptron.ipynb b/notebooks/exercises/solution_single-layer_perceptron.ipynb
index 4447000c299d78adfc462491b1d3218847462cf2..59f6b207f47b219859273e050f93686ece9106b7 100644
--- a/notebooks/exercises/solution_single-layer_perceptron.ipynb
+++ b/notebooks/exercises/solution_single-layer_perceptron.ipynb
@@ -100,10 +100,14 @@
"outputs": [],
"source": [
"def predict(inputs, weights):\n",
- " threshold = 0.5\n",
+ " '''inputs: matrix[i][:-1], weights: array of weights'''\n",
+ " \n",
" total_activation = 0\n",
+ " # Iterate through the inputs and related weights and sum them up in total_activation\n",
" for input, weight in zip(inputs, weights):\n",
" total_activation += input * weight\n",
+ " \n",
+ " # calculate sigmoid activation to limit the output between 0 and 1\n",
" return 1/(1+math.exp(-total_activation))"
]
},
@@ -153,13 +157,20 @@
"def accuracy(matrix, weights):\n",
" num_correct = 0\n",
" preds = []\n",
+ " \n",
" for i in range(len(matrix)):\n",
+ " # Get prediction for the current datapoint\n",
" pred = predict(matrix[i][:-1], weights)\n",
" preds.append(pred)\n",
- " #if pred == matrix[i][-1]:\n",
+ "\n",
+ " # Check if the prediction is correct\n",
+ "      # math.isclose returns True when the two values are within the given tolerance\n",
+ " # abs_tol is the minimum absolute tolerance.\n",
" if math.isclose(pred, matrix[i][-1], abs_tol = 0.4):\n",
" num_correct += 1\n",
+ " \n",
" print('Predictions:', preds)\n",
+ "\n",
" # return overall accuracy\n",
" return num_correct / float(len(matrix))"
]