From 3cbdbe2ab96684001c94dd10a5fe09cab7c33289 Mon Sep 17 00:00:00 2001
From: Kai-Philipp Nosper
Date: Tue, 25 Jan 2022 18:48:46 +0100
Subject: [PATCH 1/2] Add comments to slp
---
notebooks/examples/single-layer_perceptron.ipynb | 12 +++++++-----
.../solution_single-layer_perceptron.ipynb | 15 +++++++++++++--
2 files changed, 20 insertions(+), 7 deletions(-)
diff --git a/notebooks/examples/single-layer_perceptron.ipynb b/notebooks/examples/single-layer_perceptron.ipynb
index 200fb71..fc577aa 100644
--- a/notebooks/examples/single-layer_perceptron.ipynb
+++ b/notebooks/examples/single-layer_perceptron.ipynb
@@ -124,14 +124,15 @@
"source": [
"def predict(inputs, weights):\n",
" '''inputs: matrix[i][:-1], weights: array of weights'''\n",
- "\n",
+ " \n",
" total_activation = 0\n",
" # Iterate through the inputs and related weights and sum them up in total_activation\n",
" for input, weight in zip(inputs, weights):\n",
" total_activation += input * weight\n",
" \n",
" # calculate sigmoid activation to limit the output between 0 and 1\n",
- " return 1/(1+math.exp(-total_activation))"
+ " return 1/(1+math.exp(-total_activation))\n",
+ " "
]
},
{
@@ -189,15 +190,16 @@
"def accuracy(matrix, weights):\n",
" num_correct = 0\n",
" preds = []\n",
- " threshold = 0.4\n",
" \n",
" for i in range(len(matrix)):\n",
" # Get prediction for the current datapoint\n",
" pred = predict(matrix[i][:-1], weights)\n",
" preds.append(pred)\n",
"\n",
- " # Check if the prediction is correct within the threshold\n",
- " if math.isclose(pred, matrix[i][-1], abs_tol = threshold):\n",
+ " # Check if the prediction is correct\n",
+ " # math.isclose checks if two values are close to each other\n",
+ " # abs_tol is the minimum absolute tolerance.\n",
+ " if math.isclose(pred, matrix[i][-1], abs_tol = 0.4):\n",
" num_correct += 1\n",
" \n",
" print('Predictions:', preds)\n",
diff --git a/notebooks/exercises/solution_single-layer_perceptron.ipynb b/notebooks/exercises/solution_single-layer_perceptron.ipynb
index 4447000..59f6b20 100644
--- a/notebooks/exercises/solution_single-layer_perceptron.ipynb
+++ b/notebooks/exercises/solution_single-layer_perceptron.ipynb
@@ -100,10 +100,14 @@
"outputs": [],
"source": [
"def predict(inputs, weights):\n",
- " threshold = 0.5\n",
+ " '''inputs: matrix[i][:-1], weights: array of weights'''\n",
+ " \n",
" total_activation = 0\n",
+ " # Iterate through the inputs and related weights and sum them up in total_activation\n",
" for input, weight in zip(inputs, weights):\n",
" total_activation += input * weight\n",
+ " \n",
+ " # calculate sigmoid activation to limit the output between 0 and 1\n",
" return 1/(1+math.exp(-total_activation))"
]
},
@@ -153,13 +157,20 @@
"def accuracy(matrix, weights):\n",
" num_correct = 0\n",
" preds = []\n",
+ " \n",
" for i in range(len(matrix)):\n",
+ " # Get prediction for the current datapoint\n",
" pred = predict(matrix[i][:-1], weights)\n",
" preds.append(pred)\n",
- " #if pred == matrix[i][-1]:\n",
+ "\n",
+ " # Check if the prediction is correct\n",
+ " # math.isclose checks if two values are close to each other\n",
+ " # abs_tol is the minimum absolute tolerance.\n",
" if math.isclose(pred, matrix[i][-1], abs_tol = 0.4):\n",
" num_correct += 1\n",
+ " \n",
" print('Predictions:', preds)\n",
+ "\n",
" # return overall accuracy\n",
" return num_correct / float(len(matrix))"
]
--
GitLab
From 143c85454a9ec87f05a2c78133de0755a0378c75 Mon Sep 17 00:00:00 2001
From: Kai-Philipp Nosper
Date: Tue, 25 Jan 2022 18:52:08 +0100
Subject: [PATCH 2/2] Add explanation for notebook at the top
---
notebooks/examples/single-layer_perceptron.ipynb | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/notebooks/examples/single-layer_perceptron.ipynb b/notebooks/examples/single-layer_perceptron.ipynb
index fc577aa..ac1f62d 100644
--- a/notebooks/examples/single-layer_perceptron.ipynb
+++ b/notebooks/examples/single-layer_perceptron.ipynb
@@ -5,7 +5,9 @@
"id": "7a974be2",
"metadata": {},
"source": [
- "# Single-Layer Perceptron"
+ "# Single-Layer Perceptron\n",
+ "In this notebook, we will implement a single-layer perceptron.\n",
+    "The goal is to predict an OR gate."
]
},
{
--
GitLab