diff --git a/pq-tls-benchmark-framework/emulation-exp/code/kex/Makefile b/pq-tls-benchmark-framework/emulation-exp/code/kex/Makefile
index 7643195142a44590e11091dccf10edb08210d125..7e706ad4272f8f15dbad3545f690c39366a7341f 100644
--- a/pq-tls-benchmark-framework/emulation-exp/code/kex/Makefile
+++ b/pq-tls-benchmark-framework/emulation-exp/code/kex/Makefile
@@ -50,5 +50,6 @@ _change_netem_settings:
 	sudo ip netns exec cli_ns_1 tc qdisc change dev cli_ve root netem limit 1000 rate 500.0mbit delay 2.684ms 0.0ms loss 20.0% duplicate 0.0% corrupt 0.0% reorder 0.0%
 
 _run_openssl_speed:
-	$(OPENSSL) speed -seconds 5 bikel1
-	$(OPENSSL) speed -seconds 5 hqc128
+	$(OPENSSL) speed -seconds 3 bikel1
+	$(OPENSSL) speed -seconds 10 ECP-256 X25519 mlkem512 hqc128 bikel1
+
diff --git a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
index 9a828e9f6cd76bc15d88ac3a3db4dd6baea747e7..ac40b4994b617868cf8b6069061cc7988788fc55 100755
--- a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
+++ b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
@@ -730,12 +730,13 @@ def plot_lines(data):
                 plt.plot(x, y, linestyle=mode, marker=".", color=color, label=kem_alg)
 
             plt.ylim(bottom=0)
-            plt.xlim(left=0)
+            plt.xlim(left=0, right=x.max() + (x.max() / 50))
             plt.xlabel(row["scenario"])
             plt.ylabel(f"Time-to-first-byte (ms)")
             # plt.title(
             #     f"Medians of {row['scenario']} in {row['protocol']} in {row['sec_level']}"
             # )
+            plt.grid()
             plt.legend(
                 bbox_to_anchor=(0.5, 1), loc="lower center", ncol=3, fontsize="small"
             )
@@ -785,9 +786,10 @@ def plot_lines(data):
                 plt.plot(x, y, linestyle=mode, marker=".", color=color, label=protocol)
 
             plt.ylim(bottom=0)
-            plt.xlim(left=0)
+            plt.xlim(left=0, right=x.max() + (x.max() / 50))
             plt.xlabel(row["scenario"])
             plt.ylabel(f"Time-to-first-byte (ms)")
+            plt.grid()
             plt.legend(
                 bbox_to_anchor=(0.5, 1), loc="lower center", ncol=3, fontsize="small"
             )
@@ -827,9 +829,10 @@ def plot_lines(data):
             )
             plt.plot(x, y, linestyle="-", marker=".")
             plt.ylim(bottom=0)
-            plt.xlim(left=0)
+            plt.xlim(left=0, right=x.max() + (x.max() / 50))
             plt.xlabel(row["scenario"])
             plt.ylabel(f"Time-to-first-byte (ms)")
+            plt.grid()
             plt.title(
                 f"Median of {row['scenario']} in {row['protocol']} in {row['sec_level']} with {row['kem_alg']}"
             )
@@ -972,6 +975,7 @@ def plot_distributions(data):
             plt.xlim(left=-1.5)
             plt.xlabel(row["scenario"])
             plt.ylabel(f"Time-to-first-byte (ms)")
+            plt.grid()
 
             subdir = "filtered/" if filtered else ""
             appendix = "-filtered" if filtered else ""
@@ -1004,9 +1008,10 @@ def plot_distributions(data):
                 plt.figure()
                 plt.violinplot(row["measurements"], showmedians=True)
                 # plt.ylim(bottom=0)
-                # plt.xlim(left=0)
+                # plt.xlim(left=0, right=x.max() + (x.max() / 50))
                 plt.xlabel("Dichte")
                 plt.ylabel(f"Time-to-first-byte (ms)")
+                plt.grid()
 
                 plt.savefig(
                     f"{PLOTS_DIR}/distributions/single/single-violin-plot-for-{row['scenario']}-{row['protocol']}-{row['sec_level']}-{row['kem_alg']}-{value}.pdf"
@@ -1074,9 +1079,10 @@ def plot_distributions(data):
                     )
 
                 # plt.ylim(bottom=0)
-                # plt.xlim(left=0)
+                # plt.xlim(left=0, right=x.max() + (x.max() / 50))
                 plt.ylabel("Wahrscheinlichkeit")
                 plt.xlabel(f"Time-to-first-byte (ms)")
+                plt.grid()
 
                 plt.legend(
                     bbox_to_anchor=(0.5, 1),
@@ -1168,6 +1174,7 @@ def plot_static_data(data):
             )
             plt.xlabel("KEM Algorithms")
             plt.ylabel("Time-to-first-byte (ms)")
+            plt.grid()
 
             sec_level_string = (
                 sec_level if type(sec_level) == str else "-".join(sec_level)
@@ -1199,6 +1206,7 @@ def plot_static_data(data):
             def boxplot_of_medians_for_configuration(filtered_data, row):
                 plt.figure()
                 plt.boxplot(filtered_data["median"])
+                plt.grid()
                 plt.savefig(
                     os.path.join(
                         PLOTS_DIR,
@@ -1220,6 +1228,7 @@ def plot_static_data(data):
                 plt.violinplot(measurements_flattend, showmedians=True)
                 plt.ylabel("Time-to-first-byte (ms)")
                 plt.xlabel("Dichte")
+                plt.grid()
                 plt.savefig(
                     os.path.join(
                         PLOTS_DIR,
@@ -1236,6 +1245,7 @@ def plot_static_data(data):
                 plt.hist(measurements_flattend, bins=100, density=True)
                 plt.xlabel("Time-to-first-byte (ms)")
                 plt.ylabel("Dichte")
+                plt.grid()
 
                 plt.savefig(
                     os.path.join(
@@ -1270,6 +1280,7 @@ def plot_static_data(data):
                 plt.ylabel("Dichte")
                 plt.xlim([xmin, xmax])
                 plt.ylim([0, max(kde_values) + 0.1])
+                plt.grid()
 
                 plt.savefig(
                     os.path.join(
@@ -1357,6 +1368,7 @@ def plot_general_plots():
 
         plt.xlabel("Bytes sent")
         plt.ylabel("Performance (µs)")
+        plt.grid()
 
         name = (
             "scatter-of-bytes-sent-against-kem-performance-with-hybrids.pdf"
@@ -1428,6 +1440,7 @@ def plot_general_plots():
 
         plt.xlabel("Public Key Länge in Bytes")
         plt.ylabel("Ciphertext Länge in Bytes")
+        plt.grid()
 
         plt.gca().xaxis.set_major_formatter(ticker.ScalarFormatter())
         plt.gca().yaxis.set_major_formatter(ticker.ScalarFormatter())
@@ -1451,11 +1464,13 @@ def plot_general_plots():
         # get the line with the maximum median
         max_median = data["median"].idxmax()
         print(data.iloc[max_median])
+        plt.grid()
         plt.savefig(f"{PLOTS_DIR}/general/median_against_iqr_hexbin.pdf")
         plt.close()
 
         plt.figure()
         plt.hist2d(data["median"], data["iqr"], bins=50)
+        plt.grid()
         plt.savefig(f"{PLOTS_DIR}/general/median_against_iqr_hist2d.pdf")
         plt.close()
 
diff --git a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/helper_scripts/performance_eval_of_oqs.py b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/helper_scripts/performance_eval_of_oqs.py
new file mode 100644
index 0000000000000000000000000000000000000000..cddabe05d30a3354f2027af491a883fb9e9069a9
--- /dev/null
+++ b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/helper_scripts/performance_eval_of_oqs.py
@@ -0,0 +1,494 @@
+import os
+import subprocess
+import pandas as pd
+
+FEATHERS_DIR = "feathers"
+
+DIR_OF_LIB_OQS = "../tmp/liboqs/build/tests"
+LIBOQS_ALGORITHM_LIST = [
+    "BIKE-L1",
+    "BIKE-L3",
+    "BIKE-L5",
+    "HQC-128",
+    "HQC-192",
+    "HQC-256",
+    "ML-KEM-512",
+    "ML-KEM-768",
+    "ML-KEM-1024",
+    "FrodoKEM-640-AES",
+    "FrodoKEM-640-SHAKE",
+    "FrodoKEM-976-AES",
+    "FrodoKEM-976-SHAKE",
+    "FrodoKEM-1344-AES",
+    "FrodoKEM-1344-SHAKE",
+]
+
+PATH_TO_OPENSSL_BIN = "../tmp/.local/openssl/bin/openssl"
+OPENSSL_ALGORITHM_LIST = [
+    "ECP-256",
+    "ECP-384",
+    "ECP-521",
+    "X25519",
+    "X448",
+    "mlkem512",
+    "frodo640aes",
+    "p256_frodo640aes",
+    "x25519_frodo640aes",
+    "frodo640shake",
+    "p256_frodo640shake",
+    "x25519_frodo640shake",
+    "frodo976aes",
+    "p384_frodo976aes",
+    "x448_frodo976aes",
+    "frodo976shake",
+    "p384_frodo976shake",
+    "x448_frodo976shake",
+    "frodo1344aes",
+    "p521_frodo1344aes",
+    "frodo1344shake",
+    "p521_frodo1344shake",
+    "mlkem512",
+    "p256_mlkem512",
+    "x25519_mlkem512",
+    "mlkem768",
+    "p384_mlkem768",
+    "x448_mlkem768",
+    "X25519MLKEM768",
+    "SecP256r1MLKEM768",
+    "mlkem1024",
+    "p521_mlkem1024",
+    "p384_mlkem1024",
+    "bikel1",
+    "p256_bikel1",
+    "x25519_bikel1",
+    "bikel3",
+    "p384_bikel3",
+    "x448_bikel3",
+    "bikel5",
+    "p521_bikel5",
+    "hqc128",
+    "p256_hqc128",
+    "x25519_hqc128",
+    "hqc192",
+    "p384_hqc192",
+    "x448_hqc192",
+    "hqc256",
+    "p521_hqc256",
+]
+# OPENSSL_ALGORITHM_LIST = ["ECP-256", "X25519", "mlkem512", "hqc128", "bikel1"]
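+# (presumably a reduced list for quick test runs; it matches the algorithms used by the
+# _run_openssl_speed Makefile target)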
+
+
+def main():
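+    # benchmark results are cached as feather files so that repeated runs can skip the long measurements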
+    os.makedirs(FEATHERS_DIR, exist_ok=True)
+    if os.path.exists(f"{FEATHERS_DIR}/liboqs_speed.feather"):
+        liboqs_speed_data = pd.read_feather(f"{FEATHERS_DIR}/liboqs_speed.feather")
+    else:
+        liboqs_speed_data = run_liboqs_speed()
+
+    liboqs_speed_data = concat_liboqs_speed_data_with_expected_values(liboqs_speed_data)
+
+    if os.path.exists(f"{FEATHERS_DIR}/openssl_speed.feather"):
+        openssl_speed_data = pd.read_feather(f"{FEATHERS_DIR}/openssl_speed.feather")
+    else:
+        openssl_speed_data = run_openssl_speed()
+
+    analyze_openssl_speed_data(openssl_speed_data)
+
+
+# a run over the full OPENSSL_ALGORITHM_LIST takes roughly 24 minutes
+# $(OPENSSL) speed -seconds 10 ECP-256 X25519 mlkem512 hqc128 bikel1
+def run_openssl_speed():
+    # output:
+    # version: 3.4.0
+    # built on: Sat Jan 25 23:46:41 2025 UTC
+    # options: bn(64,64)
+    # compiler: gcc -fPIC -pthread -m64 -Wa,--noexecstack -Wall -O3 -DOPENSSL_USE_NODELETE -DL_ENDIAN -DOPENSSL_PIC -DOPENSSL_BUILDING_OPENSSL -DNDEBUG
+    # CPUINFO: OPENSSL_ia32cap=0xfedab2234f8bffff:0x9c2fb9
+    #                                keygen    encaps    decaps keygens/s  encaps/s  decaps/s
+    #                     ECP-256 0.000011s 0.000073s 0.000057s   87234.7   13682.1   17611.5
+    #                      X25519 0.000029s 0.000067s 0.000033s   34973.9   14993.8   30058.9
+    #                    mlkem512 0.000009s 0.000007s 0.000007s  117398.9  141457.1  149222.9
+    #                      bikel1 0.000231s 0.000039s 0.000731s    4335.4   25898.9    1368.8
+    #                      hqc128 0.000026s 0.000055s 0.000101s   38658.9   18207.3    9919.8
+
+    # Doing ECP-256 keygen ops for 1s: 82873 ECP-256 KEM keygen ops in 0.95s
+    # Doing ECP-256 encaps ops for 1s: 12998 ECP-256 KEM encaps ops in 0.95s
+    # Doing ECP-256 decaps ops for 1s: 16907 ECP-256 KEM decaps ops in 0.96s
+    # Doing X25519 keygen ops for 1s: 32176 X25519 KEM keygen ops in 0.92s
+    # Doing X25519 encaps ops for 1s: 14394 X25519 KEM encaps ops in 0.96s
+    # Doing X25519 decaps ops for 1s: 28556 X25519 KEM decaps ops in 0.95s
+    # Doing mlkem512 keygen ops for 1s: 109181 mlkem512 KEM keygen ops in 0.93s
+    # Doing mlkem512 encaps ops for 1s: 128726 mlkem512 KEM encaps ops in 0.91s
+    # Doing mlkem512 decaps ops for 1s: 143254 mlkem512 KEM decaps ops in 0.96s
+    # Doing bikel1 keygen ops for 1s: 4162 bikel1 KEM keygen ops in 0.96s
+    # Doing bikel1 encaps ops for 1s: 24604 bikel1 KEM encaps ops in 0.95s
+    # Doing bikel1 decaps ops for 1s: 1314 bikel1 KEM decaps ops in 0.96s
+    # Doing hqc128 keygen ops for 1s: 36726 hqc128 KEM keygen ops in 0.95s
+    # Doing hqc128 encaps ops for 1s: 17479 hqc128 KEM encaps ops in 0.96s
+    # Doing hqc128 decaps ops for 1s: 9523 hqc128 KEM decaps ops in 0.96s
+    result = subprocess.run(
+        [
+            PATH_TO_OPENSSL_BIN,
+            "speed",
+            "-seconds",
+            "10",
+            *OPENSSL_ALGORITHM_LIST,
+        ],
+        capture_output=True,
+        text=True,
+        check=True,
+    )
+    print(result.stdout)
+    print(result.stderr)
+
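+    # the summary table starts after the 6 header lines (version, built on, options,
+    # compiler, CPUINFO and the column header) and ends at the first empty line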
+    empty_line_in_output = result.stdout.split("\n").index("")
+    stdout_lines_stripped = result.stdout.split("\n")[6:empty_line_in_output]
+
+    # print(stdout_lines_stripped)
+
+    data = pd.DataFrame()
+    for line in stdout_lines_stripped:
+        split_line = line.split()
+        algorithm = split_line[0]
+        keygen_mean_time_s = float(split_line[1][:-1])  # remove last character "s"
+        encaps_mean_time_s = float(split_line[2][:-1])
+        decaps_mean_time_s = float(split_line[3][:-1])
+
+        keygen_mean_ops_per_s = float(split_line[4])
+        encaps_mean_ops_per_s = float(split_line[5])
+        decaps_mean_ops_per_s = float(split_line[6])
+
+        # print(
+        #     f"Algorithm: {algorithm}, keygen_mean_time_s: {keygen_mean_time_s}, encaps_mean_time_s: {encaps_mean_time_s}, decaps_mean_time_s: {decaps_mean_time_s}, keygen_mean_ops_per_s: {keygen_mean_ops_per_s}, encaps_mean_ops_per_s: {encaps_mean_ops_per_s}, decaps_mean_ops_per_s: {decaps_mean_ops_per_s}"
+        # )
+        data = pd.concat(
+            [
+                data,
+                pd.DataFrame(
+                    [
+                        {
+                            "algorithm": algorithm,
+                            "keygen_mean_time_s": keygen_mean_time_s,
+                            "encaps_mean_time_s": encaps_mean_time_s,
+                            "decaps_mean_time_s": decaps_mean_time_s,
+                            "keygen_mean_ops_per_s": keygen_mean_ops_per_s,
+                            "encaps_mean_ops_per_s": encaps_mean_ops_per_s,
+                            "decaps_mean_ops_per_s": decaps_mean_ops_per_s,
+                        }
+                    ]
+                ),
+            ],
+            ignore_index=True,
+        )
+
+    # print(data)
+    data.to_feather(f"{FEATHERS_DIR}/openssl_speed.feather")
+    return data
+
+
+def analyze_openssl_speed_data(data):
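+    # for every hybrid algorithm (e.g. p256_mlkem512) the "expected" time per operation is
+    # approximated as the sum of the times measured for its classical and its PQC component;
+    # the difference between the measured hybrid time and this sum indicates the overhead of
+    # combining the two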
+    data["keygen_mean_time_s_expected"] = -1
+    data["encaps_mean_time_s_expected"] = -1
+    data["decaps_mean_time_s_expected"] = -1
+    data["total_actual_performance_time_s"] = (
+        data["keygen_mean_time_s"]
+        + data["encaps_mean_time_s"]
+        + data["decaps_mean_time_s"]
+    )
+
+    for index, row in data.iterrows():
+        algorithm = row["algorithm"]
+        if "_" in algorithm:
+            classical_part = algorithm.split("_")[0]
+            pqc_part = algorithm.split("_")[1]
+        else:
+            continue
+
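+        # map the classical prefix used in hybrid names to the standalone name reported by "openssl speed"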
+        if classical_part == "p256":
+            classical_part = "ECP-256"
+        elif classical_part == "p384":
+            classical_part = "ECP-384"
+        elif classical_part == "p521":
+            classical_part = "ECP-521"
+        elif classical_part == "x25519":
+            classical_part = "X25519"
+        elif classical_part == "x448":
+            classical_part = "X448"
+        else:
+            continue
+
+        # print(
+        #     data.loc[data["algorithm"] == classical_part, "keygen_mean_time_s"].values[
+        #         0
+        #     ]
+        # )
+        data.loc[index, "keygen_mean_time_s_expected"] = (
+            data.loc[data["algorithm"] == classical_part, "keygen_mean_time_s"].values[
+                0
+            ]
+            + data.loc[data["algorithm"] == pqc_part, "keygen_mean_time_s"].values[0]
+        )
+        data.loc[index, "encaps_mean_time_s_expected"] = (
+            data.loc[data["algorithm"] == classical_part, "encaps_mean_time_s"].values[
+                0
+            ]
+            + data.loc[data["algorithm"] == pqc_part, "encaps_mean_time_s"].values[0]
+        )
+        data.loc[index, "decaps_mean_time_s_expected"] = (
+            data.loc[data["algorithm"] == classical_part, "decaps_mean_time_s"].values[
+                0
+            ]
+            + data.loc[data["algorithm"] == pqc_part, "decaps_mean_time_s"].values[0]
+        )
+
+    data["total_expected_performance_time_s"] = (
+        data["keygen_mean_time_s_expected"]
+        + data["encaps_mean_time_s_expected"]
+        + data["decaps_mean_time_s_expected"]
+    )
+
+    data["total_performance_difference"] = (
+        data["total_actual_performance_time_s"]
+        - data["total_expected_performance_time_s"]
+    )
+
+    print(data)
+
+    # filter data for all algorithms which have x25519 in their name
+    data_x25519 = data[data["algorithm"].str.contains("x25519")]
+    print(data_x25519["total_performance_difference"].describe())
+    data_x448 = data[data["algorithm"].str.contains("x448")]
+    print(data_x448["total_performance_difference"].describe())
+
+    print()
+
+    data_p256 = data[data["algorithm"].str.contains("p256")]
+    print(data_p256["total_performance_difference"].describe())
+    data_p384 = data[data["algorithm"].str.contains("p384")]
+    print(data_p384["total_performance_difference"].describe())
+    data_p521 = data[data["algorithm"].str.contains("p521")]
+    print(data_p521["total_performance_difference"].describe())
+
+
+def run_liboqs_speed():
+    def run_subprocess_for(algorithm):
+        # format of result:
+        # Operation                            | Iterations | Total time (s) | Time (us): mean | pop. stdev | CPU cycles: mean          | pop. stdev
+        # ------------------------------------ | ----------:| --------------:| ---------------:| ----------:| -------------------------:| ----------:
+        # HQC-128                              |            |                |                 |            |                           |
+        # keygen                               |     418441 |         10.000 |          23.898 |     11.507 |                     82467 |      39650
+        # encaps                               |     173703 |         10.000 |          57.570 |     31.131 |                    198560 |     107190
+        # decaps                               |      92868 |         10.001 |         107.692 |     95.636 |                    371588 |     330160
+        result = subprocess.run(
+            [f"{DIR_OF_LIB_OQS}/speed_kem", "--duration", "10", algorithm],
+            capture_output=True,
+            text=True,
+            check=True,
+        )
+        print(result.stdout)
+        print(result.stderr)
+
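+        # after split() the "|" separators are their own tokens, so the relevant values sit at
+        # indices 6 (mean time in us), 8 (its pop. stdev), 10 (mean CPU cycles) and 12 (its pop. stdev)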
+        for line in result.stdout.split("\n"):
+            if line.startswith("keygen"):
+                split_line = line.split()
+                keygen_mean_time_us = float(split_line[6])
+                keygen_time_us_stdev = float(split_line[8])
+                keygen_mean_cpu_cycles = float(split_line[10])
+                keygen_cpu_cycles_stdev = float(split_line[12])
+            if line.startswith("encaps"):
+                split_line = line.split()
+                encaps_mean_time_us = float(split_line[6])
+                encaps_time_us_stdev = float(split_line[8])
+                encaps_mean_cpu_cycles = float(split_line[10])
+                encaps_cpu_cycles_stdev = float(split_line[12])
+            if line.startswith("decaps"):
+                split_line = line.split()
+                decaps_mean_time_us = float(split_line[6])
+                decaps_time_us_stdev = float(split_line[8])
+                decaps_mean_cpu_cycles = float(split_line[10])
+                decaps_cpu_cycles_stdev = float(split_line[12])
+
+        # if an error occurred, repeat the run (detected via a time that cannot be plausible)
+        if (
+            (keygen_mean_time_us > 1000000000000)
+            or (encaps_mean_time_us > 1000000000000)
+            or (decaps_mean_time_us > 1000000000000)
+        ):
+            return run_subprocess_for(algorithm)
+
+        return {
+            "algorithm": algorithm,
+            "keygen_mean_time_us": keygen_mean_time_us,
+            "keygen_time_us_stdev": keygen_time_us_stdev,
+            "keygen_mean_cpu_cycles": keygen_mean_cpu_cycles,
+            "keygen_cpu_cycles_stdev": keygen_cpu_cycles_stdev,
+            "encaps_mean_time_us": encaps_mean_time_us,
+            "encaps_time_us_stdev": encaps_time_us_stdev,
+            "encaps_mean_cpu_cycles": encaps_mean_cpu_cycles,
+            "encaps_cpu_cycles_stdev": encaps_cpu_cycles_stdev,
+            "decaps_mean_time_us": decaps_mean_time_us,
+            "decaps_time_us_stdev": decaps_time_us_stdev,
+            "decaps_mean_cpu_cycles": decaps_mean_cpu_cycles,
+            "decaps_cpu_cycles_stdev": decaps_cpu_cycles_stdev,
+        }
+
+    data = pd.DataFrame()
+    for algorithm in LIBOQS_ALGORITHM_LIST:
+        data_for_algorithm = run_subprocess_for(algorithm)
+        data = pd.concat([data, pd.DataFrame([data_for_algorithm])], ignore_index=True)
+
+    pd.set_option("display.max_columns", None)
+    print(data)
+
+    data.to_feather(f"{FEATHERS_DIR}/liboqs_speed.feather")
+    return data
+
+
+def concat_liboqs_speed_data_with_expected_values(data):
+    # add columns keygen_expected_cycles, encaps_expected_cycles, decaps_expected_cycles
+
+    # print(data)
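+    # approximate CPU cycles per microsecond derived from the keygen measurements;
+    # used further below to convert cycle differences back into time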
+    data["cycles_per_us"] = data["keygen_mean_cpu_cycles"] / data["keygen_mean_time_us"]
+    print("Statistics for cycles_per_us:")
+    print(data["cycles_per_us"].describe())
+
+    data["keygen_expected_cycles"] = float("-inf")
+    data["encaps_expected_cycles"] = float("-inf")
+    data["decaps_expected_cycles"] = float("-inf")
+
+    # https://bikesuite.org/files/v5.2/BIKE_Spec.2024.10.10.1.pdf, BIKE spec v5.2
+    data.loc[data["algorithm"] == "BIKE-L1", "keygen_expected_cycles"] = 366_000
+    data.loc[data["algorithm"] == "BIKE-L1", "encaps_expected_cycles"] = 74_000
+    data.loc[data["algorithm"] == "BIKE-L1", "decaps_expected_cycles"] = 1177_000
+
+    data.loc[data["algorithm"] == "BIKE-L3", "keygen_expected_cycles"] = 1049_000
+    data.loc[data["algorithm"] == "BIKE-L3", "encaps_expected_cycles"] = 164_000
+    data.loc[data["algorithm"] == "BIKE-L3", "decaps_expected_cycles"] = 3512_000
+
+    # the specification does not give cycle counts for BIKE-L5
+
+    # https://pqc-hqc.org/doc/hqc-specification_2024-10-30.pdf, HQC specification of 2024-10-30
+    data.loc[data["algorithm"] == "HQC-128", "keygen_expected_cycles"] = 75_000
+    data.loc[data["algorithm"] == "HQC-128", "encaps_expected_cycles"] = 177_000
+    data.loc[data["algorithm"] == "HQC-128", "decaps_expected_cycles"] = 323_000
+
+    data.loc[data["algorithm"] == "HQC-192", "keygen_expected_cycles"] = 175_000
+    data.loc[data["algorithm"] == "HQC-192", "encaps_expected_cycles"] = 404_000
+    data.loc[data["algorithm"] == "HQC-192", "decaps_expected_cycles"] = 669_000
+
+    data.loc[data["algorithm"] == "HQC-256", "keygen_expected_cycles"] = 356_000
+    data.loc[data["algorithm"] == "HQC-256", "encaps_expected_cycles"] = 799_000
+    data.loc[data["algorithm"] == "HQC-256", "decaps_expected_cycles"] = 1427_000
+
+    # https://pq-crystals.org/kyber/data/kyber-submission-nist-round3.zip, Kyber round 3 -> problematic
+    data.loc[data["algorithm"] == "ML-KEM-512", "keygen_expected_cycles"] = 21880
+    data.loc[data["algorithm"] == "ML-KEM-512", "encaps_expected_cycles"] = 28592
+    data.loc[data["algorithm"] == "ML-KEM-512", "decaps_expected_cycles"] = 38752
+
+    data.loc[data["algorithm"] == "ML-KEM-768", "keygen_expected_cycles"] = 30460
+    data.loc[data["algorithm"] == "ML-KEM-768", "encaps_expected_cycles"] = 40140
+    data.loc[data["algorithm"] == "ML-KEM-768", "decaps_expected_cycles"] = 51512
+
+    data.loc[data["algorithm"] == "ML-KEM-1024", "keygen_expected_cycles"] = 43212
+    data.loc[data["algorithm"] == "ML-KEM-1024", "encaps_expected_cycles"] = 56556
+    data.loc[data["algorithm"] == "ML-KEM-1024", "decaps_expected_cycles"] = 71180
+
+    # https://frodokem.org/files/FrodoKEM-specification-20210604.pdf
+    data.loc[data["algorithm"] == "FrodoKEM-640-AES", "keygen_expected_cycles"] = (
+        1387_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-640-AES", "encaps_expected_cycles"] = (
+        1634_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-640-AES", "decaps_expected_cycles"] = (
+        1531_000
+    )
+
+    data.loc[data["algorithm"] == "FrodoKEM-976-AES", "keygen_expected_cycles"] = (
+        2846_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-976-AES", "encaps_expected_cycles"] = (
+        3047_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-976-AES", "decaps_expected_cycles"] = (
+        2894_000
+    )
+
+    data.loc[data["algorithm"] == "FrodoKEM-1344-AES", "keygen_expected_cycles"] = (
+        4779_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-1344-AES", "encaps_expected_cycles"] = (
+        5051_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-1344-AES", "decaps_expected_cycles"] = (
+        4849_000
+    )
+
+    data.loc[data["algorithm"] == "FrodoKEM-640-SHAKE", "keygen_expected_cycles"] = (
+        4031_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-640-SHAKE", "encaps_expected_cycles"] = (
+        4218_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-640-SHAKE", "decaps_expected_cycles"] = (
+        4116_000
+    )
+
+    data.loc[data["algorithm"] == "FrodoKEM-976-SHAKE", "keygen_expected_cycles"] = (
+        8599_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-976-SHAKE", "encaps_expected_cycles"] = (
+        8799_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-976-SHAKE", "decaps_expected_cycles"] = (
+        8659_000
+    )
+
+    data.loc[data["algorithm"] == "FrodoKEM-1344-SHAKE", "keygen_expected_cycles"] = (
+        15067_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-1344-SHAKE", "encaps_expected_cycles"] = (
+        15338_000
+    )
+    data.loc[data["algorithm"] == "FrodoKEM-1344-SHAKE", "decaps_expected_cycles"] = (
+        15170_000
+    )
+
+    data["keygen_cycles_ratio"] = (
+        data["keygen_mean_cpu_cycles"] / data["keygen_expected_cycles"]
+    )
+    data["encaps_cycles_ratio"] = (
+        data["encaps_mean_cpu_cycles"] / data["encaps_expected_cycles"]
+    )
+    data["decaps_cycles_ratio"] = (
+        data["decaps_mean_cpu_cycles"] / data["decaps_expected_cycles"]
+    )
+
+    data["total_liboqs_performance_ratio"] = (
+        data["keygen_mean_cpu_cycles"]
+        + data["encaps_mean_cpu_cycles"]
+        + data["decaps_mean_cpu_cycles"]
+    ) / (
+        data["keygen_expected_cycles"]
+        + data["encaps_expected_cycles"]
+        + data["decaps_expected_cycles"]
+    )
+
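+    # difference between measured and reference ("expected") cycle counts, converted to
+    # microseconds via cycles_per_us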
+    data["total_liboqs_performance_difference_in_mu"] = (
+        (
+            data["keygen_mean_cpu_cycles"]
+            + data["encaps_mean_cpu_cycles"]
+            + data["decaps_mean_cpu_cycles"]
+        )
+        - (
+            data["keygen_expected_cycles"]
+            + data["encaps_expected_cycles"]
+            + data["decaps_expected_cycles"]
+        )
+    ) / data["cycles_per_us"]
+
+    print(data)
+
+    return data
+
+
+if __name__ == "__main__":
+    main()