diff --git a/pq-tls-benchmark-framework/emulation-exp/code/install-prereqs-ubuntu.sh b/pq-tls-benchmark-framework/emulation-exp/code/install-prereqs-ubuntu.sh
index 056deaf65d45191db0627d62663d34c81e8e18cc..04a83699d0d5d8912bcb120cbbea6f75c7fc2974 100755
--- a/pq-tls-benchmark-framework/emulation-exp/code/install-prereqs-ubuntu.sh
+++ b/pq-tls-benchmark-framework/emulation-exp/code/install-prereqs-ubuntu.sh
@@ -27,7 +27,7 @@ OPENSSL_INSTALL=${ROOT}/.local/openssl
 # Fetch all the files we need
 wget https://cmake.org/files/v${CMAKE_VERSION}/cmake-${CMAKE_VERSION}.${CMAKE_BUILD}-linux-x86_64.sh
 git clone --no-checkout --single-branch --branch master https://github.com/openssl/openssl.git
-(cd openssl && git checkout 2a45839778955ffcab01918f10544d46e42f9a5b) # OpenSSL 3.4.0-dev
+(cd openssl && git checkout 2a45839778955ffcab01918f10544d46e42f9a5b) # OpenSSL 3.4.0-dev # NOTE: can be updated to the stable 3.4.0 release
 git clone --no-checkout --single-branch --branch 0.10.1-release https://github.com/open-quantum-safe/liboqs.git
 (cd liboqs && git switch --detach tags/0.10.1)
 git clone --no-checkout --single-branch --branch main https://github.com/open-quantum-safe/oqs-provider.git
diff --git a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
index 92dd204acf2a71c4b99a252e184e4dda1e2f6e58..37d904803ccb147382575451243bd699da1cd252 100755
--- a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
+++ b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/generate_graphs.py
@@ -13,7 +13,7 @@ import scipy
 
 import helper_scripts.helper_functions as helper_functions
 
-RESULTS_DIR = "saved/results-run-20241028-vm-p16"
+RESULTS_DIR = "saved/results-run-20241030-vm-p16"
 FILTER_RESULTS = []
 PLOTS_DIR = "plots"
 FEATHERS_DIR = "feathers"
@@ -26,9 +26,9 @@ def main():
     data = load_data()
 
     # generally these take only seconds for graphs when not generating for single algorithms
-    plot_general_plots()  # takes about 4 seconds
-    plot_lines(data)  # takes about 1:50 min
-    plot_static_data(data)  # takes about 4 min
+    # plot_general_plots()  # takes about 4 seconds
+    # plot_lines(data)  # takes about 1:50 min
+    # plot_static_data(data)  # takes about 4 min
     plot_distributions(data)
 
 
@@ -69,6 +69,8 @@ def read_data_into_pandas():
             "std",
             "cv",
             "median",
+            "qtl_01",
+            "qtl_05",
             "qtl_25",
             "qtl_75",
             "qtl_95",
@@ -132,6 +134,8 @@ def read_data_into_pandas():
                 "std": np.std(measurements),
                 "cv": np.std(measurements) / np.mean(measurements),
                 "median": np.median(measurements),
+                "qtl_01": np.quantile(measurements, 0.01),
+                "qtl_05": np.quantile(measurements, 0.05),
                 "qtl_25": np.quantile(measurements, 0.25),
                 "qtl_75": np.quantile(measurements, 0.75),
                 "qtl_95": np.quantile(measurements, 0.95),
@@ -739,6 +743,8 @@ def plot_lines(data):
 
     do_graphs_for = [
         "median",
+        "qtl_01",
+        "qtl_05",
         "qtl_25",
         "qtl_75",
         "qtl_95",
@@ -788,21 +794,33 @@ def plot_distributions(data):
             #     f"scenario: {row['scenario']}, protocol: {row['protocol']}, sec_level: {row['sec_level']}, kem_alg: {row['kem_alg']}, len: {len(filtered_data)}"
             # )
 
+            def remove_rows_of_data(data):
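+                # thin out the rows so the violins do not overlap on the x-axis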
+                # print(data.iloc[0]["scenario"])
+                # print(data)
+                match data.iloc[0]["scenario"]:
+                    case "packetloss":
+                        # keep rows where srv_pkt_loss is 0, 4, 8, 12, 16 or 20
+                        return data.query("srv_pkt_loss % 4 == 0")
+                    case _:
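+                        # default thinning: keep the first row, every fourth row in between, and the last row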
+                        return pd.concat(
+                            [
+                                data.iloc[[0]],
+                                data.iloc[3:-4:4],
+                                data.iloc[[-1]],
+                            ]
+                        )
+
             if filtered:
-                filtered_data = pd.concat(
-                    [
-                        filtered_data.iloc[[0]],
-                        filtered_data.iloc[3:-4:4],
-                        filtered_data.iloc[[-1]],
-                    ]
-                )
+                filtered_data = remove_rows_of_data(filtered_data)
                 # print(filtered_data)
+                # if filtered_data.iloc[0]["scenario"] == "packetloss":
+                #     exit()
 
             plt.figure()
             x = get_x_axis(row["scenario"], filtered_data, len(filtered_data))
             x = x.to_list()
             # print(x)
-            width = 0.5 if not filtered else 2
+            width = 0.5 if not filtered else 2.5
             plt.violinplot(
                 filtered_data["measurements"],
                 positions=x,
@@ -814,6 +832,8 @@ def plot_distributions(data):
             #     pc.set_alpha(0.5)
 
             plt.ylim(bottom=0)
+            if filtered:
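+                # cap the y-axis at the largest 95th-percentile value so extreme outliers do not stretch the scale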
+                plt.ylim(bottom=0, top=filtered_data["qtl_95"].max())
             plt.xlim(left=0)
             plt.xlabel(row["scenario"])
             plt.ylabel(f"Time-to-first-byte (ms)")
@@ -859,7 +879,7 @@ def plot_distributions(data):
                 plt.close()
                 # return
 
-    plot_multiple_violin_plots(data, filtered=False)
+    # plot_multiple_violin_plots(data, filtered=False)
     plot_multiple_violin_plots(data, filtered=True)
     # plot_single_violin_plot(data)  # takes an age
 
diff --git a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/queries.py b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/queries.py
index f0c0c0843bda56c3094e474feeacb27f39283bcc..773b854a89dbf292d42b07498df022a0cea6da6b 100644
--- a/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/queries.py
+++ b/pq-tls-benchmark-framework/emulation-exp/code/kex/scripts/queries.py
@@ -8,11 +8,14 @@ import helper_scripts.helper_functions as hf
 
 def main():
     data = pd.read_feather(f"{FEATHERS_DIR}/data.feather")
+    # data = pd.read_feather(f"{FEATHERS_DIR}/data_run_20241028.feather")
 
-    # print_kem_ids()
+    stats_of_qtl95_of_packetloss(data)
+    # error_count_and_rate(data)
+    # measurements_with_negative_skewness(data)
+    # iqr_kurtosis_of_delay_data(data)
 
-    # stats_of_qtl95_of_packetloss(data)
-    error_count_and_rate(data)
+    # print_kem_ids()
 
 
 def stats_of_qtl95_of_packetloss(data):
@@ -39,33 +42,21 @@ def stats_of_qtl95_of_packetloss(data):
     print(ldata)
 
 
+# For the old run without the bigger crypto buffer: grep counts 83996 CRYPTO_BUFFER_EXCEEDEDs, while the total error count of 84186 is just slightly above that
+# For the new run with the fix: 187 other errors remain, probably from the server side, since the client side reports 'Shutdown before completion' while waiting for the handshake to complete -> b'808B57C2E1760000:error:0A0000CF:SSL routines:quic_do_handshake:protocol is shutdown:ssl/quic/quic_impl.c:1717:\n'
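+# (the CRYPTO_BUFFER_EXCEEDED count above was presumably obtained with something like grep -c CRYPTO_BUFFER_EXCEEDED on the raw client output)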
 def error_count_and_rate(data):
     print("Error count and rate")
     ldata = data
     print("Total index length")
     print(len(ldata.index))
     print("Total error count")
-    print(
-        ldata["error_count"].sum()
-    )  # Grep tells there are 83996 CRYPTO_BUFFER_EXCEEDEDs, while total error count is just a bit above it 84186
+    print(ldata["error_count"].sum())
     ldata = ldata.query("error_count > 0")
     print("Total index length with error count > 0")
     print(len(ldata.index))
-    ldata = ldata[~ldata["kem_alg"].str.contains("frodo")]
-    print("Total index length with error count > 0 and without frodo")
-    print(len(ldata.index))
-    print(
-        ldata[
-            [
-                "scenario",
-                "protocol",
-                "sec_level",
-                "kem_alg",
-                "error_count",
-                "error_rate",
-            ]
-        ]
-    )
+    print("Error count describe")
+    print(ldata["error_count"].describe())
+    print(ldata["scenario"].value_counts())
     # print(ldata["scenario"].unique()) # all 10 scenarios
     print("With error count > 1")
     ldata = ldata.query("error_count > 1")
@@ -83,6 +74,30 @@ def error_count_and_rate(data):
     )
 
 
+def measurements_with_negative_skewness(data):
+    print("Measurements with negative skewness")
+    ldata = data
+    print("Skewness of data")
+    print(ldata["skewness"].describe())
+
+    print("Amount of data with negative skewness")
+    ldata = ldata.query("skewness < 0")
+    print(len(ldata.index))
+    # ldata = ldata.query("scenario != 'reorder'")
+    # print(len(ldata.index))
+    # print the per-scenario count of measurements with negative skewness
+    print("Per-scenario count of measurements with negative skewness")
+    print(ldata["scenario"].value_counts())  # mostly reorder and jitter, rate to a lesser extent
+
+
+def iqr_kurtosis_of_delay_data(data):
+    print("Kurtosis of data, Fisher's definition, so 0 is normal distribution")
+    ldata = data
+    print(ldata[["iqr", "kurtosis"]].describe())
+    ldata = ldata.query("scenario == 'delay'")
+    print(ldata[["iqr", "kurtosis"]].describe())
+
+
 def print_kem_ids():
     data = hf.get_kem_ids()
     print(data)