Commit a2a7a8fc authored by Bartolomeo Berend Müller

Added error count to data model

parent 9e7845fe
......@@ -24,7 +24,7 @@ To run the experiment with a parallelism of 16, you need approximately 24 hours.
In `benchmarking-pqc-in-quic/pq-tls-benchmark-framework/emulation-exp/code/kex`:
```bash
./scripts/setup.sh $(nproc)
sudo .venv/bin/python scripts/experiment.py testscenarios/scenario_static.csv testscenarios/scenario_corrupt.csv testscenarios/scenario_delay.csv testscenarios/scenario_duplicate.csv testscenarios/scenario_jitter_delay20ms.csv testscenarios/scenario_packetloss.csv testscenarios/scenario_rate_both.csv testscenarios/scenario_rate_client.csv testscenarios/scenario_rate_server.csv testscenarios/scenario_reorder.csv
sudo .venv/bin/python scripts/experiment.py testscenarios/scenario_static.csv testscenarios/scenario_corrupt.csv testscenarios/scenario_delay.csv testscenarios/scenario_duplicate.csv testscenarios/scenario_jitter_delay20ms.csv testscenarios/scenario_packetloss.csv testscenarios/scenario_rate_both.csv testscenarios/scenario_rate_client.csv testscenarios/scenario_rate_server.csv testscenarios/scenario_reorder.csv | tee stdout.log
./scripts/teardown.sh $(nproc)
```
......
......@@ -247,6 +247,7 @@ int main(int argc, char *argv[])
size_t measurements = 0;
struct timespec start, finish;
double *handshake_times_ms = malloc(measurements_to_make * sizeof(*handshake_times_ms));
int error_count = 0;
while (measurements < measurements_to_make)
{
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
......@@ -260,6 +261,7 @@ int main(int argc, char *argv[])
* Non-retryable errors are caught by manual
* inspection of logs, which has sufficed
* for our purposes */
error_count += 1;
continue;
}
......@@ -280,6 +282,7 @@ int main(int argc, char *argv[])
measurements++;
}
printf("%d;", error_count);
for (size_t i = 0; i < measurements - 1; i++)
{
printf("%f,", handshake_times_ms[i]);
......
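With this change, each timer process prints its error count first, terminated by a semicolon, followed by the comma-separated handshake times in milliseconds. A minimal parsing sketch of that output line is shown below; the line content is made-up example data, not real measurements.

```python
# Assumed output format of the timer after this commit:
# "<error_count>;<t1>,<t2>,...,<tn>"  (times in ms)
line = "2;12.41,11.98,12.07"  # illustrative example data

error_count_str, times_str = line.split(";", 1)
error_count = int(error_count_str)                            # failed handshake attempts
handshake_times_ms = [float(t) for t in times_str.split(",")]  # successful measurements

print(error_count, handshake_times_ms)
```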
......@@ -133,6 +133,7 @@ int main(int argc, char* argv[])
SSL_CTX_set_verify(ssl_ctx, SSL_VERIFY_PEER, NULL);
// Start experiments
int error_count = 0;
while(measurements < measurements_to_make)
{
clock_gettime(CLOCK_MONOTONIC_RAW, &start);
......@@ -145,6 +146,7 @@ int main(int argc, char* argv[])
* Non-retryable errors are caught by manual
* inspection of logs, which has sufficed
* for our purposes */
error_count += 1;
continue;
}
......@@ -161,6 +163,7 @@ int main(int argc, char* argv[])
measurements++;
}
printf("%d;", error_count);
for(size_t i = 0; i < measurements - 1; i++)
{
printf("%f,", handshake_times_ms[i]);
......
......@@ -72,8 +72,10 @@ def main():
path_to_results_csv_file,
"a",
) as out:
result = run_timers(timer_pool, protocol, kem_alg)
csv.writer(out).writerow(result)
error_count, result = run_timers(
timer_pool, protocol, kem_alg
)
csv.writer(out).writerow([error_count, *result])
timer_pool.close()
timer_pool.join()
......@@ -178,7 +180,12 @@ def run_timers(timer_pool, protocol, kem_alg):
results_nested = timer_pool.starmap(
time_handshake, [(protocol, kem_alg, MEASUREMENTS_PER_TIMER)] * TIMERS
)
return [item for sublist in results_nested for item in sublist]
    # results_nested is a list of tuples, each containing an error_count and a list of measurements
error_count_aggregated = sum([error_count for error_count, _ in results_nested])
results_nested = [measurements for _, measurements in results_nested]
return error_count_aggregated, [
item for sublist in results_nested for item in sublist
]
# do TLS handshake (s_timer.c)
......@@ -214,7 +221,9 @@ def time_handshake(protocol, kem_alg, measurements) -> list[float]:
]
result = run_subprocess(command)
release_network_namespace(network_namespace)
return [float(i) for i in result.strip().split(",")]
error_count, result = result.split(";")
error_count = int(error_count)
return error_count, [float(i) for i in result.strip().split(",")]
def run_subprocess(command, working_dir=".", expected_returncode=0) -> str:
......
......@@ -62,6 +62,8 @@ def read_data_into_pandas():
"cli_corrupt",
"cli_reorder",
"cli_rate",
"error_count",
"error_rate",
"measurements",
"mean",
"std",
......@@ -101,8 +103,9 @@ def read_data_into_pandas():
assert len(result_file_data.columns) == len(df_scenariofile)
for i in range(len(result_file_data.columns)):
measurements = result_file_data.iloc[:, i].tolist()
measurements = np.array(measurements)
measurements_and_error_count = result_file_data.iloc[:, i].tolist()
error_count = measurements_and_error_count[0]
measurements = np.array(measurements_and_error_count[1:])
data.loc[len(data)] = {
"scenario": scenario,
"protocol": protocol,
......@@ -122,6 +125,8 @@ def read_data_into_pandas():
"cli_corrupt": df_scenariofile.iloc[i]["cli_corrupt"],
"cli_reorder": df_scenariofile.iloc[i]["cli_reorder"],
"cli_rate": df_scenariofile.iloc[i]["cli_rate"],
"error_count": error_count,
"error_rate": error_count / len(measurements),
"measurements": measurements,
"mean": np.mean(measurements),
"std": np.std(measurements),
......
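For context, a hedged sketch of how one results row, written by experiment.py as `[error_count, *handshake_times_ms]`, maps onto the new `error_count` and `error_rate` fields of the data model. The row values below are made up for illustration.

```python
import numpy as np

# Illustrative row as written by experiment.py: [error_count, *handshake_times_ms]
row = [2, 12.41, 11.98, 12.07]

error_count = int(row[0])
measurements = np.array(row[1:], dtype=float)
error_rate = error_count / len(measurements)  # errors per successful measurement

print(error_count, error_rate, measurements.mean(), measurements.std())
```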