Commit 3ff1d7fd authored by Bartolomeo Berend Müller

Analyze packet dump

parent 05c3a06f
Provided Key Exchange:
{ 1.2.840.113549.1.3.1, DH, dhKeyAgreement } @ default
{ 1.3.101.110, X25519 } @ default
{ 1.3.101.111, X448 } @ default
HKDF @ default
TLS1-PRF @ default
{ 1.3.6.1.4.1.11591.4.11, id-scrypt, SCRYPT } @ default
ECDH @ default
Provided Key Encapsulation:
{ 1.2.840.113549.1.1.1, 2.5.8.1.1, RSA, rsaEncryption } @ default
{ 1.2.840.10045.2.1, EC, id-ecPublicKey } @ default
{ 1.3.101.110, X25519 } @ default
{ 1.3.101.111, X448 } @ default
frodo640aes @ oqsprovider
p256_frodo640aes @ oqsprovider
x25519_frodo640aes @ oqsprovider
frodo640shake @ oqsprovider
p256_frodo640shake @ oqsprovider
x25519_frodo640shake @ oqsprovider
frodo976aes @ oqsprovider
p384_frodo976aes @ oqsprovider
x448_frodo976aes @ oqsprovider
frodo976shake @ oqsprovider
p384_frodo976shake @ oqsprovider
x448_frodo976shake @ oqsprovider
frodo1344aes @ oqsprovider
p521_frodo1344aes @ oqsprovider
frodo1344shake @ oqsprovider
p521_frodo1344shake @ oqsprovider
kyber512 @ oqsprovider
p256_kyber512 @ oqsprovider
x25519_kyber512 @ oqsprovider
kyber768 @ oqsprovider
p384_kyber768 @ oqsprovider
x448_kyber768 @ oqsprovider
x25519_kyber768 @ oqsprovider
p256_kyber768 @ oqsprovider
kyber1024 @ oqsprovider
p521_kyber1024 @ oqsprovider
mlkem512 @ oqsprovider
p256_mlkem512 @ oqsprovider
x25519_mlkem512 @ oqsprovider
mlkem768 @ oqsprovider
p384_mlkem768 @ oqsprovider
x448_mlkem768 @ oqsprovider
x25519_mlkem768 @ oqsprovider
p256_mlkem768 @ oqsprovider
mlkem1024 @ oqsprovider
p521_mlkem1024 @ oqsprovider
p384_mlkem1024 @ oqsprovider
bikel1 @ oqsprovider
p256_bikel1 @ oqsprovider
x25519_bikel1 @ oqsprovider
bikel3 @ oqsprovider
p384_bikel3 @ oqsprovider
x448_bikel3 @ oqsprovider
bikel5 @ oqsprovider
p521_bikel5 @ oqsprovider
hqc128 @ oqsprovider
p256_hqc128 @ oqsprovider
x25519_hqc128 @ oqsprovider
hqc192 @ oqsprovider
p384_hqc192 @ oqsprovider
x448_hqc192 @ oqsprovider
hqc256 @ oqsprovider
p521_hqc256 @ oqsprovider
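
A listing like the one above can be reproduced with OpenSSL 3.x once oqsprovider is available. A minimal sketch in Python, assuming openssl is on PATH and the -provider flags are the activation mechanism in use (both assumptions, not taken from this commit):

import subprocess

# Print the key exchange and KEM algorithms each loaded provider offers.
for category in ("-key-exchange-algorithms", "-kem-algorithms"):
    result = subprocess.run(
        ["openssl", "list", category, "-provider", "default", "-provider", "oqsprovider"],
        capture_output=True,
        text=True,
        check=True,
    )
    print(result.stdout)
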
kem_name nid
0 frodo640aes 0x0200
1 p256_frodo640aes 0x2F00
2 x25519_frodo640aes 0x2F80
3 frodo640shake 0x0201
4 p256_frodo640shake 0x2F01
5 x25519_frodo640shake 0x2F81
6 frodo976aes 0x0202
7 p384_frodo976aes 0x2F02
8 x448_frodo976aes 0x2F82
9 frodo976shake 0x0203
10 p384_frodo976shake 0x2F03
11 x448_frodo976shake 0x2F83
12 frodo1344aes 0x0204
13 p521_frodo1344aes 0x2F04
14 frodo1344shake 0x0205
15 p521_frodo1344shake 0x2F05
16 kyber512 0x023A
17 p256_kyber512 0x2F3A
18 x25519_kyber512 0x2F39
19 kyber768 0x023C
20 p384_kyber768 0x2F3C
21 x448_kyber768 0x2F90
22 x25519_kyber768 0x6399
23 p256_kyber768 0x639A
24 kyber1024 0x023D
25 p521_kyber1024 0x2F3D
26 mlkem512 0x0247
27 p256_mlkem512 0x2F47
28 x25519_mlkem512 0x2FB2
29 mlkem768 0x0248
30 p384_mlkem768 0x2F48
31 x448_mlkem768 0x2FB3
32 x25519_mlkem768 0x2FB4
33 p256_mlkem768 0x2FB5
34 mlkem1024 0x0249
35 p521_mlkem1024 0x2F49
36 p384_mlkem1024 0x2F4A
37 bikel1 0x0241
38 p256_bikel1 0x2F41
39 x25519_bikel1 0x2FAE
40 bikel3 0x0242
41 p384_bikel3 0x2F42
42 x448_bikel3 0x2FAF
43 bikel5 0x0243
44 p521_bikel5 0x2F43
45 hqc128 0x0244
46 p256_hqc128 0x2F44
47 x25519_hqc128 0x2FB0
48 hqc192 0x0245
49 p384_hqc192 0x2F45
50 x448_hqc192 0x2FB1
51 hqc256 0x0246
52 p521_hqc256 0x2F46
53 p256 0x0017
54 p384 0x0018
55 p521 0x0019
56 x25519 0x001D
57 x448 0x001E
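
The nid column corresponds to the TLS supported_groups codepoints seen on the wire; Wireshark reports them as lowercase hex strings. A minimal lookup sketch against this table, assuming it is loaded as a DataFrame with columns kem_name and nid (as get_kem_ids() below produces); the two-row excerpt is illustrative only:

import pandas as pd

# Illustrative excerpt of the table above.
kem_id_df = pd.DataFrame(
    {"kem_name": ["p256_kyber512", "p256"], "nid": ["0x2F3A", "0x0017"]}
)

def nid_to_kem_name(nid: str) -> str:
    # Wireshark reports e.g. "0x2f3a" in lowercase; normalize to "0x" + uppercase.
    nid = nid[:2] + nid[2:].upper()
    return kem_id_df.loc[kem_id_df["nid"] == nid, "kem_name"].values[0]

assert nid_to_kem_name("0x2f3a") == "p256_kyber512"
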
secLevel1,prime256v1,ntru_hps2048509,kyber512,kyber90s512,lightsaber
secLevel1_hybrid,p256_ntru_hps2048509,p256_kyber512,p256_kyber90s512,p256_lightsaber
secLevel3,secp384r1,ntru_hrss701,ntru_hps2048677,saber,kyber768,kyber90s768
secLevel3_hybrid,p384_ntru_hrss701,p384_ntru_hps2048677,p384_saber,p384_kyber768,p384_kyber90s768
secLevel5,secp521r1,ntru_hps4096821,firesaber,kyber1024,kyber90s1024
secLevel5_hybrid,p521_ntru_hps4096821,p521_firesaber,p521_kyber1024,p521_kyber90s1024
secLevel1_alternative,prime256v1,bikel1,frodo640aes,frodo640shake,hqc128,ntrulpr653,ntrulpr761,sntrup653,sntrup761
secLevel1_hybrid_alternative,p256_bikel1,p256_frodo640aes,p256_frodo640shake,p256_hqc128,p256_ntrulpr653,p256_ntrulpr761,p256_sntrup653,p256_sntrup761
secLevel3_alternative,secp384r1,bikel3,frodo976aes,frodo976shake,hqc192,ntrulpr857,sntrup857
secLevel3_hybrid_alternative,p384_bikel3,p384_frodo976aes,p384_frodo976shake,p384_hqc192,p384_ntrulpr857,p384_sntrup857
secLevel5_alternative,secp521r1,frodo1344aes,frodo1344shake,hqc256,ntrulpr1277,sntrup1277
secLevel5_hybrid_alternative,p521_frodo1344aes,p521_frodo1344shake,p521_hqc256,p521_ntrulpr1277,p521_sntrup1277
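
Each row above is a scenario group: a label followed by the key exchange algorithms assigned to that security level. A minimal parsing sketch, assuming the rows are saved to a CSV file (the filename security_levels.csv is hypothetical):

# Parse "label,alg1,alg2,..." rows into {label: [algorithms]}.
def parse_security_levels(path: str) -> dict[str, list[str]]:
    levels = {}
    with open(path) as f:
        for line in f:
            label, *algs = line.strip().split(",")
            levels[label] = algs
    return levels

print(parse_security_levels("security_levels.csv")["secLevel1"])
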
import os
import numpy as np
import pandas as pd
import pyshark
import helper_functions
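# helper_functions provides get_kem_ids() and get_kem_characteristics(); both appear to be defined further below in this commit.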
# NOTE there is also a package called scapy which might work
# To create a capture and sslkeylogfile, do the following:
# First run setup.sh
# Then run experiment.py with scenario_analyze_packets.csv while setting the variables to POOL_SIZE = 1, MEASUREMENTS_PER_TIMER = 5, TIMERS = 1
# Then run teardown.sh
DATESTRING = "20240830153007"
EXPECTED_DELAY = 10 # ms
EXPECTED_MEASUREMENTS_PER_CONFIG = 5
def main():
kem_id_df = helper_functions.get_kem_ids()
# print(kem_id_df)
os.makedirs("feathers", exist_ok=True)
if os.path.exists("feathers/udp_packets.feather"):
udp_packets_df = pd.read_feather("feathers/udp_packets.feather")
else:
udp_packets_df = analyze_udp_packets(kem_id_df)
udp_packets_df.to_feather("feathers/udp_packets.feather")
get_packets_sent_by_node(udp_packets_df)
def get_packets_sent_by_node(udp_packets_df):
udp_packets_df = udp_packets_df.drop(columns=["srcport", "quic_cid"])
# print(udp_packets_df.head(20))
# print()
i = 0
packets_per_node = pd.DataFrame()
for g in udp_packets_df.groupby("wireshark_quic_cid"):
# print(g[0]) # is the group number
# print(g[1]) # is the dataframe of this group
g_df = g[1]
finished_row = g_df.loc[
(g_df["Sender"] == "Client")
& (g_df["tls_handshake_type"].apply(lambda x: "Finished" in x))
]
if finished_row.empty:
print(
f"No finished row found for {i}, probably cuz an error, throwing away this connection, since it was probably retried"
)
# print(g_df)
continue
# print(finished_row)
# print("important", finished_row.iloc[0]["ID"])
# print("before", g_df)
g_df = g_df.query(f"ID <= {finished_row.iloc[0]['ID']}")
# print("after", g_df)
# print()
packets = g_df.groupby("Sender").size()
packets_with_crypto = g_df.query("no_crypto == False").groupby("Sender").size()
# if g_df["kem_algo"].iloc[0] == "p256":
# print(finished_row.index[0])
# print(g_df)
# print(g_df.query("Sender == 'Client'"))
packets_per_node = pd.concat(
[
packets_per_node,
pd.DataFrame(
{
"wireshark_quic_cid": [g[0]],
"kem_algo": g_df["kem_algo"].iloc[0],
"client_sent_packets_count": packets["Client"],
"server_sent_packets_count": packets["Server"],
"client_sent_packets_with_crypto_count": packets_with_crypto[
"Client"
],
"server_sent_packets_with_crypto_count": packets_with_crypto[
"Server"
],
}
),
],
ignore_index=True,
)
i += 1
# if i >= 5:
# break
# print(packets_per_node)
# print(packets_per_node.loc[packets_per_node["kem_algo"] == "p256"])
nunique_and_count = packets_per_node.groupby("kem_algo").agg(
{
"client_sent_packets_count": ["nunique", "count"],
"server_sent_packets_count": ["nunique", "count"],
"client_sent_packets_with_crypto_count": ["nunique"],
"server_sent_packets_with_crypto_count": ["nunique"],
}
)
nunique_and_count.columns = [
"_".join(col).strip() for col in nunique_and_count.columns.values
]
assert (
(
nunique_and_count.client_sent_packets_count_count
== EXPECTED_MEASUREMENTS_PER_CONFIG
)
& (
nunique_and_count.server_sent_packets_count_count
== EXPECTED_MEASUREMENTS_PER_CONFIG
)
).all()
nunique_and_count = nunique_and_count.drop(
columns=["client_sent_packets_count_count", "server_sent_packets_count_count"]
)
assert (
(nunique_and_count.client_sent_packets_with_crypto_count_nunique == 1)
& (nunique_and_count.server_sent_packets_with_crypto_count_nunique == 1)
).all()
# print(nunique_and_count)
# print(packets_per_node)
packets_per_node_with_crypto = packets_per_node[
[
"kem_algo",
"client_sent_packets_with_crypto_count",
"server_sent_packets_with_crypto_count",
]
]
# print(packets_per_node_with_crypto)
packets_per_node_with_crypto = (
packets_per_node_with_crypto.drop_duplicates().sort_values(
by=[
"client_sent_packets_with_crypto_count",
"server_sent_packets_with_crypto_count",
]
)
)
print(packets_per_node_with_crypto)
kem_characteristics_df = helper_functions.get_kem_characteristics()
df = pd.merge(
packets_per_node_with_crypto, kem_characteristics_df, on="kem_algo", how="left"
)
# print(df)
# print()
# print(df.loc[df["kem_algo"] == "p256_mlkem512", "length_public_key"])
    for _, row in df.iterrows():
        kem_algo = row["kem_algo"]
split = kem_algo.split("_")
if len(split) != 2:
continue
classic_part = split[0]
pqc_part = split[1]
# print(classic_part, pqc_part)
classic_length_public_key = df.loc[
df["kem_algo"] == classic_part, "length_public_key"
].values[0]
pqc_length_public_key = df.loc[
df["kem_algo"] == pqc_part, "length_public_key"
].values[0]
df.loc[df["kem_algo"] == kem_algo, "length_public_key"] = (
classic_length_public_key + pqc_length_public_key
)
classic_length_ciphertext = df.loc[
df["kem_algo"] == classic_part, "length_ciphertext"
].values[0]
pqc_length_ciphertext = df.loc[
df["kem_algo"] == pqc_part, "length_ciphertext"
].values[0]
df.loc[df["kem_algo"] == kem_algo, "length_ciphertext"] = (
classic_length_ciphertext + pqc_length_ciphertext
)
df["length_public_key"] = df["length_public_key"].astype(int)
df["length_ciphertext"] = df["length_ciphertext"].astype(int)
# df["length_secret_key"] = df["length_secret_key"].astype(int)
# df["length_shared_secret"] = df["length_shared_secret"].astype(int)
df = df.drop(
columns=[
"claimed_nist_level",
"claimed_security",
"length_secret_key",
"length_shared_secret",
]
)
print(df)
print(df.info())
def analyze_udp_packets(kem_id_df):
cap = pyshark.FileCapture(
os.path.join("captures", f"capture_{DATESTRING}.pcap"),
override_prefs={
"tls.keylog_file": os.path.join(
"captures", f"sslkeylogfile_{DATESTRING}.log"
)
},
display_filter="udp",
)
# print(cap)
df = pd.DataFrame()
for idx, packet in enumerate(cap):
        # ICMP messages (pings) that contain QUIC payloads; ignore them
if "udp" not in packet:
# print(packet)
# print(packet.layers)
continue
# if idx >= 2000:
# if idx >= 6:
# break
# print(packet.number)
# print(packet.layers)
# print(packet.ip.field_names) # ['version', 'hdr_len', 'dsfield', 'dsfield_dscp', 'dsfield_ecn', 'len', 'id', 'flags', 'flags_rb', 'flags_df', 'flags_mf', 'frag_offset', 'ttl', 'proto', 'checksum', 'checksum_status', 'src', 'addr', 'src_host', 'host', 'dst', 'dst_host']
# print(packet.eth.field_names) # ['dst', 'dst_resolved', 'dst_oui', 'dst_oui_resolved', 'addr', 'addr_resolved', 'addr_oui', 'addr_oui_resolved', 'dst_lg', 'lg', 'dst_ig', 'ig', 'src', 'src_resolved', 'src_oui', 'src_oui_resolved', 'src_lg', 'src_ig', 'type']
# print(packet.udp.field_names) # ['srcport', 'dstport', 'port', 'length', 'checksum', 'checksum_status', 'stream', '', 'time_relative', 'time_delta', 'payload']
# if packet.number == "695" or packet.number == "696":
# for quic_layer in packet.get_multiple_layers("quic"):
# print(packet.number, quic_layer.field_names)
match ("scid" in packet.quic.field_names, "dcid" in packet.quic.field_names):
case (True, True):
assert False, "Both scid and dcid are present"
case (False, False):
cid = np.nan
case (True, False):
cid = packet.quic.scid
case (False, True):
cid = packet.quic.dcid
        # A packet can have multiple QUIC layers; each layer can have multiple fields with the same name, hidden behind the all_fields attribute
tls_handshake_types = []
for quic_layer in packet.get_multiple_layers("quic"):
if "tls_handshake_type" in quic_layer.field_names:
for field in quic_layer.tls_handshake_type.all_fields:
tls_handshake_types.append(field.show)
tls_handshake_types = map_tls_handshake_types(tls_handshake_types)
        # The naming of the KEM algorithms inside Wireshark is not always correct
supported_group = np.nan
if "Client Hello" in tls_handshake_types:
for quic_layer in packet.get_multiple_layers("quic"):
if "tls_handshake_extensions_supported_group" in quic_layer.field_names:
                    # Only shows the first of the supported groups, which is fine in our context since we only look at the Client Hello
supported_group = (
quic_layer.tls_handshake_extensions_supported_group
)
# no_crypto is only correct for the quic packets sent in the handshake, not for the packets sent after the handshake
no_crypto = []
for quic_layer in packet.get_multiple_layers("quic"):
if "crypto_offset" in quic_layer.field_names:
no_crypto.append(False)
else:
no_crypto.append(True)
assert len(no_crypto) > 0, "No quic layer"
no_crypto = all(no_crypto)
df = pd.concat(
[
df,
pd.DataFrame(
{
"ID": [packet.number],
"Sender": [
(
"Server"
if packet.eth.src == "00:00:00:00:00:01"
else "Client"
)
],
"srcport": [packet.udp.srcport],
"time_relative": [packet.udp.time_relative],
"time_delta": [packet.udp.time_delta],
"frame_length": [packet.length],
"ip_length": [packet.ip.len],
"udp_length": [packet.udp.length],
"quic_length": [packet.quic.packet_length],
"wireshark_quic_cid": [packet.quic.connection_number],
"quic_cid": [cid],
"supported_group": [supported_group],
"tls_handshake_type": [tls_handshake_types],
"no_crypto": [no_crypto],
}
),
],
ignore_index=True,
)
    # Convert columns from pyshark's str values to numeric types
df["ID"] = df["ID"].astype(int)
df["srcport"] = df["srcport"].astype(int)
df["time_relative"] = df["time_relative"].astype(float)
df["time_delta"] = df["time_delta"].astype(float)
df["frame_length"] = df["frame_length"].astype(int)
df["ip_length"] = df["ip_length"].astype(int)
df["udp_length"] = df["udp_length"].astype(int)
df["quic_length"] = df["quic_length"].astype(int)
df["wireshark_quic_cid"] = df["wireshark_quic_cid"].astype(int)
    # Supported-group values are hex strings with lowercase letters; keep the "0x" prefix lowercase and uppercase the rest
df["supported_group"] = df["supported_group"].apply(
lambda x: x[0:2] + x[2:].upper() if pd.notna(x) else np.nan
)
df["kem_algo"] = df["supported_group"].apply(
lambda x: (
kem_id_df.loc[kem_id_df["nid"] == x, "kem_name"].values[0]
if pd.notna(x)
else np.nan
)
)
df["kem_algo"] = df.groupby("wireshark_quic_cid")["kem_algo"].transform(
lambda x: x.ffill().bfill()
)
printdf = df.drop(columns=["srcport", "quic_cid"])
# print(printdf.head())
# print(printdf.query("ID >= 689 and ID <= 699"))
# print()
# print(printdf.query("ID >= 1657 and ID <= 1680"))
return df
def map_tls_handshake_types(handshake_types):
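    # Values from the TLS HandshakeType registry (RFC 8446, Section 4); types 12, 14, and 16 only appear in TLS 1.2 and earlier.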
tls_handshake_type_map = {
"1": "Client Hello",
"2": "Server Hello",
"4": "New Session Ticket",
"8": "Encrypted Extensions",
"11": "Certificate",
"12": "Server Key Exchange",
"13": "Certificate Request",
"14": "Server Hello Done",
"15": "Certificate Verify",
"16": "Client Key Exchange",
"20": "Finished",
}
return [
tls_handshake_type_map.get(
handshake_type, f"Unknown tls_handshake_type {handshake_type}"
)
for handshake_type in handshake_types
]
if __name__ == "__main__":
    main()
import os
import pandas as pd
import numpy as np
import yaml
def main():
sort_kem_alg_via_categories_type()
# get_kem_characteristics()
def sort_kem_alg_via_categories_type():
df = pd.read_feather("feathers/data.feather")
print(df.info())
categories = [
"secp256r1",
"secp384r1",
"secp521r1",
"x25519",
"x448",
"mlkem512",
"p256_mlkem512",
"x25519_mlkem512",
"mlkem768",
"p384_mlkem768",
"x448_mlkem768",
"x25519_mlkem768",
"p256_mlkem768",
"mlkem1024",
"p521_mlkem1024",
"p384_mlkem1024",
"bikel1",
"p256_bikel1",
"x25519_bikel1",
"bikel3",
"p384_bikel3",
"x448_bikel3",
"bikel5",
"p521_bikel5",
"hqc128",
"p256_hqc128",
"x25519_hqc128",
"hqc192",
"p384_hqc192",
"x448_hqc192",
"hqc256",
"p521_hqc256",
"frodo640aes",
"p256_frodo640aes",
"x25519_frodo640aes",
"frodo640shake",
"p256_frodo640shake",
"x25519_frodo640shake",
"frodo976aes",
"p384_frodo976aes",
"x448_frodo976aes",
"frodo976shake",
"p384_frodo976shake",
"x448_frodo976shake",
"frodo1344aes",
"p521_frodo1344aes",
"frodo1344shake",
"p521_frodo1344shake",
]
df["kem_alg"] = pd.Categorical(df["kem_alg"], categories=categories, ordered=True)
# df.to_feather("feathers/data.feather")
subpart = df.query(
"scenario == 'corrupt' and protocol == 'quic' and srv_corrupt == 1.0"
)
subpart = subpart[["scenario", "protocol", "kem_alg", "srv_corrupt"]]
print(subpart.sort_values(by=["kem_alg"]))
if __name__ == "__main__":
    main()
import os
import numpy as np
import pandas as pd
import yaml
# TODO create a list of KEMs and their characteristics, from https://github.com/open-quantum-safe/liboqs/blob/main/docs/algorithms/kem/ml_kem.yml
TMP_DIR = "../tmp"
def get_kem_characteristics():
def match_name(name):
mapping = {
"ML-KEM-512-ipd": "mlkem512",
"ML-KEM-768-ipd": "mlkem768",
"ML-KEM-1024-ipd": "mlkem1024",
"BIKE-L1": "bikel1",
"BIKE-L3": "bikel3",
"BIKE-L5": "bikel5",
"FrodoKEM-640-AES": "frodo640aes",
"FrodoKEM-640-SHAKE": "frodo640shake",
"FrodoKEM-976-AES": "frodo976aes",
"FrodoKEM-976-SHAKE": "frodo976shake",
"FrodoKEM-1344-AES": "frodo1344aes",
"FrodoKEM-1344-SHAKE": "frodo1344shake",
"HQC-128": "hqc128",
"HQC-192": "hqc192",
"HQC-256": "hqc256",
}
return mapping[name]
LIBOQS_DOCS_KEM_DIR = "liboqs/docs/algorithms/kem"
file_list = []
for root, _, files in os.walk(os.path.join(TMP_DIR, LIBOQS_DOCS_KEM_DIR)):
for file in files:
if file.endswith(".yml"):
file_list.append(os.path.join(root, file))
# print(file_list)
df = pd.DataFrame()
for file_name in file_list:
with open(file_name, "r") as file:
kem_info = yaml.safe_load(file)
# print(kem_info)
for parameter_set in kem_info["parameter-sets"]:
# print(parameter_set)
name = parameter_set["name"]
if (
("sntrup761" in name)
or ("Kyber" in name)
or ("Classic-McEliece" in name)
):
continue
name = match_name(name)
claimed_nist_level = parameter_set["claimed-nist-level"]
claimed_security = parameter_set["claimed-security"]
length_public_key = parameter_set["length-public-key"]
length_ciphertext = parameter_set["length-ciphertext"]
length_secret_key = parameter_set["length-secret-key"]
length_shared_secret = parameter_set["length-shared-secret"]
# no_secret_dependent_branching_claimed = parameter_set[
# "no-secret-dependent-branching-claimed"
# ]
# large_stack_usage = parameter_set["large-stack-usage"]
df = pd.concat(
[
df,
pd.DataFrame(
[
{
"kem_name": name,
"claimed_nist_level": claimed_nist_level,
"claimed_security": claimed_security,
"length_public_key": length_public_key,
"length_ciphertext": length_ciphertext,
"length_secret_key": length_secret_key,
"length_shared_secret": length_shared_secret,
}
]
),
],
ignore_index=True,
)
df = pd.concat(
[
df,
pd.DataFrame(
{
"kem_name": ["p256", "p384", "p521", "x25519", "x448"],
"claimed_nist_level": ["1", "3", "5", "1", "3"],
"claimed_security": [np.nan, np.nan, np.nan, np.nan, np.nan],
"length_public_key": [
65,
97,
133,
32,
56,
], # vetted by looking at wireshark
                    # NOTE: check against a more authoritative source
"length_ciphertext": [
65,
97,
133,
32,
56,
], # vetted by looking at wireshark
# "length_secret_key": [32, 48, 66, 32, 56],
"length_secret_key": [np.nan, np.nan, np.nan, np.nan, np.nan],
# "length_shared_secret": [32, 48, 66, 32, 56],
"length_shared_secret": [np.nan, np.nan, np.nan, np.nan, np.nan],
}
),
],
ignore_index=True,
)
df = df.rename(columns={"kem_name": "kem_algo"})
print(df.info())
# print(df)
return df
def get_kem_ids():
# https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md
# https://github.com/open-quantum-safe/oqs-provider/blob/main/oqs-template/generate.yml
def match_name_group_to_pXXX(name_group):
mapping = {
"frodo640aes": "p256",
"frodo640shake": "p256",
"frodo976aes": "p384",
"frodo976shake": "p384",
"frodo1344aes": "p521",
"frodo1344shake": "p521",
"kyber512": "p256",
"kyber768": "p384",
"kyber1024": "p521",
"mlkem512": "p256",
"mlkem768": "p384",
"mlkem1024": "p521",
"bikel1": "p256",
"bikel3": "p384",
"bikel5": "p521",
"hqc128": "p256",
"hqc192": "p384",
"hqc256": "p521",
}
return mapping[name_group]
with open(f"{TMP_DIR}/oqs-provider/oqs-template/generate.yml", "r") as file:
data = yaml.safe_load(file)
df = pd.DataFrame()
for entry in data["kems"]:
if "nid" not in entry:
continue
# print(entry["name_group"], entry["nid"])
# print(
# f"{match_name_group_to_pXXX(entry['name_group'])}_{entry['name_group']}",
# entry["nid_hybrid"],
# )
concat = [
{"kem_name": entry["name_group"], "nid": entry["nid"]},
{
"kem_name": f"{match_name_group_to_pXXX(entry['name_group'])}_{entry['name_group']}",
"nid": entry["nid_hybrid"],
},
]
if "extra_nids" in entry:
if "current" in entry["extra_nids"]:
for i in entry["extra_nids"]["current"]:
# print(f"{i['hybrid_group']}_{entry['name_group']}", i["nid"])
concat.append(
{
"kem_name": f"{i['hybrid_group']}_{entry['name_group']}",
"nid": i["nid"],
}
)
df = pd.concat([df, pd.DataFrame(concat)], ignore_index=True)
# source https://datatracker.ietf.org/doc/html/rfc8446#appendix-B.3.1.4
df = pd.concat(
[
df,
pd.DataFrame(
{
"kem_name": [
"p256",
"p384",
"p521",
"x25519",
"x448",
],
"nid": ["0x0017", "0x0018", "0x0019", "0x001D", "0x001E"],
}
),
],
ignore_index=True,
)
# print(df)
return df
analyze_packets,srv_pkt_loss,srv_delay,srv_jitter,srv_duplicate,srv_corrupt,srv_reorder,srv_rate,cli_pkt_loss,cli_delay,cli_jitter,cli_duplicate,cli_corrupt,cli_reorder,cli_rate
,0,10ms,0,0,0,0,500,0,10ms,0,0,0,0,500
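
The scenario row above drives the network emulation: the srv_* columns shape the server-side link and the cli_* columns the client-side link (10 ms delay, rate 500, all other impairments off). A minimal sketch splitting a row into per-side parameter dicts; the column semantics are inferred from the header, and the unit of the rate column is not stated in this file:

import csv

# Split "srv_*"/"cli_*" columns into per-side parameter dicts.
def parse_scenario_row(row: dict) -> dict:
    params = {"srv": {}, "cli": {}}
    for key, value in row.items():
        side, _, name = key.partition("_")
        if side in params:
            params[side][name] = value
    return params

with open("scenario_analyze_packets.csv") as f:  # filename from the comment in the analysis script
    for row in csv.DictReader(f):
        print(parse_scenario_row(row))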