Mirror of https://github.com/ltcptgeneral/IdealRMT-DecisionTrees.git, synced 2025-09-07 15:57:23 +00:00

Compare commits: 9729c6e68c...decision-t

16 Commits
fda251f051
afc882a569
6de3807fe2
fc16d3c586
7bee40ecf9
e811171a73
61a451b82d
c73de36c70
fadeab8a99
c208037ae9
ae3128f6e8
25e5a86a43
d3fe6efd47
23867747cd
eeebc17d56
0d5e51f582
.gitignore (vendored), 5 changed lines
@@ -1,4 +1,5 @@
data.*
__pycache__
tree.json
compressed_tree.json
*.json
.DS_Store
.ipynb_checkpoints/
.gitmodules (vendored, new file), 3 lines
@@ -0,0 +1,3 @@
[submodule "ideal-rmt-simulator"]
	path = ideal-rmt-simulator
	url = https://github.com/rfchang/ideal-rmt-simulator
CompressedTreeParser.ipynb (new file), 128 lines
@@ -0,0 +1,128 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "938dec51",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import argparse\n",
"from sklearn.tree import DecisionTreeClassifier, plot_tree, _tree\n",
"from sklearn.metrics import accuracy_score\n",
"from sklearn.tree import export_graphviz\n",
"import pydotplus\n",
"from matplotlib import pyplot as plt\n",
"from labels import mac_to_label\n",
"import json\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "442624c7",
"metadata": {},
"outputs": [],
"source": [
"Set1 = pd.read_csv('data.csv').values.tolist()\n",
"X = [i[0:3] for i in Set1]\n",
"Y =[i[3] for i in Set1]"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "12ad454d",
"metadata": {},
"outputs": [],
"source": [
"predict_Yt = []\n",
"with open('compressed_tree.json', 'r') as file:\n",
"    data = json.load(file)\n",
"    classes = data[\"classes\"]\n",
"    for x in X:\n",
"        counter = 0\n",
"        class_set = []\n",
"        for feature in reversed(data['layers']): #Have to reverse this list due to structure of the data.csv file and how it aligns with the compressed tree layers\n",
"            for node in data['layers'][feature]:\n",
"                if node['min'] is None:\n",
"                    if x[counter] < node['max']:\n",
"                        class_set.append(node['classes'])\n",
"                        break #is this an issue?\n",
"                    else:\n",
"                        continue\n",
"                elif node['max'] is None:\n",
"                    if node['min'] < x[counter]:\n",
"                        class_set.append(node['classes'])\n",
"                        break #is this an issue?\n",
"                    else:\n",
"                        continue\n",
"                elif node['min'] < x[counter] and x[counter] < node['max']:\n",
"                    class_set.append(node['classes'])\n",
"                    break #is this an issue?\n",
"\n",
"            counter += 1\n",
"        result = set(class_set[0])\n",
"        for s in class_set[1:]:\n",
"            result.intersection_update(s)\n",
"\n",
"        #predict_Yt.append(list(result))\n",
"        #print(result)\n",
"        if len(result) == 1:\n",
"            prediction = list(result)[0]\n",
"            pred_class = classes[prediction]\n",
"            predict_Yt.append(pred_class)\n",
"        else:\n",
"            predict_Yt.append(None)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8b4c56b6",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0.8448217242194891\n"
]
}
],
"source": [
"correct = 0\n",
"for i in range(len(Y)):\n",
"    prediction = predict_Yt[i]\n",
"    if prediction != None and Y[i] == prediction:\n",
"        correct += 1\n",
"\n",
"print(correct / len(Y))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
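For reference, the lookup performed by CompressedTreeParser.ipynb can be restated compactly: every feature layer in compressed_tree.json is a list of disjoint {min, max, classes} intervals, each feature's matching interval contributes a candidate class set, and a sample is classified only when the intersection of all candidate sets is a single class. Below is a minimal standalone sketch of that logic, assuming the same compressed_tree.json layout; the predict helper is illustrative and not part of the repository.

```python
import json

def predict(sample, tree_path="compressed_tree.json"):
    """Classify one sample against the compressed tree (sketch, not repo code).

    sample: feature values ordered to match reversed(layers), mirroring the
    notebook's reversed(data['layers']) traversal of the layer dict.
    """
    with open(tree_path) as f:
        tree = json.load(f)

    candidate_sets = []
    for value, feature in zip(sample, reversed(list(tree["layers"]))):
        for node in tree["layers"][feature]:
            lo, hi = node["min"], node["max"]
            # None marks an unbounded side of the interval
            if (lo is None or lo < value) and (hi is None or value < hi):
                candidate_sets.append(set(node["classes"]))
                break

    result = set.intersection(*candidate_sets) if candidate_sets else set()
    # only an unambiguous (singleton) intersection yields a prediction
    return tree["classes"][result.pop()] if len(result) == 1 else None
```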
File diff suppressed because one or more lines are too long
@@ -89,7 +89,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "switch",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -103,7 +103,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
}
},
"nbformat": 4,
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 1,
"id": "ec310f34",
"metadata": {},
"outputs": [],
@@ -14,7 +14,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 2,
"id": "5b54797e",
"metadata": {},
"outputs": [],
@@ -28,22 +28,25 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 3,
"id": "a38fdb8a",
"metadata": {},
"outputs": [],
"source": [
"# First cleanup the tree by rounding the decision points to integer values\n",
"# We assume all features will use integer values. If this is not the case, then training data should be normalized so that integer values can be accurate enough\n",
"# we also enumerate all the paths for later use\n",
"\n",
"i = 0\n",
"\n",
"path_ids = set()\n",
"path_classes = tree[\"classes\"]\n",
"\n",
"# for each path in the tree\n",
"for path in paths:\n",
"\t# assign a path id \n",
"\tpath[\"id\"] = i\n",
"\ti += 1\n",
"\tpath_ids.add(i)\n",
"\t#path_classes.add(path[\"classification\"])\n",
"\ti += 1\t\n",
"\t# for each condition\n",
"\tconditions = path[\"conditions\"]\n",
"\tfor condition in conditions:\n",
@@ -57,7 +60,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 4,
"id": "2fd4f738",
"metadata": {},
"outputs": [],
@@ -80,7 +83,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 5,
"id": "98cde024",
"metadata": {},
"outputs": [],
@@ -106,13 +109,13 @@
"\t\tvalue = condition[\"value\"]\n",
"\n",
"\t\t# move the min/max for the corresponding feature in compressed\n",
"\t\tif operation == \"<=\" and compressed[feature][\"min\"] is None:\n",
"\t\tif operation == \"<=\" and compressed[feature][\"max\"] is None:\n",
"\t\t\tcompressed[feature][\"max\"] = value\n",
"\t\telif operation == \">\" and compressed[feature][\"max\"] is None:\n",
"\t\telif operation == \">\" and compressed[feature][\"min\"] is None:\n",
"\t\t\tcompressed[feature][\"min\"] = value\n",
"\t\telif operation == \"<=\" and value < compressed[feature][\"min\"]:\n",
"\t\telif operation == \"<=\" and value < compressed[feature][\"max\"]:\n",
"\t\t\tcompressed[feature][\"max\"] = value\n",
"\t\telif operation == \">\" and value > compressed[feature][\"max\"]:\n",
"\t\telif operation == \">\" and value > compressed[feature][\"min\"]:\n",
"\t\t\tcompressed[feature][\"min\"] = value\n",
"\n",
"\tpath[\"compressed\"] = compressed"
@@ -120,7 +123,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 6,
"id": "b6fbadbf",
"metadata": {},
"outputs": [],
@@ -168,49 +171,58 @@
"\t\t# for each bucket which encompases the condition's range, add this path's id to the sets \n",
"\t\ti = 0\n",
"\t\tfor bp in breakpoints[feature_name]:\n",
"\t\t\tin_range = is_in_range(bp, lower, upper)\n",
"\t\t\tif in_range:\n",
"\t\t\tif is_in_range(bp, lower, upper):\n",
"\t\t\t\tbuckets_id[feature_name][i].add(ID)\n",
"\t\t\t\tbuckets_class[feature_name][i].add(Class)\n",
"\t\t\ti += 1"
"\t\t\ti += 1\n",
"\n",
"\t\tif is_in_range(bp+1, lower, upper):\n",
"\t\t\tbuckets_id[feature_name][i].add(ID)\n",
"\t\t\tbuckets_class[feature_name][i].add(Class)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 7,
"id": "0a767971",
"metadata": {},
"outputs": [],
"source": [
"# combine breakpoints and buckets to one representation\n",
"\n",
"compressed_tree = defaultdict(list)\n",
"compressed_layers = defaultdict(list)\n",
"for feature_name in buckets_id:\n",
"\tlower = None\n",
"\tupper = breakpoints[feature_name][0]\n",
"\tpaths = buckets_id[feature_name][0]\n",
"\tclasses = buckets_class[feature_name][0]\n",
"\t#print(f\"{feature_name} = [{lower}, {upper}]: {members}\")\n",
"\tcompressed_tree[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\tcompressed_layers[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\tfor i in range(1, len(buckets_id[feature_name]) - 1):\n",
"\t\tlower = breakpoints[feature_name][i-1]\n",
"\t\tupper = breakpoints[feature_name][i]\n",
"\t\tmembers = buckets_id[feature_name][i]\n",
"\t\tpaths = buckets_id[feature_name][i]\n",
"\t\tclasses = buckets_class[feature_name][i]\n",
"\t\t#print(f\"{feature_name} = [{lower}, {upper}]: {buckets[feature_name][i]}\")\n",
"\t\tcompressed_tree[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\t\tcompressed_layers[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\tlower = breakpoints[feature_name][len(breakpoints[feature_name]) - 1]\n",
"\tupper = None\n",
"\tmembers = buckets_id[feature_name][len(buckets_id[feature_name]) - 1]\n",
"\tpaths = buckets_id[feature_name][len(buckets_id[feature_name]) - 1]\n",
"\tclasses = buckets_class[feature_name][len(buckets_class[feature_name]) - 1]\n",
"\t#print(f\"{feature_name} = [{lower}, {upper}]: {members}\")\n",
"\tcompressed_tree[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\t#print(\"=\"*40)"
"\tcompressed_layers[feature_name].append({\"min\": lower, \"max\": upper, \"paths\": paths, \"classes\": classes})\n",
"\t#print(\"=\"*40)\n",
"\n",
"compressed_tree = {\n",
"\t\"paths\": path_ids,\n",
"\t\"classes\": path_classes,\n",
"\t\"layers\": compressed_layers,\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 8,
"id": "561b0bc1",
"metadata": {},
"outputs": [],
@@ -229,7 +241,7 @@
],
"metadata": {
"kernelspec": {
"display_name": "switch",
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
@@ -243,7 +255,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
"version": "3.12.9"
}
},
"nbformat": 4,
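The last hunk above turns each feature's sorted breakpoints into a list of buckets bounded by consecutive breakpoints, with None standing in for an unbounded end, and packs them into compressed_layers. Below is a minimal sketch of that bucket layout; it assumes the breakpoints are already sorted and that there is one more bucket than there are breakpoints, and the helper name intervals_from_breakpoints is illustrative rather than taken from the notebook.

```python
from collections import defaultdict

def intervals_from_breakpoints(breakpoints):
    # breakpoints: {feature_name: sorted list of split values}
    # returns {feature_name: [{"min": ..., "max": ...}, ...]} where None marks
    # an open end, matching the {"min", "max"} entries written to compressed_tree.json
    layers = defaultdict(list)
    for feature, bps in breakpoints.items():
        bounds = [None] + list(bps) + [None]
        for lower, upper in zip(bounds[:-1], bounds[1:]):
            layers[feature].append({"min": lower, "max": upper})
    return dict(layers)

# example: two breakpoints produce three buckets
# intervals_from_breakpoints({"src": [5, 9]})
# -> {"src": [{"min": None, "max": 5}, {"min": 5, "max": 9}, {"min": 9, "max": None}]}
```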
TreeToRMT.ipynb (new file), 404 lines
@@ -0,0 +1,404 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "58fc6db9",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e07be4b3",
"metadata": {},
"outputs": [],
"source": [
"f = open(\"compressed_tree.json\")\n",
"tree = json.loads(f.read())\n",
"layers = tree[\"layers\"]\n",
"classes = tree[\"classes\"]\n",
"f.close()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "1516ff91",
"metadata": {},
"outputs": [],
"source": [
"field_width = {\n",
"\t\"src\": 16,\n",
"\t\"dst\": 16,\n",
"\t\"protocl\": 8,\n",
"}"
]
},
{
"cell_type": "markdown",
"id": "f9193827",
"metadata": {},
"source": [
"# Worst Case RMT"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "5e37cfc5",
"metadata": {},
"outputs": [],
"source": [
"def worst_case_rmt(tree):\n",
"\trmt = []\n",
"\tstep = 0\n",
"\n",
"\ttcam_bits = 0\n",
"\tram_bits = 0\n",
"\n",
"\tfor layer in layers:\n",
"\t\tnum_ranges = len(layers[layer])\n",
"\t\t# assume that each range requires all of 2*k prefixes when performing prefix expansion\n",
"\t\t# therefore there are 2*k * R for R ranges and width k\n",
"\t\tnum_prefixes = 2 * field_width[layer] * num_ranges\n",
"\t\tprefix_width = field_width[layer]\n",
"\n",
"\t\ttcam = {\n",
"\t\t\t\"id\": f\"{layer}_range\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"ternary\",\n",
"\t\t\t\"entries\": num_prefixes,\n",
"\t\t\t\"key_size\": prefix_width\n",
"\t\t}\n",
"\t\ttcam_bits += num_prefixes * prefix_width\n",
"\n",
"\t\t# assume basic pointer reuse for metadata storage\n",
"\t\tram = {\n",
"\t\t\t\"id\": f\"{layer}_meta\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"exact\",\n",
"\t\t\t\"method\": \"index\",\n",
"\t\t\t\"key_size\": math.ceil(math.log2(num_ranges)),\n",
"\t\t\t\"data_size\": len(classes)\n",
"\t\t}\n",
"\t\tram_bits += num_ranges * len(classes)\n",
"\n",
"\t\trmt.append(tcam)\n",
"\t\trmt.append(ram)\n",
"\n",
"\t\tstep += 1\n",
"\n",
"\treturn rmt, tcam_bits, ram_bits\n",
"\n",
"x, tcam_bits, ram_bits = worst_case_rmt(tree)\n",
"f = open(\"worst_case_rmt.json\", \"w+\")\n",
"f.write(json.dumps(x, indent=4))\n",
"f.close()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "0dc1d6d4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"TCAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"SRAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"id mapping: \n",
"[['dst_range', 'dst_meta'], ['src_range', 'src_meta'], ['protocl_range', 'protocl_meta'], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\n",
"TCAM bits: 13184\n",
"RAM bits: 504\n"
]
}
],
"source": [
"! command python3 ideal-rmt-simulator/sim.py naive_rmt.json\n",
"print(f\"TCAM bits: {tcam_bits}\")\n",
"print(f\"RAM bits: {ram_bits}\")"
]
},
{
"cell_type": "markdown",
"id": "2a628655",
"metadata": {},
"source": [
"# Naive Range Expansion "
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fb9febe9",
"metadata": {},
"outputs": [],
"source": [
"# shamelessly stolen from: https://github.com/autolyticus/range-to-prefix/blob/master/rangetoprefix.C\n",
"\n",
"def int_to_bin(i, width):\n",
"\treturn bin(i)[2:].zfill(width)\n",
"\n",
"def increment_dc(pfx):\n",
"\tidx = pfx.find(\"*\")\n",
"\tif idx == -1:\n",
"\t\tidx = len(pfx)\n",
"\tidx = idx - 1\n",
"\t#print(pfx, pfx[:idx])\n",
"\treturn pfx[:idx] + \"*\" + pfx[idx+1:]\n",
"\t\n",
"def can_merge(pfx_a, pfx_b):\n",
"\tpfx_a = pfx_a.replace(\"*\", \"\")\n",
"\tpfx_b = pfx_b.replace(\"*\", \"\")\n",
"\treturn pfx_a[:-1] == pfx_b[:-1] and pfx_a[-1] != pfx_b[-1]\n",
"\n",
"def merge(pfx_a, prefixes):\n",
"\tpfx_a = increment_dc(pfx_a)\n",
"\tprefixes[-1] = pfx_a\n",
"\n",
"\tfor i in range(len(prefixes) - 2, -1, -1):\n",
"\t\tif can_merge(prefixes[i], prefixes[i+1]):\n",
"\t\t\tprefixes.pop()\n",
"\t\t\tpfx = increment_dc(prefixes[i])\n",
"\t\t\tprefixes[i] = pfx\n",
"\n",
"def convert_range(lower, upper, width):\n",
"\tprefixes = []\n",
"\tprefix = int_to_bin(lower, width)\n",
"\tprefixes.append(prefix)\n",
"\tnorm_upper = min(upper, 2**width-1)\n",
"\tfor i in range(lower+1, norm_upper+1):\n",
"\t\tprefix = int_to_bin(i, width)\n",
"\t\tif can_merge(prefix, prefixes[-1]):\n",
"\t\t\tmerge(prefix, prefixes)\n",
"\t\telse:\n",
"\t\t\tprefixes.append(prefix)\n",
"\treturn prefixes"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "55167c28",
"metadata": {},
"outputs": [],
"source": [
"def naive_rmt(tree):\n",
"\trmt = []\n",
"\tstep = 0\n",
"\n",
"\ttcam_bits = 0\n",
"\tram_bits = 0\n",
"\n",
"\tfor layer in layers:\n",
"\t\tnum_prefixes = 0\n",
"\t\tprefix_width = field_width[layer]\n",
"\t\t# for each range in the layer, convert the ranges to prefixes using naive range expansion\n",
"\t\tfor r in layers[layer]:\n",
"\t\t\tif r[\"min\"] == None:\n",
"\t\t\t\tr[\"min\"] = 0\n",
"\t\t\telif r[\"max\"] == None:\n",
"\t\t\t\tr[\"max\"] = 2 ** prefix_width\n",
"\t\t\tprefixes = convert_range(r[\"min\"], r[\"max\"], prefix_width)\n",
"\t\t\tr[\"prefixes\"] = prefixes\n",
"\t\t\tnum_prefixes += len(prefixes)\n",
"\t\t\ttcam_bits += len(prefixes) * prefix_width\n",
"\n",
"\t\ttcam = {\n",
"\t\t\t\"id\": f\"{layer}_range\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"ternary\",\n",
"\t\t\t\"entries\": num_prefixes,\n",
"\t\t\t\"key_size\": prefix_width,\n",
"\t\t\t\"ranges\": layers[layer]\n",
"\t\t}\n",
"\n",
"\t\tnum_ranges = len(layers[layer])\n",
"\t\t# assume no pointer reuse for metadata storage\n",
"\t\tram = {\n",
"\t\t\t\"id\": f\"{layer}_meta\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"exact\",\n",
"\t\t\t\"method\": \"index\",\n",
"\t\t\t\"key_size\": math.ceil(math.log2(num_ranges)),\n",
"\t\t\t\"data_size\": len(classes)\n",
"\t\t}\n",
"\t\tram_bits += num_ranges * len(classes)\n",
"\n",
"\t\trmt.append(tcam)\n",
"\t\trmt.append(ram)\n",
"\n",
"\t\tstep += 1\n",
"\n",
"\treturn rmt, tcam_bits, ram_bits\n",
"\n",
"x, tcam_bits, ram_bits = naive_rmt(tree)\n",
"f = open(\"naive_rmt.json\", \"w+\")\n",
"f.write(json.dumps(x, indent=4))\n",
"f.close()\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "48011528",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"TCAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"SRAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"id mapping: \n",
"[['dst_range', 'dst_meta'], ['src_range', 'src_meta'], ['protocl_range', 'protocl_meta'], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\n",
"TCAM bits: 3320\n",
"RAM bits: 504\n"
]
}
],
"source": [
"! command python3 ideal-rmt-simulator/sim.py naive_rmt.json\n",
"print(f\"TCAM bits: {tcam_bits}\")\n",
"print(f\"RAM bits: {ram_bits}\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "64b7271e",
"metadata": {},
"outputs": [],
"source": [
"# for this technique, we note that given disjoint ranges [0,a][a,b],[b,c] ...\n",
"# then if using a TCAM that selects the first matching prefix, then [0,a],[0,b],[0,c] would be equivalent\n",
"# this is because if for some k<a, even though the range [0,b] could be selected, as long as the prefixes for [0,a] are before [0,b] then the correct prefix will still be selected\n",
"\n",
"def priority_aware(tree):\n",
"\trmt = []\n",
"\tstep = 0\n",
"\n",
"\ttcam_bits = 0\n",
"\tram_bits = 0\n",
"\n",
"\tfor layer in layers:\n",
"\t\tnum_prefixes = 0\n",
"\t\tprefix_width = field_width[layer]\n",
"\t\t# for each range, run the regular prefix expansion, and also the prefix expansion setting the minimum to 0\n",
"\t\t# then check which set of prefixes would be better\n",
"\t\t# we will assume the ranges are already disjoin and in the correct order\n",
"\t\tfor r in layers[layer]:\n",
"\t\t\tif r[\"min\"] == None:\n",
"\t\t\t\tr[\"min\"] = 0\n",
"\t\t\telif r[\"max\"] == None:\n",
"\t\t\t\tr[\"max\"] = 2 ** prefix_width\n",
"\t\t\tregular_prefixes = convert_range(r[\"min\"], r[\"max\"], prefix_width)\n",
"\t\t\tzero_start_prefixes = convert_range(0, r[\"max\"], prefix_width)\n",
"\n",
"\t\t\tif len(regular_prefixes) <= len(zero_start_prefixes):\n",
"\t\t\t\tpfx_type = \"exact\"\n",
"\t\t\t\tprefixes = regular_prefixes\n",
"\t\t\telse:\n",
"\t\t\t\tpfx_type = \"zero\"\n",
"\t\t\t\tprefixes = zero_start_prefixes\n",
"\n",
"\t\t\tr[\"prefixes\"] = prefixes\n",
"\t\t\tr[\"prefix_type\"] = pfx_type\n",
"\t\t\tnum_prefixes += len(prefixes)\n",
"\t\t\ttcam_bits += len(prefixes) * prefix_width\n",
"\n",
"\t\ttcam = {\n",
"\t\t\t\"id\": f\"{layer}_range\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"ternary\",\n",
"\t\t\t\"entries\": num_prefixes,\n",
"\t\t\t\"key_size\": prefix_width,\n",
"\t\t\t\"ranges\": layers[layer]\n",
"\t\t}\n",
"\n",
"\t\tnum_ranges = len(layers[layer])\n",
"\t\t# assume no pointer reuse for metadata storage\n",
"\t\tram = {\n",
"\t\t\t\"id\": f\"{layer}_meta\",\n",
"\t\t\t\"step\": step,\n",
"\t\t\t\"match\": \"exact\",\n",
"\t\t\t\"method\": \"index\",\n",
"\t\t\t\"key_size\": math.ceil(math.log2(num_ranges)),\n",
"\t\t\t\"data_size\": len(classes)\n",
"\t\t}\n",
"\t\tram_bits += num_ranges * len(classes)\n",
"\n",
"\t\trmt.append(tcam)\n",
"\t\trmt.append(ram)\n",
"\n",
"\t\tstep += 1\n",
"\n",
"\treturn rmt, tcam_bits, ram_bits\n",
"\n",
"x, tcam_bits, ram_bits = priority_aware(tree)\n",
"f = open(\"priority_aware.json\", \"w+\")\n",
"f.write(json.dumps(x, indent=4))\n",
"f.close()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "cd706e41",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"TCAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"SRAM mapping: \n",
"[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n",
"id mapping: \n",
"[['dst_range', 'dst_meta'], ['src_range', 'src_meta'], ['protocl_range', 'protocl_meta'], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []]\n",
"TCAM bits: 2152\n",
"RAM bits: 504\n"
]
}
],
"source": [
"! command python3 ideal-rmt-simulator/sim.py priority_aware.json\n",
"print(f\"TCAM bits: {tcam_bits}\")\n",
"print(f\"RAM bits: {ram_bits}\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
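As a rough sanity check on the sizing logic in TreeToRMT.ipynb: worst_case_rmt charges every range the full 2*k prefixes of a k-bit field, so a 16-bit field costs 2*16 entries of 16 bits (512 TCAM bits) per range, while naive_rmt and priority_aware only charge the prefixes that convert_range actually emits. The snippet below is a hedged usage sketch that assumes convert_range from the notebook is in scope; the example ranges are made up.

```python
# aligned ranges collapse to few prefixes, misaligned ones need more,
# but never more than the 2*k bound assumed by worst_case_rmt
aligned = convert_range(0, 7, 8)      # [0, 7] in an 8-bit field
print(aligned)                        # expect a single prefix: ['00000***']

misaligned = convert_range(1, 6, 8)   # [1, 6] cannot be one prefix
print(len(misaligned), misaligned)    # expect 4 entries covering 1, 2-3, 4-5, 6

# worst-case bound for this field: 2 * 8 = 16 prefixes of 8 bits per range
```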
ideal-rmt-simulator (submodule), 1 change
Submodule ideal-rmt-simulator added at 852153f017