add data processing notebooks

2025-05-27 18:18:55 +00:00
commit cc8c27220b
4 changed files with 336 additions and 0 deletions

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
data.*

188
DecisionTree.ipynb Normal file

@@ -0,0 +1,188 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 4,
"id": "d5618056",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pandas as pd\n",
"import argparse\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.metrics import accuracy_score\n",
"from sklearn.tree import export_graphviz\n",
"import pydotplus"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "d336971a",
"metadata": {},
"outputs": [],
"source": [
"# extract argument\n",
"inputfile = \"data.csv\"\n",
"outputfile = \"tree\"\n",
"#testfile = args.t\n",
"\n",
"# output the tree\n",
"def get_lineage(tree, feature_names, file):\n",
" proto = []\n",
" src = []\n",
" dst = []\n",
" left = tree.tree_.children_left\n",
" right = tree.tree_.children_right\n",
" threshold = tree.tree_.threshold\n",
" features = [feature_names[i] for i in tree.tree_.feature]\n",
" value = tree.tree_.value\n",
" le = '<='\n",
" g = '>'\n",
" # get ids of child nodes\n",
" idx = np.argwhere(left == -1)[:, 0]\n",
" \n",
" # traverse the tree and get the node information\n",
" def recurse(left, right, child, lineage=None):\n",
" if lineage is None:\n",
" lineage = [child]\n",
" if child in left:\n",
" parent = np.where(left == child)[0].item()\n",
" split = 'l'\n",
" else:\n",
" parent = np.where(right == child)[0].item()\n",
" split = 'r'\n",
" \n",
" lineage.append((parent, split, threshold[parent], features[parent]))\n",
" if parent == 0:\n",
" lineage.reverse()\n",
" return lineage\n",
" else:\n",
" return recurse(left, right, parent, lineage)\n",
"\n",
" for j, child in enumerate(idx):\n",
" clause = ' when '\n",
" for node in recurse(left, right, child):\n",
" if len(str(node)) < 3:\n",
" continue\n",
" i = node\n",
" \n",
" if i[1] == 'l':\n",
" sign = le\n",
" else:\n",
" sign = g\n",
" clause = clause + i[3] + sign + str(i[2]) + ' and '\n",
" \n",
" # wirte the node information into text file\n",
" a = list(value[node][0])\n",
" ind = a.index(max(a))\n",
" clause = clause[:-4] + ' then ' + str(ind)\n",
" file.write(clause)\n",
" file.write(\";\\n\")\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "b96f3403",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"train accuracy: 0.879490682862549\n",
"test accuracy: 0.879490682862549\n"
]
}
],
"source": [
"# Training set X and Y\n",
"Set1 = pd.read_csv(inputfile)\n",
"Set = Set1.values.tolist()\n",
"X = [i[0:3] for i in Set]\n",
"Y =[i[3] for i in Set]\n",
"\n",
"# Test set Xt and Yt\n",
"Set2 = pd.read_csv(inputfile)\n",
"Sett = Set2.values.tolist()\n",
"Xt = [i[0:3] for i in Set]\n",
"Yt =[i[3] for i in Set]\n",
"\n",
"#class_names=['iperf','memcached','ping','sparkglm','sparkkmeans']\n",
"feature_names=['proto','src','dst']\n",
"\n",
"# prepare training and testing set\n",
"X = np.array(X)\n",
"Y = np.array(Y)\n",
"Xt = np.array(Xt)\n",
"Yt = np.array(Yt)\n",
"\n",
"# decision tree fit\n",
"dt = DecisionTreeClassifier(max_depth = 5)\n",
"dt.fit(X, Y)\n",
"Predict_Y = dt.predict(X)\n",
"print(f\"train accuracy: {accuracy_score(Y, Predict_Y)}\")\n",
"\n",
"Predict_Yt = dt.predict(Xt)\n",
"print(f\"test accuracy: {accuracy_score(Yt, Predict_Yt)}\")\n",
"\n",
"# output the tree in a text file, write it\n",
"threshold = dt.tree_.threshold\n",
"features = [feature_names[i] for i in dt.tree_.feature]\n",
"proto = []\n",
"src = []\n",
"dst = []\n",
"for i, fe in enumerate(features):\n",
" \n",
" if fe == 'proto':\n",
" proto.append(threshold[i])\n",
" elif fe == 'src':\n",
" if threshold[i] != -2.0:\n",
" src.append(threshold[i])\n",
" else:\n",
" dst.append(threshold[i])\n",
"proto = [int(i) for i in proto]\n",
"src = [int(i) for i in src]\n",
"dst = [int(i) for i in dst]\n",
"proto.sort()\n",
"src.sort()\n",
"dst.sort()\n",
"tree = open(outputfile,\"w+\")\n",
"tree.write(\"proto = \")\n",
"tree.write(str(proto))\n",
"tree.write(\";\\n\")\n",
"tree.write(\"src = \")\n",
"tree.write(str(src))\n",
"tree.write(\";\\n\")\n",
"tree.write(\"dst = \")\n",
"tree.write(str(dst))\n",
"tree.write(\";\\n\")\n",
"get_lineage(dt,feature_names,tree)\n",
"tree.close()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "switch",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

142
ExtractDataset.ipynb Normal file

@@ -0,0 +1,142 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "97e76d73",
"metadata": {},
"outputs": [],
"source": [
"from scapy.all import *\n",
"import numpy as np\n",
"import pandas as pd\n",
"import argparse\n",
"import os\n",
"\n",
"inputfile = \"data.pcap\"\n",
"outputfile = \"data.csv\"\n",
"\n",
"mac_to_device = {\n",
" \"44:65:0d:56:cc:d3\": \"Amazon Echo\",\n",
" \"e0:76:d0:3f:00:ae\": \"August Doorbell Cam\",\n",
" \"70:88:6b:10:0f:c6\": \"Awair air quality monitor\",\n",
" \"b4:75:0e:ec:e5:a9\": \"Belkin Camera\",\n",
" \"ec:1a:59:83:28:11\": \"Belkin Motion Sensor\",\n",
" \"ec:1a:59:79:f4:89\": \"Belkin Switch\",\n",
" \"74:6a:89:00:2e:25\": \"Blipcare BP Meter\",\n",
" \"7c:70:bc:5d:5e:dc\": \"Canary Camera\",\n",
" \"30:8c:fb:2f:e4:b2\": \"Dropcam\",\n",
" \"6c:ad:f8:5e:e4:61\": \"Google Chromecast\",\n",
" \"28:c2:dd:ff:a5:2d\": \"Hello Barbie\",\n",
" \"70:5a:0f:e4:9b:c0\": \"HP Printer\",\n",
" \"74:c6:3b:29:d7:1d\": \"iHome PowerPlug\",\n",
" \"d0:73:d5:01:83:08\": \"LiFX Bulb\",\n",
" \"18:b4:30:25:be:e4\": \"NEST Smoke Sensor\",\n",
" \"70:ee:50:18:34:43\": \"Netatmo Camera\",\n",
" \"70:ee:50:03:b8:ac\": \"Netatmo Weather station\",\n",
" \"00:17:88:2b:9a:25\": \"Phillip Hue Lightbulb\",\n",
" \"e0:76:d0:33:bb:85\": \"Pixstart photo frame\",\n",
" \"88:4a:ea:31:66:9d\": \"Ring Door Bell\",\n",
" \"00:16:6c:ab:6b:88\": \"Samsung Smart Cam\",\n",
" \"d0:52:a8:00:67:5e\": \"Smart Things\",\n",
" \"f4:f2:6d:93:51:f1\": \"TP-Link Camera\",\n",
" \"50:c7:bf:00:56:39\": \"TP-Link Plug\",\n",
" \"18:b7:9e:02:20:44\": \"Triby Speaker\",\n",
" \"00:24:e4:10:ee:4c\": \"Withings Baby Monitor\",\n",
" \"00:24:e4:1b:6f:96\": \"Withings Scale\",\n",
" \"00:24:e4:20:28:c6\": \"Withings sleep sensor\",\n",
" \"00:24:e4:11:18:a8\": \"Withings\"\n",
"}"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "119623a5",
"metadata": {},
"outputs": [],
"source": [
"#read the pcap file and extract the features for each packet\n",
"all_packets = rdpcap(inputfile)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f5584562",
"metadata": {},
"outputs": [],
"source": [
"results = []\n",
"for packet in all_packets:\n",
" size = len(packet)\n",
" try:\n",
" proto = packet.proto\n",
" except AttributeError:\n",
" proto = 0\n",
" try:\n",
" sport = packet.sport\n",
" dport = packet.dport\n",
" except AttributeError:\n",
" sport = 0\n",
" dport = 0\n",
"\n",
" proto = int(proto)\n",
" sport = int(sport)\n",
" dport = int(dport)\n",
"\n",
" if \"Ether\" in packet:\n",
" eth_dst = packet[\"Ether\"].dst\n",
" if eth_dst in mac_to_device:\n",
" classification = mac_to_device[eth_dst]\n",
" else:\n",
" classification = \"other\"\n",
" else:\n",
" classification = \"other\"\n",
"\n",
" metric = [proto,sport,dport,classification]\n",
" results.append(metric)\n",
"results = (np.array(results)).T"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "2e04c2d1",
"metadata": {},
"outputs": [],
"source": [
"# store the features in the dataframe\n",
"dataframe = pd.DataFrame({'protocl':results[0],'src':results[1],'dst':results[2],'classfication':results[3]})\n",
"columns = ['protocl','src','dst','classfication']\n",
"\n",
"# save the dataframe to the csv file, if not exsit, create one.\n",
"if os.path.exists(outputfile):\n",
" dataframe.to_csv(outputfile,index=False,sep=',',mode='a',columns = columns, header=False)\n",
"else:\n",
" dataframe.to_csv(outputfile,index=False,sep=',',columns = columns)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "switch",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.7"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

5
requirements.txt Normal file

@@ -0,0 +1,5 @@
scapy
numpy
pandas
scikit-learn
pydotplus