diff --git a/app/cache/idealcache.py b/app/cache/idealcache.py
new file mode 100644
index 0000000..1a1ce6c
--- /dev/null
+++ b/app/cache/idealcache.py
@@ -0,0 +1,16 @@
+from .cache import Cache
+from database import get_user_profile
+
+class IdealCache(Cache):
+
+    def __init__(self, limit: int):
+        pass  # limit is intentionally ignored: the ideal cache needs no capacity bound
+
+    def get(self, key):
+        return get_user_profile(key)  # always "hits" by reading straight from the DB (no simulated delay)
+
+    def put(self, key, val):
+        return False
+    
+    def invalidate(self, key):
+        return False
\ No newline at end of file
diff --git a/app/cache/nocache.py b/app/cache/nocache.py
new file mode 100644
index 0000000..dc0c822
--- /dev/null
+++ b/app/cache/nocache.py
@@ -0,0 +1,15 @@
+from .cache import Cache
+
+class NoCache(Cache):
+
+    def __init__(self, limit: int):
+        pass  # limit is intentionally ignored: this cache never stores anything
+
+    def get(self, key):
+        return None
+
+    def put(self, key, val):
+        return False
+    
+    def invalidate(self, key):
+        return False
\ No newline at end of file
diff --git a/app/config_ideal.yaml b/app/config_ideal.yaml
new file mode 100644
index 0000000..ba5bab5
--- /dev/null
+++ b/app/config_ideal.yaml
@@ -0,0 +1,4 @@
+cache_strategy: "Ideal"
+cache_limit: 50
+l2_cache_limit: 100 # unused
+db_file: "llmData_sns.json" # Change this to the name of any json file within the "database/datastore" folder
\ No newline at end of file
diff --git a/app/config_nocache.yaml b/app/config_nocache.yaml
new file mode 100644
index 0000000..0c8ec80
--- /dev/null
+++ b/app/config_nocache.yaml
@@ -0,0 +1,4 @@
+cache_strategy: "None"
+cache_limit: 50
+l2_cache_limit: 100 # unused
+db_file: "llmData_sns.json" # Change this to the name of any json file within the "database/datastore" folder
\ No newline at end of file
diff --git a/app/main.py b/app/main.py
index 5b80676..470c48e 100644
--- a/app/main.py
+++ b/app/main.py
@@ -4,6 +4,8 @@ from cache.cache import BaselineCache
 from cache.prefetch_cache import PrefetchCache
 from cache.tiered_cache import TieredCache
 from cache.eviction_seive import SeiveCache
+from cache.nocache import NoCache
+from cache.idealcache import IdealCache
 from config import CACHE_STRATEGY, CACHE_LIMIT, L2_CACHE_LIMIT
 import time
 
@@ -22,6 +24,12 @@ elif CACHE_STRATEGY == "Tiered":
 elif CACHE_STRATEGY == "Seive":
     print("Using seive cache strategy")
     cache = SeiveCache(limit=CACHE_LIMIT)
+elif CACHE_STRATEGY == "None":
+    print("Using no cache strategy")
+    cache = NoCache(limit=CACHE_LIMIT)
+elif CACHE_STRATEGY == "Ideal":
+    print("Using ideal cache strategy")
+    cache = IdealCache(limit=CACHE_LIMIT)
 else:
     raise ValueError(f"Invalid CACHE_STRATEGY: {CACHE_STRATEGY}")
 
@@ -42,7 +50,7 @@ def fetch_user_profile(user_id: str):
         return {"user_id": user_id, "profile": cached_profile, "source": "cache", "time_ms": (time.time() - start) * 1000}
 
     profile = get_user_profile(user_id)
-    time.sleep(10 / 1000) # simulate 10 ms db delay
+    time.sleep(10 / 1000) # simulate 10 ms db delay, we do this here instead of the actual db in the ideal cache case
     if profile is None:
         raise HTTPException(status_code=404, detail="User not found")
 
diff --git a/tests/random_readonly.py b/tests/random_readonly.py
index ab1b671..6b74518 100644
--- a/tests/random_readonly.py
+++ b/tests/random_readonly.py
@@ -5,6 +5,7 @@ import random
 import json
 from tqdm import tqdm
 import time
+from utils import print_report
 
 baseurl = "http://localhost:8000"
 
@@ -36,21 +37,4 @@ for i in tqdm(range(10000)):
     hits.append(content["source"] == "cache")
 end = time.time()
 
-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-
-hits_time = 0
-miss_time = 0
-for i in range(len(times)):
-    if hits[i]:
-        hits_time += times[i]
-    else:
-        miss_time += times[i]
-total_time = hits_time + miss_time
-
-print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
-print(f"average response time (ms)           : {total_time / len(times)}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count}")
-print(f"average cache miss response time (ms): {miss_time / miss_count}")
-print(f"cache throughput (requests / ms)     : { len(times) / total_time}")
-print(f"real throughput  (requests / ms)     : { len(times) / (end - start) / 1000}")
\ No newline at end of file
+print_report(hits, times, end - start)
\ No newline at end of file
diff --git a/tests/utils.py b/tests/utils.py
new file mode 100644
index 0000000..ba29975
--- /dev/null
+++ b/tests/utils.py
@@ -0,0 +1,26 @@
+def print_report(hits, request_times, real_time):
+
+    hits_count = sum(hits)
+    miss_count = len(hits) - hits_count
+
+    hits_time = 0
+    miss_time = 0
+    for i in range(len(request_times)):
+        if hits[i]:
+            hits_time += request_times[i]
+        else:
+            miss_time += request_times[i]
+    total_time = hits_time + miss_time
+
+    print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
+    print(f"average response time (ms)           : {total_time / len(request_times)}")
+    if hits_count > 0:
+        print(f"average cache hit response time (ms) : {hits_time / hits_count}")
+    else:
+        print(f"average cache hit response time (ms) : N/A")
+    if miss_count > 0:
+        print(f"average cache miss response time (ms): {miss_time / miss_count}")
+    else:
+        print(f"average cache miss response time (ms): N/A")
+    print(f"cache throughput (requests / s)      : { len(request_times) / total_time * 1000}")
+    print(f"real throughput  (requests / s)      : { len(request_times) / (real_time)}")
\ No newline at end of file
diff --git a/tests/weighred_freind_readonly.py b/tests/weighted_freind_readonly.py
similarity index 61%
rename from tests/weighred_freind_readonly.py
rename to tests/weighted_freind_readonly.py
index 0ac48ad..dd9e55d 100644
--- a/tests/weighred_freind_readonly.py
+++ b/tests/weighted_freind_readonly.py
@@ -5,6 +5,7 @@ import random
 import json
 from tqdm import tqdm
 import time
+from utils import print_report
 
 baseurl = "http://localhost:8000"
 
@@ -43,21 +44,4 @@ for i in tqdm(range(10000)):
         curr_user = generate_random()
 end = time.time()
 
-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-
-hits_time = 0
-miss_time = 0
-for i in range(len(times)):
-    if hits[i]:
-        hits_time += times[i]
-    else:
-        miss_time += times[i]
-total_time = hits_time + miss_time
-
-print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
-print(f"average response time (ms)           : {total_time / len(times)}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count}")
-print(f"average cache miss response time (ms): {miss_time / miss_count}")
-print(f"cache throughput (requests / ms)     : { len(times) / total_time}")
-print(f"real throughput  (requests / ms)     : { len(times) / (end - start) / 1000}")
\ No newline at end of file
+print_report(hits, times, end - start)
\ No newline at end of file
diff --git a/tests/weighted_frequentuser_readwrite.py b/tests/weighted_frequentuser_readwrite.py
index ec9abe7..95d6b64 100644
--- a/tests/weighted_frequentuser_readwrite.py
+++ b/tests/weighted_frequentuser_readwrite.py
@@ -3,6 +3,7 @@ import random
 import json
 from tqdm import tqdm
 import time
+from utils import print_report
 
 baseurl = "http://localhost:8000"
 
@@ -61,16 +62,4 @@ for i in tqdm(range(10000)):
 
 end = time.time()
 
-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-
-hits_time = sum(times[i] for i in range(len(times)) if hits[i])
-miss_time = sum(times[i] for i in range(len(times)) if not hits[i])
-total_time = hits_time + miss_time
-
-print(f"hits: {hits_count} misses: {miss_count} ratio: {hits_count / (hits_count + miss_count):.2f}")
-print(f"average response time (ms)           : {total_time / len(times):.2f}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count if hits_count else 0:.2f}")
-print(f"average cache miss response time (ms): {miss_time / miss_count if miss_count else 0:.2f}")
-print(f"cache throughput (requests / ms)     : {len(times) / total_time:.2f}")
-print(f"real throughput  (requests / ms)     : {len(times) / (end - start) / 1000:.2f}")
\ No newline at end of file
+print_report(hits, times, end - start)
\ No newline at end of file