mirror of https://github.com/ltcptgeneral/cs239-caching.git

add no cache and ideal cache,

move printing cache reports to util file
Arthur Lu 2025-03-02 21:22:31 +00:00 committed by root
parent 50b5ea0acd
commit 35ea5a234f
9 changed files with 80 additions and 50 deletions

16 app/cache/idealcache.py vendored Normal file

@@ -0,0 +1,16 @@
from .cache import Cache
from database import get_user_profile

class IdealCache(Cache):
    def __init__(self, limit: int):
        pass

    def get(self, key):
        return get_user_profile(key)

    def put(self, key, val):
        return False

    def invalidate(self, key):
        return False

15 app/cache/nocache.py vendored Normal file

@@ -0,0 +1,15 @@
from .cache import Cache

class NoCache(Cache):
    def __init__(self, limit: int):
        pass

    def get(self, key):
        return None

    def put(self, key, val):
        return False

    def invalidate(self, key):
        return False
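Note (not part of the commit): the two new classes are measurement baselines rather than real caches. NoCache.get always returns None, so every request takes the miss path and pays the simulated database delay; IdealCache.get answers every lookup straight from the database, so every request is reported as a cache hit. Together they bracket the other strategies from below and above. A minimal, self-contained sketch of that difference, with get_user_profile stubbed out because the real database module is not shown in this diff:

# Illustrative only: NoCache and IdealCache bracket the real strategies.
# get_user_profile is a stub; the real one lives in app/database.
def get_user_profile(key):
    return {"user_id": key, "name": f"user-{key}"}

class NoCache:
    def get(self, key):
        return None                    # every lookup misses

class IdealCache:
    def get(self, key):
        return get_user_profile(key)   # every lookup is answered, i.e. a hit

for cache in (NoCache(), IdealCache()):
    hit = cache.get("alice") is not None
    print(f"{cache.__class__.__name__}: hit={hit}")
# prints: NoCache: hit=False, then IdealCache: hit=True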

4 app/config_ideal.yaml Normal file

@@ -0,0 +1,4 @@
cache_strategy: "Ideal"
cache_limit: 50
l2_cache_limit: 100 # unused
db_file: "llmData_sns.json" # Change this to the name of any json file within the "database/datastore" folder

4 app/config_nocache.yaml Normal file

@@ -0,0 +1,4 @@
cache_strategy: "None"
cache_limit: 50
l2_cache_limit: 100 # unused
db_file: "llmData_sns.json" # Change this to the name of any json file within the "database/datastore" folder
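The two config files only take effect through app/config.py, which is not part of this diff. The snippet below is a hedged guess at how those keys are consumed, assuming config.py loads the YAML with PyYAML and exposes module-level constants; the file name and the yaml.safe_load call are assumptions, not something shown in the commit:

# Hypothetical sketch of app/config.py (not in this diff).
# Assumes PyYAML and that the chosen YAML file sits next to config.py.
import yaml

with open("config_nocache.yaml") as f:      # or config_ideal.yaml, etc.
    _cfg = yaml.safe_load(f)

CACHE_STRATEGY = _cfg["cache_strategy"]     # "None", "Ideal", "Seive", ...
CACHE_LIMIT = _cfg["cache_limit"]           # entry limit for the main cache
L2_CACHE_LIMIT = _cfg["l2_cache_limit"]     # only used by the tiered cache
DB_FILE = _cfg["db_file"]                   # JSON datastore under database/datastore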

@@ -4,6 +4,8 @@ from cache.cache import BaselineCache
from cache.prefetch_cache import PrefetchCache
from cache.tiered_cache import TieredCache
from cache.eviction_seive import SeiveCache
+from cache.nocache import NoCache
+from cache.idealcache import IdealCache
from config import CACHE_STRATEGY, CACHE_LIMIT, L2_CACHE_LIMIT
import time
@@ -22,6 +24,12 @@ elif CACHE_STRATEGY == "Tiered":
elif CACHE_STRATEGY == "Seive":
    print("Using seive cache strategy")
    cache = SeiveCache(limit=CACHE_LIMIT)
+elif CACHE_STRATEGY == "None":
+    print("Using no cache strategy")
+    cache = NoCache(limit=CACHE_LIMIT)
+elif CACHE_STRATEGY == "Ideal":
+    print("Using ideal cache strategy")
+    cache = IdealCache(limit=CACHE_LIMIT)
else:
    raise ValueError(f"Invalid CACHE_STRATEGY: {CACHE_STRATEGY}")
@@ -42,7 +50,7 @@ def fetch_user_profile(user_id: str):
        return {"user_id": user_id, "profile": cached_profile, "source": "cache", "time_ms": (time.time() - start) * 1000}
    profile = get_user_profile(user_id)
-    time.sleep(10 / 1000) # simulate 10 ms db delay
+    time.sleep(10 / 1000) # simulate 10 ms db delay, we do this here instead of the actual db in the ideal cache case
    if profile is None:
        raise HTTPException(status_code=404, detail="User not found")
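For orientation, the hunk above sits inside the FastAPI endpoint that the test scripts call. The surrounding code is only partially visible in this diff, so the reconstruction below is a hedged sketch: the route path, the cache.put call, and the two stand-in definitions are assumptions. It shows why the 10 ms sleep lives in the endpoint's miss path rather than in the database layer: IdealCache.get calls get_user_profile directly, so an "ideal" hit skips the simulated delay.

# Hedged reconstruction of app/main.py around the hunk above (illustrative only).
import time
from fastapi import FastAPI, HTTPException

def get_user_profile(user_id):            # stand-in for database.get_user_profile
    return {"user_id": user_id, "name": f"user-{user_id}"}

class IdealCache:                         # stand-in for cache.idealcache.IdealCache
    def get(self, key):
        return get_user_profile(key)
    def put(self, key, val):
        return False

app = FastAPI()
cache = IdealCache()

@app.get("/user/{user_id}")               # assumed route
def fetch_user_profile(user_id: str):
    start = time.time()
    cached_profile = cache.get(user_id)
    if cached_profile is not None:
        # With IdealCache every request takes this branch and skips the 10 ms
        # sleep below, which is why the delay sits here and not in the db layer.
        return {"user_id": user_id, "profile": cached_profile, "source": "cache",
                "time_ms": (time.time() - start) * 1000}
    profile = get_user_profile(user_id)
    time.sleep(10 / 1000)                 # simulate 10 ms db delay on the miss path
    if profile is None:
        raise HTTPException(status_code=404, detail="User not found")
    cache.put(user_id, profile)           # assumed: misses populate the cache
    return {"user_id": user_id, "profile": profile, "source": "database",
            "time_ms": (time.time() - start) * 1000}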

@@ -5,6 +5,7 @@ import random
import json
from tqdm import tqdm
import time
+from utils import print_report

baseurl = "http://localhost:8000"
@@ -36,21 +37,4 @@ for i in tqdm(range(10000)):
    hits.append(content["source"] == "cache")
end = time.time()

-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-hits_time = 0
-miss_time = 0
-for i in range(len(times)):
-    if hits[i]:
-        hits_time += times[i]
-    else:
-        miss_time += times[i]
-total_time = hits_time + miss_time
-print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
-print(f"average response time (ms) : {total_time / len(times)}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count}")
-print(f"average cache miss response time (ms): {miss_time / miss_count}")
-print(f"cache throughput (requests / ms) : { len(times) / total_time}")
-print(f"real throughput (requests / ms) : { len(times) / (end - start) / 1000}")
+print_report(hits, times, end - start)

26 tests/utils.py Normal file

@@ -0,0 +1,26 @@
def print_report(hits, request_times, real_time):
    hits_count = sum(hits)
    miss_count = len(hits) - hits_count
    hits_time = 0
    miss_time = 0
    for i in range(len(request_times)):
        if hits[i]:
            hits_time += request_times[i]
        else:
            miss_time += request_times[i]
    total_time = hits_time + miss_time

    print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
    print(f"average response time (ms) : {total_time / len(request_times)}")
    if hits_count > 0:
        print(f"average cache hit response time (ms) : {hits_time / hits_count}")
    else:
        print(f"average cache hit response time (ms) : N/A")
    if miss_count > 0:
        print(f"average cache miss response time (ms): {miss_time / miss_count}")
    else:
        print(f"average cache miss response time (ms): N/A")
    print(f"cache throughput (requests / s) : { len(request_times) / total_time * 1000}")
    print(f"real throughput (requests / s) : { len(request_times) / (real_time)}")

@@ -5,6 +5,7 @@ import random
import json
from tqdm import tqdm
import time
+from utils import print_report

baseurl = "http://localhost:8000"
@@ -43,21 +44,4 @@ for i in tqdm(range(10000)):
    curr_user = generate_random()
end = time.time()

-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-hits_time = 0
-miss_time = 0
-for i in range(len(times)):
-    if hits[i]:
-        hits_time += times[i]
-    else:
-        miss_time += times[i]
-total_time = hits_time + miss_time
-print(f"hits: {hits_count} misses: {miss_count} ratio: { hits_count / (hits_count + miss_count)}")
-print(f"average response time (ms) : {total_time / len(times)}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count}")
-print(f"average cache miss response time (ms): {miss_time / miss_count}")
-print(f"cache throughput (requests / ms) : { len(times) / total_time}")
-print(f"real throughput (requests / ms) : { len(times) / (end - start) / 1000}")
+print_report(hits, times, end - start)

@@ -3,6 +3,7 @@ import random
import json
from tqdm import tqdm
import time
+from utils import print_report

baseurl = "http://localhost:8000"
@@ -61,16 +62,4 @@ for i in tqdm(range(10000)):
end = time.time()

-hits_count = sum(hits)
-miss_count = len(hits) - hits_count
-hits_time = sum(times[i] for i in range(len(times)) if hits[i])
-miss_time = sum(times[i] for i in range(len(times)) if not hits[i])
-total_time = hits_time + miss_time
-print(f"hits: {hits_count} misses: {miss_count} ratio: {hits_count / (hits_count + miss_count):.2f}")
-print(f"average response time (ms) : {total_time / len(times):.2f}")
-print(f"average cache hit response time (ms) : {hits_time / hits_count if hits_count else 0:.2f}")
-print(f"average cache miss response time (ms): {miss_time / miss_count if miss_count else 0:.2f}")
-print(f"cache throughput (requests / ms) : {len(times) / total_time:.2f}")
-print(f"real throughput (requests / ms) : {len(times) / (end - start) / 1000:.2f}")
+print_report(hits, times, end - start)