cse151b-final-project/dqn_wordle.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 83,
"metadata": {},
"outputs": [],
"source": [
"import gym\n",
"import gym_wordle\n",
"from stable_baselines3 import DQN, PPO, common\n",
"import numpy as np\n",
"import tqdm"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<Monitor<WordleEnv instance>>\n"
]
}
],
"source": [
"env = gym_wordle.wordle.WordleEnv()\n",
"env = common.monitor.Monitor(env)\n",
"\n",
"print(env)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using cuda device\n",
"Wrapping the env in a DummyVecEnv.\n"
]
}
],
"source": [
"total_timesteps = 1000\n",
"model = PPO(\"MlpPolicy\", env, verbose=1)\n",
"model.learn(total_timesteps=total_timesteps)"
]
},
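{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optional sanity check after training (a sketch, not part of the original run): stable_baselines3's `evaluate_policy` rolls the policy out for a few episodes on the monitored env and reports the mean episodic reward. The episode count below is an arbitrary choice.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from stable_baselines3.common.evaluation import evaluate_policy\n",
"\n",
"# Roll out the trained policy and report mean/std episodic reward.\n",
"# n_eval_episodes=20 is an arbitrary choice for a quick check.\n",
"mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=20, deterministic=True)\n",
"print(f\"mean episodic reward: {mean_reward:.2f} +/- {std_reward:.2f}\")"
]
},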
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save(\"dqn_wordle\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model = PPO.load(\"dqn_wordle\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]] -1.0 False {}\n",
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]] -1.0 False {}\n",
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]] -1.0 False {}\n",
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [ 0 0 0 0 0 0 0 0 0 0]\n",
" [ 0 0 0 0 0 0 0 0 0 0]] -1.0 False {}\n",
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [ 0 0 0 0 0 0 0 0 0 0]] -1.0 False {}\n",
"[[16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]\n",
" [16 18 5 15 14 3 3 1 3 3]] -1.0 True {}\n"
]
},
{
"ename": "KeyError",
"evalue": "'correct'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[82], line 19\u001b[0m\n\u001b[1;32m 15\u001b[0m state, reward, done, info \u001b[38;5;241m=\u001b[39m env\u001b[38;5;241m.\u001b[39mstep(action)\n\u001b[1;32m 17\u001b[0m \u001b[38;5;28mprint\u001b[39m(state, reward, done, info)\n\u001b[0;32m---> 19\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[43minfo\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mcorrect\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m:\n\u001b[1;32m 20\u001b[0m wins \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m 22\u001b[0m \u001b[38;5;66;03m#end_rewards.append(reward == 0)\u001b[39;00m\n\u001b[1;32m 23\u001b[0m \n\u001b[1;32m 24\u001b[0m \u001b[38;5;66;03m#return np.sum(end_rewards) / len(end_rewards)\u001b[39;00m\n",
"\u001b[0;31mKeyError\u001b[0m: 'correct'"
]
}
],
"source": [
"env = gym_wordle.wordle.WordleEnv()\n",
"\n",
"for i in range(1):\n",
" \n",
" state = env.reset()\n",
"\n",
" done = False\n",
"\n",
" wins = 0\n",
"\n",
" while not done:\n",
"\n",
" action, _states = model.predict(state, deterministic=True)\n",
"\n",
" state, reward, done, info = env.step(action)\n",
"\n",
" print(state, reward, done, info)\n",
"\n",
" if info[\"correct\"]:\n",
" wins += 1\n",
" \n",
" #end_rewards.append(reward == 0)\n",
" \n",
"#return np.sum(end_rewards) / len(end_rewards)\n"
]
},
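{
"cell_type": "markdown",
"metadata": {},
"source": [
"A rough win-rate estimate over multiple episodes (a sketch). Following the commented-out logic in the cell above, an episode is counted as a win when the terminal reward is 0; whether that is the right success signal depends on gym_wordle's reward scheme, so treat it as an assumption. The episode count is arbitrary.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"n_episodes = 100  # arbitrary number of evaluation episodes\n",
"end_rewards = []\n",
"\n",
"for _ in range(n_episodes):\n",
"    state = env.reset()\n",
"    done = False\n",
"    while not done:\n",
"        action, _states = model.predict(state, deterministic=True)\n",
"        state, reward, done, info = env.step(action)\n",
"    # assumption: a terminal reward of 0 marks a solved puzzle (see the commented-out code above)\n",
"    end_rewards.append(reward == 0)\n",
"\n",
"print(\"estimated win rate:\", np.sum(end_rewards) / len(end_rewards))"
]
},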
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}