mirror of
https://github.com/ltcptgeneral/cse151b-final-project.git
synced 2025-10-23 10:59:21 +00:00
Compare commits
1 Commits
f40301cac9
...
gymnasium-
Author | SHA1 | Date | |
---|---|---|---|
|
cf977e4797 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -2,5 +2,3 @@
|
|||||||
**/*.zip
|
**/*.zip
|
||||||
**/__pycache__
|
**/__pycache__
|
||||||
/env
|
/env
|
||||||
**/runs/*
|
|
||||||
**/wandb/*
|
|
File diff suppressed because it is too large
Load Diff
435
dqn_wordle.ipynb
435
dqn_wordle.ipynb
@@ -6,11 +6,10 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"import gym\n",
|
|
||||||
"import gym_wordle\n",
|
"import gym_wordle\n",
|
||||||
"from stable_baselines3 import DQN, PPO, common\n",
|
"from stable_baselines3 import DQN, PPO, common\n",
|
||||||
"import numpy as np\n",
|
"import numpy as np\n",
|
||||||
"import tqdm"
|
"from tqdm import tqdm"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -43,88 +42,212 @@
|
|||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"Using cuda device\n",
|
"Using cuda device\n",
|
||||||
"Wrapping the env in a DummyVecEnv.\n"
|
"Wrapping the env in a DummyVecEnv.\n",
|
||||||
|
"---------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 2.14 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 750 |\n",
|
||||||
|
"| iterations | 1 |\n",
|
||||||
|
"| time_elapsed | 2 |\n",
|
||||||
|
"| total_timesteps | 2048 |\n",
|
||||||
|
"---------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 4.59 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 625 |\n",
|
||||||
|
"| iterations | 2 |\n",
|
||||||
|
"| time_elapsed | 6 |\n",
|
||||||
|
"| total_timesteps | 4096 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.022059526 |\n",
|
||||||
|
"| clip_fraction | 0.331 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.47 |\n",
|
||||||
|
"| explained_variance | -0.0118 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 130 |\n",
|
||||||
|
"| n_updates | 10 |\n",
|
||||||
|
"| policy_gradient_loss | -0.0851 |\n",
|
||||||
|
"| value_loss | 253 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 5.86 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 585 |\n",
|
||||||
|
"| iterations | 3 |\n",
|
||||||
|
"| time_elapsed | 10 |\n",
|
||||||
|
"| total_timesteps | 6144 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.024416003 |\n",
|
||||||
|
"| clip_fraction | 0.462 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.47 |\n",
|
||||||
|
"| explained_variance | 0.152 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 85.2 |\n",
|
||||||
|
"| n_updates | 20 |\n",
|
||||||
|
"| policy_gradient_loss | -0.0987 |\n",
|
||||||
|
"| value_loss | 218 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 4.75 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 566 |\n",
|
||||||
|
"| iterations | 4 |\n",
|
||||||
|
"| time_elapsed | 14 |\n",
|
||||||
|
"| total_timesteps | 8192 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.026305672 |\n",
|
||||||
|
"| clip_fraction | 0.45 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.47 |\n",
|
||||||
|
"| explained_variance | 0.161 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 144 |\n",
|
||||||
|
"| n_updates | 30 |\n",
|
||||||
|
"| policy_gradient_loss | -0.105 |\n",
|
||||||
|
"| value_loss | 220 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 1.47 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 554 |\n",
|
||||||
|
"| iterations | 5 |\n",
|
||||||
|
"| time_elapsed | 18 |\n",
|
||||||
|
"| total_timesteps | 10240 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.02928267 |\n",
|
||||||
|
"| clip_fraction | 0.498 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.46 |\n",
|
||||||
|
"| explained_variance | 0.167 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 127 |\n",
|
||||||
|
"| n_updates | 40 |\n",
|
||||||
|
"| policy_gradient_loss | -0.116 |\n",
|
||||||
|
"| value_loss | 207 |\n",
|
||||||
|
"----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 1.62 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 546 |\n",
|
||||||
|
"| iterations | 6 |\n",
|
||||||
|
"| time_elapsed | 22 |\n",
|
||||||
|
"| total_timesteps | 12288 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.028425258 |\n",
|
||||||
|
"| clip_fraction | 0.483 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.46 |\n",
|
||||||
|
"| explained_variance | 0.143 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 109 |\n",
|
||||||
|
"| n_updates | 50 |\n",
|
||||||
|
"| policy_gradient_loss | -0.117 |\n",
|
||||||
|
"| value_loss | 240 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 5.98 |\n",
|
||||||
|
"| ep_rew_mean | 6.14 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 541 |\n",
|
||||||
|
"| iterations | 7 |\n",
|
||||||
|
"| time_elapsed | 26 |\n",
|
||||||
|
"| total_timesteps | 14336 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.026178032 |\n",
|
||||||
|
"| clip_fraction | 0.453 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.46 |\n",
|
||||||
|
"| explained_variance | 0.174 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 141 |\n",
|
||||||
|
"| n_updates | 60 |\n",
|
||||||
|
"| policy_gradient_loss | -0.116 |\n",
|
||||||
|
"| value_loss | 235 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 3.03 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 537 |\n",
|
||||||
|
"| iterations | 8 |\n",
|
||||||
|
"| time_elapsed | 30 |\n",
|
||||||
|
"| total_timesteps | 16384 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.02457074 |\n",
|
||||||
|
"| clip_fraction | 0.423 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.45 |\n",
|
||||||
|
"| explained_variance | 0.171 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 111 |\n",
|
||||||
|
"| n_updates | 70 |\n",
|
||||||
|
"| policy_gradient_loss | -0.112 |\n",
|
||||||
|
"| value_loss | 212 |\n",
|
||||||
|
"----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 9.54 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 532 |\n",
|
||||||
|
"| iterations | 9 |\n",
|
||||||
|
"| time_elapsed | 34 |\n",
|
||||||
|
"| total_timesteps | 18432 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.024578478 |\n",
|
||||||
|
"| clip_fraction | 0.417 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.45 |\n",
|
||||||
|
"| explained_variance | 0.178 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 121 |\n",
|
||||||
|
"| n_updates | 80 |\n",
|
||||||
|
"| policy_gradient_loss | -0.114 |\n",
|
||||||
|
"| value_loss | 232 |\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"-----------------------------------------\n",
|
||||||
|
"| rollout/ | |\n",
|
||||||
|
"| ep_len_mean | 6 |\n",
|
||||||
|
"| ep_rew_mean | 3.81 |\n",
|
||||||
|
"| time/ | |\n",
|
||||||
|
"| fps | 527 |\n",
|
||||||
|
"| iterations | 10 |\n",
|
||||||
|
"| time_elapsed | 38 |\n",
|
||||||
|
"| total_timesteps | 20480 |\n",
|
||||||
|
"| train/ | |\n",
|
||||||
|
"| approx_kl | 0.022704324 |\n",
|
||||||
|
"| clip_fraction | 0.379 |\n",
|
||||||
|
"| clip_range | 0.2 |\n",
|
||||||
|
"| entropy_loss | -9.45 |\n",
|
||||||
|
"| explained_variance | 0.194 |\n",
|
||||||
|
"| learning_rate | 0.0003 |\n",
|
||||||
|
"| loss | 108 |\n",
|
||||||
|
"| n_updates | 90 |\n",
|
||||||
|
"| policy_gradient_loss | -0.112 |\n",
|
||||||
|
"| value_loss | 216 |\n",
|
||||||
|
"-----------------------------------------\n"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"data": {
|
"data": {
|
||||||
"application/vnd.jupyter.widget-view+json": {
|
|
||||||
"model_id": "6921a0721569456abf5bceac7e7b6b34",
|
|
||||||
"version_major": 2,
|
|
||||||
"version_minor": 0
|
|
||||||
},
|
|
||||||
"text/plain": [
|
"text/plain": [
|
||||||
"Output()"
|
"<stable_baselines3.ppo.ppo.PPO at 0x7f86ef4ddcd0>"
|
||||||
]
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "display_data"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"----------------------------------\n",
|
|
||||||
"| rollout/ | |\n",
|
|
||||||
"| ep_len_mean | 4.97 |\n",
|
|
||||||
"| ep_rew_mean | -63.8 |\n",
|
|
||||||
"| exploration_rate | 0.05 |\n",
|
|
||||||
"| time/ | |\n",
|
|
||||||
"| episodes | 10000 |\n",
|
|
||||||
"| fps | 1628 |\n",
|
|
||||||
"| time_elapsed | 30 |\n",
|
|
||||||
"| total_timesteps | 49995 |\n",
|
|
||||||
"----------------------------------\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "stdout",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"----------------------------------\n",
|
|
||||||
"| rollout/ | |\n",
|
|
||||||
"| ep_len_mean | 5 |\n",
|
|
||||||
"| ep_rew_mean | -70.5 |\n",
|
|
||||||
"| exploration_rate | 0.05 |\n",
|
|
||||||
"| time/ | |\n",
|
|
||||||
"| episodes | 20000 |\n",
|
|
||||||
"| fps | 662 |\n",
|
|
||||||
"| time_elapsed | 150 |\n",
|
|
||||||
"| total_timesteps | 99992 |\n",
|
|
||||||
"| train/ | |\n",
|
|
||||||
"| learning_rate | 0.0001 |\n",
|
|
||||||
"| loss | 11.7 |\n",
|
|
||||||
"| n_updates | 12497 |\n",
|
|
||||||
"----------------------------------\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/html": [
|
|
||||||
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\"></pre>\n"
|
|
||||||
],
|
|
||||||
"text/plain": []
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "display_data"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/html": [
|
|
||||||
"<pre style=\"white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace\">\n",
|
|
||||||
"</pre>\n"
|
|
||||||
],
|
|
||||||
"text/plain": [
|
|
||||||
"\n"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "display_data"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"<stable_baselines3.dqn.dqn.DQN at 0x1bfd6cc0210>"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"execution_count": 3,
|
"execution_count": 3,
|
||||||
@@ -133,9 +256,9 @@
|
|||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
"total_timesteps = 100_000\n",
|
"total_timesteps = 20_000\n",
|
||||||
"model = DQN(\"MlpPolicy\", env, verbose=1, device='cuda')\n",
|
"model = PPO(\"MlpPolicy\", env, verbose=1, device='cuda')\n",
|
||||||
"model.learn(total_timesteps=total_timesteps, log_interval=10_000, progress_bar=True)"
|
"model.learn(total_timesteps=total_timesteps)"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -144,29 +267,16 @@
|
|||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [],
|
"outputs": [],
|
||||||
"source": [
|
"source": [
|
||||||
"model.save(\"dqn_new_state\")"
|
"model.save(\"dqn_wordle\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"cell_type": "code",
|
"cell_type": "code",
|
||||||
"execution_count": 5,
|
"execution_count": 5,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [
|
"outputs": [],
|
||||||
{
|
|
||||||
"name": "stderr",
|
|
||||||
"output_type": "stream",
|
|
||||||
"text": [
|
|
||||||
"c:\\Repository\\cse151b-final-project\\env\\Lib\\site-packages\\stable_baselines3\\common\\save_util.py:166: UserWarning: Could not deserialize object lr_schedule. Consider using `custom_objects` argument to replace this object.\n",
|
|
||||||
"Exception: code() argument 13 must be str, not int\n",
|
|
||||||
" warnings.warn(\n",
|
|
||||||
"c:\\Repository\\cse151b-final-project\\env\\Lib\\site-packages\\stable_baselines3\\common\\save_util.py:166: UserWarning: Could not deserialize object exploration_schedule. Consider using `custom_objects` argument to replace this object.\n",
|
|
||||||
"Exception: code() argument 13 must be str, not int\n",
|
|
||||||
" warnings.warn(\n"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
"source": [
|
||||||
"# model = DQN.load(\"dqn_wordle\")"
|
"model = PPO.load(\"dqn_wordle\")"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -174,88 +284,38 @@
|
|||||||
"execution_count": 7,
|
"execution_count": 7,
|
||||||
"metadata": {},
|
"metadata": {},
|
||||||
"outputs": [
|
"outputs": [
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"100%|██████████| 1000/1000 [00:03<00:00, 252.17it/s]"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"name": "stdout",
|
"name": "stdout",
|
||||||
"output_type": "stream",
|
"output_type": "stream",
|
||||||
"text": [
|
"text": [
|
||||||
"[1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 0. 1. 1. 1.\n",
|
"[[ 7 18 1 19 16 3 3 3 2 3]\n",
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 0. 1.\n",
|
" [16 9 5 14 4 3 3 3 3 3]\n",
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1.\n",
|
" [16 9 5 14 4 3 3 3 3 3]\n",
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
" [16 9 5 14 4 3 3 3 3 3]\n",
|
||||||
" 0. 1. 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
" [ 7 18 1 19 16 3 3 3 2 3]\n",
|
||||||
" 1. 1. 0. 1. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
" [ 7 18 1 19 16 3 3 3 2 3]] -54 {'correct': False, 'guesses': defaultdict(<class 'int'>, {'grasp': 3, 'piend': 3})}\n",
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 0. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 0. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 0. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 1. 0. 0. 0. 0. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 0. 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 0. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 0. 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 0. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 0.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 0. 0. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 0. 0. 1. 1. 1. 1. 1. 0. 0. 0. 0. 1. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"[1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1.\n",
|
|
||||||
" 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 0. 1. 1. 1. 1. 1. 1. 1. 1. 0. 1. 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 1. 1. 1.\n",
|
|
||||||
" 1. 1. 0. 1. 1. 1. 1. 1. 1. 1. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.\n",
|
|
||||||
" 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n",
|
|
||||||
"0\n"
|
"0\n"
|
||||||
]
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stderr",
|
||||||
|
"output_type": "stream",
|
||||||
|
"text": [
|
||||||
|
"\n"
|
||||||
|
]
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"source": [
|
"source": [
|
||||||
"env = gym_wordle.wordle.WordleEnv()\n",
|
"env = gym_wordle.wordle.WordleEnv()\n",
|
||||||
"\n",
|
"\n",
|
||||||
"for i in range(1000):\n",
|
"for i in tqdm(range(1000)):\n",
|
||||||
" \n",
|
" \n",
|
||||||
" state, info = env.reset()\n",
|
" state, info = env.reset()\n",
|
||||||
"\n",
|
"\n",
|
||||||
@@ -269,41 +329,12 @@
|
|||||||
"\n",
|
"\n",
|
||||||
" state, reward, done, truncated, info = env.step(action)\n",
|
" state, reward, done, truncated, info = env.step(action)\n",
|
||||||
"\n",
|
"\n",
|
||||||
" print(state)\n",
|
|
||||||
" if info[\"correct\"]:\n",
|
" if info[\"correct\"]:\n",
|
||||||
" wins += 1\n",
|
" wins += 1\n",
|
||||||
"\n",
|
"\n",
|
||||||
"print(wins)"
|
"print(state, reward, info)\n",
|
||||||
]
|
"\n",
|
||||||
},
|
"print(wins)\n"
|
||||||
{
|
|
||||||
"cell_type": "code",
|
|
||||||
"execution_count": 6,
|
|
||||||
"metadata": {},
|
|
||||||
"outputs": [
|
|
||||||
{
|
|
||||||
"data": {
|
|
||||||
"text/plain": [
|
|
||||||
"(array([1., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
|
|
||||||
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 1., 1.,\n",
|
|
||||||
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
|
|
||||||
" 1., 1., 0., 1., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
|
|
||||||
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 1., 0., 1.,\n",
|
|
||||||
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n",
|
|
||||||
" 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
|
|
||||||
" 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
|
|
||||||
" 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
|
|
||||||
" 0., 0., 0., 0., 0., 0., 0., 1.]),\n",
|
|
||||||
" -50)"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"execution_count": 6,
|
|
||||||
"metadata": {},
|
|
||||||
"output_type": "execute_result"
|
|
||||||
}
|
|
||||||
],
|
|
||||||
"source": [
|
|
||||||
"state, reward"
|
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -330,7 +361,7 @@
|
|||||||
"name": "python",
|
"name": "python",
|
||||||
"nbconvert_exporter": "python",
|
"nbconvert_exporter": "python",
|
||||||
"pygments_lexer": "ipython3",
|
"pygments_lexer": "ipython3",
|
||||||
"version": "3.11.5"
|
"version": "3.8.10"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"nbformat": 4,
|
"nbformat": 4,
|
||||||
|
129
eric_wordle/.gitignore
vendored
129
eric_wordle/.gitignore
vendored
@@ -1,129 +0,0 @@
|
|||||||
# Byte-compiled / optimized / DLL files
|
|
||||||
__pycache__/
|
|
||||||
*.py[cod]
|
|
||||||
*$py.class
|
|
||||||
|
|
||||||
# C extensions
|
|
||||||
*.so
|
|
||||||
|
|
||||||
# Distribution / packaging
|
|
||||||
.Python
|
|
||||||
build/
|
|
||||||
develop-eggs/
|
|
||||||
dist/
|
|
||||||
downloads/
|
|
||||||
eggs/
|
|
||||||
.eggs/
|
|
||||||
lib/
|
|
||||||
lib64/
|
|
||||||
parts/
|
|
||||||
sdist/
|
|
||||||
var/
|
|
||||||
wheels/
|
|
||||||
pip-wheel-metadata/
|
|
||||||
share/python-wheels/
|
|
||||||
*.egg-info/
|
|
||||||
.installed.cfg
|
|
||||||
*.egg
|
|
||||||
MANIFEST
|
|
||||||
|
|
||||||
# PyInstaller
|
|
||||||
# Usually these files are written by a python script from a template
|
|
||||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
||||||
*.manifest
|
|
||||||
*.spec
|
|
||||||
|
|
||||||
# Installer logs
|
|
||||||
pip-log.txt
|
|
||||||
pip-delete-this-directory.txt
|
|
||||||
|
|
||||||
# Unit test / coverage reports
|
|
||||||
htmlcov/
|
|
||||||
.tox/
|
|
||||||
.nox/
|
|
||||||
.coverage
|
|
||||||
.coverage.*
|
|
||||||
.cache
|
|
||||||
nosetests.xml
|
|
||||||
coverage.xml
|
|
||||||
*.cover
|
|
||||||
*.py,cover
|
|
||||||
.hypothesis/
|
|
||||||
.pytest_cache/
|
|
||||||
|
|
||||||
# Translations
|
|
||||||
*.mo
|
|
||||||
*.pot
|
|
||||||
|
|
||||||
# Django stuff:
|
|
||||||
*.log
|
|
||||||
local_settings.py
|
|
||||||
db.sqlite3
|
|
||||||
db.sqlite3-journal
|
|
||||||
|
|
||||||
# Flask stuff:
|
|
||||||
instance/
|
|
||||||
.webassets-cache
|
|
||||||
|
|
||||||
# Scrapy stuff:
|
|
||||||
.scrapy
|
|
||||||
|
|
||||||
# Sphinx documentation
|
|
||||||
docs/_build/
|
|
||||||
|
|
||||||
# PyBuilder
|
|
||||||
target/
|
|
||||||
|
|
||||||
# Jupyter Notebook
|
|
||||||
.ipynb_checkpoints
|
|
||||||
|
|
||||||
# IPython
|
|
||||||
profile_default/
|
|
||||||
ipython_config.py
|
|
||||||
|
|
||||||
# pyenv
|
|
||||||
.python-version
|
|
||||||
|
|
||||||
# pipenv
|
|
||||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
||||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
||||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
||||||
# install all needed dependencies.
|
|
||||||
#Pipfile.lock
|
|
||||||
|
|
||||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
|
||||||
__pypackages__/
|
|
||||||
|
|
||||||
# Celery stuff
|
|
||||||
celerybeat-schedule
|
|
||||||
celerybeat.pid
|
|
||||||
|
|
||||||
# SageMath parsed files
|
|
||||||
*.sage.py
|
|
||||||
|
|
||||||
# Environments
|
|
||||||
.env
|
|
||||||
.venv
|
|
||||||
env/
|
|
||||||
venv/
|
|
||||||
ENV/
|
|
||||||
env.bak/
|
|
||||||
venv.bak/
|
|
||||||
|
|
||||||
# Spyder project settings
|
|
||||||
.spyderproject
|
|
||||||
.spyproject
|
|
||||||
|
|
||||||
# Rope project settings
|
|
||||||
.ropeproject
|
|
||||||
|
|
||||||
# mkdocs documentation
|
|
||||||
/site
|
|
||||||
|
|
||||||
# mypy
|
|
||||||
.mypy_cache/
|
|
||||||
.dmypy.json
|
|
||||||
dmypy.json
|
|
||||||
|
|
||||||
# Pyre type checker
|
|
||||||
.pyre/
|
|
@@ -1,11 +0,0 @@
|
|||||||
# N-dle Solver
|
|
||||||
|
|
||||||
A solver designed to beat New York Time's Wordle (link [here](https://www.nytimes.com/games/wordle/index.html)). If you are bored enough, can extend to solve the more general N-dle problem (for quordle, octordle, etc.)
|
|
||||||
|
|
||||||
I originally made this out of frustration for the game (and my own lack of lingual talent). One day, my friend thought she could beat my bot. To her dismay, she learned that she is no better than a machine. Let's see if you can do any better (the average number of attempts is 3.6).
|
|
||||||
|
|
||||||
## Usage:
|
|
||||||
1. Run `python main.py --n 1`
|
|
||||||
2. Follow the prompts
|
|
||||||
|
|
||||||
Currently only supports solving for 1 word at a time (i.e. wordle).
|
|
@@ -1,126 +0,0 @@
|
|||||||
import re
|
|
||||||
import string
|
|
||||||
|
|
||||||
import numpy as np
|
|
||||||
|
|
||||||
|
|
||||||
class AI:
|
|
||||||
def __init__(self, vocab_file, num_letters=5, num_guesses=6):
|
|
||||||
self.vocab_file = vocab_file
|
|
||||||
self.num_letters = num_letters
|
|
||||||
self.num_guesses = 6
|
|
||||||
|
|
||||||
self.vocab, self.vocab_scores, self.letter_scores = self.get_vocab(self.vocab_file)
|
|
||||||
self.best_words = sorted(list(self.vocab_scores.items()), key=lambda tup: tup[1])[::-1]
|
|
||||||
|
|
||||||
self.domains = None
|
|
||||||
self.possible_letters = None
|
|
||||||
|
|
||||||
self.reset()
|
|
||||||
|
|
||||||
def solve(self):
|
|
||||||
num_guesses = 0
|
|
||||||
while [len(e) for e in self.domains] != [1 for _ in range(self.num_letters)]:
|
|
||||||
num_guesses += 1
|
|
||||||
word = self.sample()
|
|
||||||
|
|
||||||
# # Always start with these two words
|
|
||||||
# if num_guesses == 1:
|
|
||||||
# word = 'soare'
|
|
||||||
# elif num_guesses == 2:
|
|
||||||
# word = 'culti'
|
|
||||||
|
|
||||||
print('-----------------------------------------------')
|
|
||||||
print(f'Guess #{num_guesses}/{self.num_guesses}: {word}')
|
|
||||||
print('-----------------------------------------------')
|
|
||||||
self.arc_consistency(word)
|
|
||||||
|
|
||||||
print(f'You did it! The word is {"".join([e[0] for e in self.domains])}')
|
|
||||||
|
|
||||||
|
|
||||||
def arc_consistency(self, word):
|
|
||||||
print(f'Performing arc consistency check on {word}...')
|
|
||||||
print(f'Specify 0 for completely nonexistent letter at the specified index, 1 for existent letter but incorrect index, and 2 for correct letter at correct index.')
|
|
||||||
results = []
|
|
||||||
|
|
||||||
# Collect results
|
|
||||||
for l in word:
|
|
||||||
while True:
|
|
||||||
result = input(f'{l}: ')
|
|
||||||
if result not in ['0', '1', '2']:
|
|
||||||
print('Incorrect option. Try again.')
|
|
||||||
continue
|
|
||||||
results.append(result)
|
|
||||||
break
|
|
||||||
|
|
||||||
self.possible_letters += [word[i] for i in range(len(word)) if results[i] == '1']
|
|
||||||
|
|
||||||
for i in range(len(word)):
|
|
||||||
if results[i] == '0':
|
|
||||||
if word[i] in self.possible_letters:
|
|
||||||
if word[i] in self.domains[i]:
|
|
||||||
self.domains[i].remove(word[i])
|
|
||||||
else:
|
|
||||||
for j in range(len(self.domains)):
|
|
||||||
if word[i] in self.domains[j] and len(self.domains[j]) > 1:
|
|
||||||
self.domains[j].remove(word[i])
|
|
||||||
if results[i] == '1':
|
|
||||||
if word[i] in self.domains[i]:
|
|
||||||
self.domains[i].remove(word[i])
|
|
||||||
if results[i] == '2':
|
|
||||||
self.domains[i] = [word[i]]
|
|
||||||
|
|
||||||
|
|
||||||
def reset(self):
|
|
||||||
self.domains = [list(string.ascii_lowercase) for _ in range(self.num_letters)]
|
|
||||||
self.possible_letters = []
|
|
||||||
|
|
||||||
def sample(self):
|
|
||||||
"""
|
|
||||||
Samples a best word given the current domains
|
|
||||||
:return:
|
|
||||||
"""
|
|
||||||
# Compile a regex of possible words with the current domain
|
|
||||||
regex_string = ''
|
|
||||||
for domain in self.domains:
|
|
||||||
regex_string += ''.join(['[', ''.join(domain), ']', '{1}'])
|
|
||||||
pattern = re.compile(regex_string)
|
|
||||||
|
|
||||||
# From the words with the highest scores, only return the best word that match the regex pattern
|
|
||||||
for word, _ in self.best_words:
|
|
||||||
if pattern.match(word) and False not in [e in word for e in self.possible_letters]:
|
|
||||||
return word
|
|
||||||
|
|
||||||
def get_vocab(self, vocab_file):
|
|
||||||
vocab = []
|
|
||||||
with open(vocab_file, 'r') as f:
|
|
||||||
for l in f:
|
|
||||||
vocab.append(l.strip())
|
|
||||||
|
|
||||||
# Count letter frequencies at each index
|
|
||||||
letter_freqs = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(self.num_letters)]
|
|
||||||
for word in vocab:
|
|
||||||
for i, l in enumerate(word):
|
|
||||||
letter_freqs[i][l] += 1
|
|
||||||
|
|
||||||
# Assign a score to each letter at each index by the probability of it appearing
|
|
||||||
letter_scores = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(self.num_letters)]
|
|
||||||
for i in range(len(letter_scores)):
|
|
||||||
max_freq = np.max(list(letter_freqs[i].values()))
|
|
||||||
for l in letter_scores[i].keys():
|
|
||||||
letter_scores[i][l] = letter_freqs[i][l] / max_freq
|
|
||||||
|
|
||||||
# Find a sorted list of words ranked by sum of letter scores
|
|
||||||
vocab_scores = {} # (score, word)
|
|
||||||
for word in vocab:
|
|
||||||
score = 0
|
|
||||||
for i, l in enumerate(word):
|
|
||||||
score += letter_scores[i][l]
|
|
||||||
|
|
||||||
# # Optimization: If repeating letters, deduct a couple points
|
|
||||||
# if len(set(word)) < len(word):
|
|
||||||
# score -= 0.25 * (len(word) - len(set(word)))
|
|
||||||
|
|
||||||
vocab_scores[word] = score
|
|
||||||
|
|
||||||
return vocab, vocab_scores, letter_scores
|
|
@@ -1,37 +0,0 @@
|
|||||||
import string

import numpy as np

# Load the five-letter word list, one word per line.
with open('words.txt', 'r') as f:
    words = [line.strip() for line in f]

# Tally how often each letter appears at each of the five positions.
letter_freqs = [{c: 0 for c in string.ascii_lowercase} for _ in range(5)]
for word in words:
    for pos, char in enumerate(word):
        letter_freqs[pos][char] += 1

# Score each letter at each position by its frequency relative to the most
# common letter in that position, yielding values in [0, 1].
letter_scores = []
for freqs in letter_freqs:
    max_freq = np.max(list(freqs.values()))
    letter_scores.append({c: freqs[c] / max_freq for c in string.ascii_lowercase})

# Rank every word by the sum of its letters' positional scores.
word_scores = [
    (sum(letter_scores[pos][char] for pos, char in enumerate(word)), word)
    for word in words
]

# Highest-scoring words first.
sorted_by_second = sorted(word_scores, key=lambda tup: tup[0])[::-1]
print(sorted_by_second[:10])

# Report where the popular opener 'soare' lands in the ranking.
for i, (score, word) in enumerate(sorted_by_second):
    if word == 'soare':
        print(f'{word} with a score of {score} is found at index {i}')
|
|
||||||
|
|
@@ -1,18 +0,0 @@
|
|||||||
import argparse
|
|
||||||
from ai import AI
|
|
||||||
|
|
||||||
|
|
||||||
def main(args):
    """Validate the parsed arguments and run the solver.

    Raises:
        Exception: If ``args.n`` was not supplied; n selects the game
            variant (1 = wordle, 4 = quordle, 16 = sedecordle).
    """
    if args.n is None:
        raise Exception('Need to specify n (i.e. n = 1 for wordle, n = 4 for quordle, n = 16 for sedecordle).')

    solver = AI(args.vocab_file)
    solver.solve()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', dest='n', type=int, default=None)
    parser.add_argument('--vocab_file', dest='vocab_file', type=str, default='wordle_words.txt')
    main(parser.parse_args())
|
|
@@ -1,15 +0,0 @@
|
|||||||
import pandas

# The dictionary is a large JSON object keyed by word, so parsing is slow.
print('Loading in words dictionary; this may take a while...')
df = pandas.read_json('words_dictionary.json')
print('Done loading words dictionary.')

# Keep only the five-letter words, in alphabetical order.
words = sorted(word for word in df.axes[0].tolist() if len(word) == 5)

# Write one word per line for downstream tooling.
with open('words.txt', 'w') as f:
    for word in words:
        f.write(word + '\n')
|
|
File diff suppressed because it is too large
Load Diff
7
gym_wordle/__init__.py
Normal file
7
gym_wordle/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
from gym.envs.registration import register
|
||||||
|
from .wordle import WordleEnv
|
||||||
|
|
||||||
|
# Expose the custom environment through Gym's registry so callers can
# instantiate it via gym.make('Wordle-v0').
register(
    id='Wordle-v0',
    entry_point='gym_wordle.wordle:WordleEnv',
)
|
File diff suppressed because it is too large
Load Diff
BIN
gym_wordle/dictionary/guess_list.npy
Normal file
BIN
gym_wordle/dictionary/guess_list.npy
Normal file
Binary file not shown.
File diff suppressed because it is too large
Load Diff
BIN
gym_wordle/dictionary/solution_list.npy
Normal file
BIN
gym_wordle/dictionary/solution_list.npy
Normal file
Binary file not shown.
93
gym_wordle/utils.py
Normal file
93
gym_wordle/utils.py
Normal file
@@ -0,0 +1,93 @@
|
|||||||
|
import numpy as np
|
||||||
|
import numpy.typing as npt
|
||||||
|
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
# Index 0 is the blank character; indices 1-26 map to 'a'-'z'.
_chars = ' abcdefghijklmnopqrstuvwxyz'
_char_d = {c: i for i, c in enumerate(_chars)}


def to_english(array: npt.NDArray[np.int64]) -> str:
    """Convert an integer array into the corresponding English string.

    Args:
        array: Word in array (int) form. Each entry is assumed to lie in
            0,...,26 (inclusive), where 0 encodes the blank character.

    Returns:
        The (lowercase) string spelled out by the array.
    """
    return ''.join(map(_chars.__getitem__, array))


def to_array(word: str) -> npt.NDArray[np.int64]:
    """Convert a string of characters into the corresponding integer array.

    Args:
        word: Word in string form. Each character is assumed to be either a
            blank space ' ' or a lowercase alphabetical character.

    Returns:
        An integer-array encoding of the word.
    """
    return np.array([_char_d[char] for char in word])
|
||||||
|
|
||||||
|
|
||||||
|
def get_words(category: str, build: bool = False) -> npt.NDArray[np.int64]:
    """Load a list of words in array form.

    If requested, the array is rebuilt from the human-readable CSV word list
    and cached to disk in .npy form for future calls.

    Args:
        category: Either 'guess' or 'solution', selecting the list of
            acceptable guess words or the list of acceptable solution words.
        build: When True, re-encode the CSV list and overwrite the cached
            .npy file before loading it.

    Returns:
        A two-dimensional array of words, one row per word, with the number
        of columns fixed at five.
    """
    assert category in {'guess', 'solution'}

    dictionary_dir = Path(__file__).parent / 'dictionary'
    arr_path = dictionary_dir / f'{category}_list.npy'

    if build:
        # Re-encode the human-readable list and cache the array form.
        list_path = dictionary_dir / f'{category}_list.csv'
        with open(list_path, 'r') as handle:
            encoded = np.array([to_array(line.strip()) for line in handle])
        np.save(arr_path, encoded)

    return np.load(arr_path)
|
||||||
|
|
||||||
|
|
||||||
|
def play():
    """Play Wordle yourself!

    Interactive terminal loop: prompts for guesses until the episode ends,
    then reveals the solution. Requires the 'Wordle-v0' environment to be
    registered (see gym_wordle/__init__.py).
    """
    import gym
    import gym_wordle

    env = gym.make('Wordle-v0')  # load the environment

    env.reset()
    # Fix: access the solution index through env.unwrapped, consistently with
    # the other unwrapped accesses below (wrappers don't forward attributes).
    solution = to_english(env.unwrapped.solution_space[env.unwrapped.solution]).upper()  # no peeking!

    done = False

    while not done:
        action = -1

        # in general, the environment won't be forgiving if you input an
        # invalid word, but for this function I want to let you screw up user
        # input without consequence, so just loops until valid input is taken
        while not env.action_space.contains(action):
            guess = input('Guess: ')
            action = env.unwrapped.action_space.index_of(to_array(guess))

        # Fix: WordleEnv.step follows the gymnasium API and returns FIVE
        # values (state, reward, terminated, truncated, info); the old
        # 4-way unpack raised ValueError on every step.
        state, reward, terminated, truncated, info = env.step(action)
        done = terminated or truncated
        env.render()

    print(f"The word was {solution}")
|
285
gym_wordle/wordle.py
Normal file
285
gym_wordle/wordle.py
Normal file
@@ -0,0 +1,285 @@
|
|||||||
|
import gymnasium as gym
|
||||||
|
import numpy as np
|
||||||
|
import numpy.typing as npt
|
||||||
|
from sty import fg, bg, ef, rs
|
||||||
|
|
||||||
|
from collections import Counter, defaultdict
|
||||||
|
from gym_wordle.utils import to_english, to_array, get_words
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
class WordList(gym.spaces.Discrete):
    """Super class for defining a space of valid words according to a specified
    list.

    The space is a subclass of gym.spaces.Discrete, where each element
    corresponds to an index of a valid word in the word list. The obfuscation
    is necessary for more direct implementation of RL algorithms, which expect
    spaces of less sophisticated form.

    In addition to the default methods of the Discrete space, it implements
    a __getitem__ method for easy index lookup, and an index_of method to
    convert potential words into their corresponding index (if they exist).
    """

    def __init__(self, words: npt.NDArray[np.int64], **kwargs):
        """
        Args:
            words: Collection of words in array form with shape (_, 5), where
                each word is a row of the array. Each array element is an
                integer between 0,...,26 (inclusive).
            kwargs: See documentation for gym.spaces.MultiDiscrete
        """
        super().__init__(words.shape[0], **kwargs)
        self.words = words

    def __getitem__(self, index: int) -> npt.NDArray[np.int64]:
        """Obtains the (int-encoded) word associated with the given index.

        Args:
            index: Index for the list of words.

        Returns:
            Associated word at the position specified by index.
        """
        return self.words[index]

    def index_of(self, word: npt.NDArray[np.int64]) -> int:
        """Given a word, determine its index in the list (if it exists),
        otherwise returning -1 if no index exists.

        Args:
            word: Word to find in the word list.

        Returns:
            The index of the given word if it exists, otherwise -1.
        """
        # Rows of the word list that match the query; empty when absent.
        matches, = np.nonzero((word == self.words).all(axis=1))
        try:
            # Fix: the original used a bare `except:`, which would also
            # swallow unrelated errors (e.g. shape mismatches raised above).
            # Only an empty match set — IndexError on matches[0] — means
            # "word not in list".
            return int(matches[0])
        except IndexError:
            return -1
|
||||||
|
|
||||||
|
|
||||||
|
class SolutionList(WordList):
    """Space of *solution* words for the Wordle environment.

    Wordle distinguishes two word collections:

    * "guesses", the words the game accepts as valid guesses for the answer.
    * "solutions", the words the game may pick an answer from.

    The solutions are a strict subset of the guesses; this class models the
    solution set.
    """

    def __init__(self, **kwargs):
        """
        Args:
            kwargs: See documentation for gym.spaces.MultiDiscrete
        """
        super().__init__(get_words('solution'), **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class WordleObsSpace(gym.spaces.Box):
    """State (observation) space for Wordle, expressed as a gym.spaces.Box.

    Conceptually the observation is a 6x5 board with two channels:

    - a character channel holding the letters placed on the board (unfilled
      cells are marked with the empty character, 0), and
    - a flag channel holding the in-game feedback for each placed letter
      (green highlight, yellow highlight, etc.).

    There are 6 rows, one per turn, and 5 columns, since the solution is
    always a five-letter word. For simplicity, and compatibility with
    stable_baselines algorithms, the two channels are appended horizontally
    (along columns) into a single 6x10 array, so each row reads
    c0 c1 c2 c3 c4 f0 f1 f2 f3 f4 for word c0...c4 with flags f0...f4.
    """

    def __init__(self, **kwargs):
        self.n_rows = 6
        self.n_cols = 5
        self.max_char = 26
        self.max_flag = 4

        # Characters occupy the left half of each row, flags the right half.
        low = np.zeros((self.n_rows, 2 * self.n_cols))
        char_high = np.full((self.n_rows, self.n_cols), self.max_char)
        flag_high = np.full((self.n_rows, self.n_cols), self.max_flag)
        high = np.c_[char_high, flag_high]

        super().__init__(low, high, dtype=np.int64, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class GuessList(WordList):
    """Space of *guess* words for the Wordle environment.

    Models the full set of words the game accepts as valid guesses.
    """

    def __init__(self, **kwargs):
        """
        Args:
            kwargs: See documentation for gym.spaces.MultiDiscrete
        """
        super().__init__(get_words('guess'), **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class WordleEnv(gym.Env):
    """Gymnasium environment implementing the game of Wordle.

    The agent has six rounds to guess a five-letter solution word. Each
    action is an index into the guess list, and the observation is the 6x10
    board of characters and per-letter feedback flags (see WordleObsSpace).
    """

    metadata = {'render.modes': ['human']}

    # Character flag codes used in the observation's flag channel.
    no_char = 0      # cell not filled in yet
    right_pos = 1    # letter in the solution, correct position (green)
    wrong_pos = 2    # letter in the solution, wrong position (yellow)
    wrong_char = 3   # letter not in the solution

    def __init__(self):
        super().__init__()

        self.action_space = GuessList()
        self.solution_space = SolutionList()

        self.observation_space = WordleObsSpace()

        # Terminal background highlighting (sty escape codes) per flag value.
        self._highlights = {
            self.right_pos: (bg.green, bg.rs),
            self.wrong_pos: (bg.yellow, bg.rs),
            self.wrong_char: ('', ''),
            self.no_char: ('', ''),
        }

        self.n_rounds = 6
        self.n_letters = 5
        self.info = {'correct': False, 'guesses': defaultdict(int)}

    def _highlighter(self, char: str, flag: int) -> str:
        """Terminal renderer functionality. Properly highlights a character
        based on the flag associated with it.

        Args:
            char: Character in question.
            flag: Associated flag, one of:
                - 0: no character (render no background)
                - 1: right position (render green background)
                - 2: wrong position (render yellow background)
                - 3: wrong character (render no background)

        Returns:
            Correct ASCII sequence producing the desired character in the
            correct background.
        """
        front, back = self._highlights[flag]
        return front + char + back

    def reset(self, seed=None, options=None):
        """Reset the environment to an initial state and return an initial
        observation.

        Args:
            seed: Optional seed making the solution choice reproducible.
            options: Unused; present for gymnasium API compatibility.

        Returns:
            Tuple of the initial (all-zero) board state and the info dict.
        """
        # Fix: previously the `seed` argument was accepted but ignored, so
        # episodes were never reproducible. Seed gymnasium's RNG and the
        # solution space (which owns the RNG used by .sample()).
        super().reset(seed=seed)
        if seed is not None:
            self.solution_space.seed(seed)

        self.round = 0
        self.solution = self.solution_space.sample()

        self.state = np.zeros((self.n_rounds, 2 * self.n_letters), dtype=np.int64)

        self.info = {'correct': False, 'guesses': defaultdict(int)}

        return self.state, self.info

    def render(self, mode: str = 'human'):
        """Renders the Wordle environment.

        Currently supported render modes:
        - human: renders the Wordle game to the terminal.

        Args:
            mode: the mode to render with.
        """
        if mode == 'human':
            for row in self.state:
                # Left half of the row holds characters, right half flags.
                text = ''.join(map(
                    self._highlighter,
                    to_english(row[:self.n_letters]).upper(),
                    row[self.n_letters:]
                ))
                print(text)
        else:
            super().render(mode=mode)

    def step(self, action):
        """Run one step of the Wordle game. Every game must be previously
        initialized by a call to the `reset` method.

        Args:
            action: Index of the word guessed by the agent.

        Returns:
            state (object): Wordle game state after the guess.
            reward (float): Reward associated with the guess.
            terminated (bool): Whether the game has ended.
            truncated (bool): Always False; present for the gymnasium API.
            info (dict): Auxiliary diagnostic information.
        """
        assert self.action_space.contains(action), 'Invalid word!'

        # Decode the action/solution indices into int-array words.
        action = self.action_space[action]
        solution = self.solution_space[self.solution]

        self.state[self.round][:self.n_letters] = action

        # Assign feedback flags Wordle-style: a repeated letter stays yellow
        # only while its running count does not exceed its count in the
        # solution; extra occurrences are marked wrong_char.
        counter = Counter()
        for i, char in enumerate(action):
            flag_i = i + self.n_letters
            counter[char] += 1

            if char == solution[i]:
                self.state[self.round, flag_i] = self.right_pos
            elif counter[char] <= (char == solution).sum():
                self.state[self.round, flag_i] = self.wrong_pos
            else:
                self.state[self.round, flag_i] = self.wrong_char

        self.round += 1

        correct = (action == solution).all()
        game_over = (self.round == self.n_rounds)

        done = correct or game_over

        # Shaped reward recomputed over the whole board so far.
        reward = 0
        # correct spot
        reward += np.sum(self.state[:, 5:] == 1) * 2

        # correct letter not correct spot
        reward += np.sum(self.state[:, 5:] == 2) * 1

        # incorrect letter
        reward += np.sum(self.state[:, 5:] == 3) * -1

        # Penalize repeating an earlier guess, reward a fresh one.
        hashable_action = to_english(action)
        if hashable_action in self.info['guesses']:
            reward += -10 * self.info['guesses'][hashable_action]
        else:  # guess different word
            reward += 10

        self.info['guesses'][hashable_action] += 1

        # for game ending in win or loss
        reward += 10 if correct else -10 if done else 0

        self.info['correct'] = correct

        # observation, reward, terminated, truncated, info
        return self.state, reward, done, False, self.info
|
108
letter_guess.py
108
letter_guess.py
@@ -1,108 +0,0 @@
|
|||||||
import gymnasium as gym
|
|
||||||
from gymnasium import spaces
|
|
||||||
import numpy as np
|
|
||||||
import random
|
|
||||||
import re
|
|
||||||
|
|
||||||
|
|
||||||
class LetterGuessingEnv(gym.Env):
    """
    Custom Gymnasium environment for a letter guessing game with a focus on forming
    valid prefixes and words from a list of valid Wordle words. The environment tracks
    the current guess prefix and validates it against known valid words, ending the game
    early with a negative reward for invalid prefixes.
    """

    metadata = {'render_modes': ['human']}

    def __init__(self, valid_words, seed=None):
        # One discrete action per lowercase letter 'a'..'z'.
        self.action_space = spaces.Discrete(26)
        # Observation: per-letter flags (26x2) concatenated with the
        # per-letter position mask (26x4), flattened to a 156-vector.
        self.observation_space = spaces.Box(low=0, high=1, shape=(26*2 + 26*4,), dtype=np.int32)

        self.valid_words = valid_words  # List of valid Wordle words
        self.target_word = ''  # Target word for the current episode
        # Space-separated blob of all valid words, used for fast prefix
        # checks via regex word-boundary search in step().
        self.valid_words_str = ' '.join(self.valid_words) + ' '
        self.letter_flags = None
        self.letter_positions = None
        self.guessed_letters = set()
        self.guess_prefix = ""  # Tracks the current guess prefix

        # NOTE(review): the `seed` parameter is accepted but never used —
        # reset() draws the target with the global `random` module; confirm
        # whether seeding was intended here.
        self.reset()

    def step(self, action):
        """Advance the game by guessing one letter for the next prefix slot.

        Returns the gymnasium 5-tuple (obs, reward, terminated, truncated, info).
        """
        letter_index = action % 26  # Assuming action is the letter index directly
        position = len(self.guess_prefix)  # The next position in the prefix is determined by its current length
        letter = chr(ord('a') + letter_index)

        reward = 0
        done = False

        # Check if the letter has already been used in the guess prefix
        if letter in self.guessed_letters:
            reward = -1  # Penalize for repeating letters in the prefix
        else:
            # Add the new letter to the prefix and update guessed letters set
            self.guess_prefix += letter
            self.guessed_letters.add(letter)

            # Update letter flags based on whether the letter is in the target word
            if self.target_word[position] == letter:
                self.letter_flags[letter_index, :] = [1, 0]  # Update flag for correct guess
            elif letter in self.target_word:
                self.letter_flags[letter_index, :] = [0, 1]  # Update flag for correct guess wrong position
            else:
                self.letter_flags[letter_index, :] = [0, 0]  # Update flag for incorrect guess

            reward = 1  # Reward for adding new information by trying a new letter

        # Update the letter_positions matrix to reflect the new guess.
        # NOTE(review): the matrix has only 4 position columns for 5-letter
        # words; position 4 (the last letter) is special-cased to reset the
        # whole mask — confirm this asymmetry is intentional.
        if position == 4:
            self.letter_positions[:,:] = 1
        else:
            self.letter_positions[:, position] = 0
            self.letter_positions[letter_index, position] = 1

        # Use regex to check if the current prefix can lead to a valid word
        if not re.search(r'\b' + self.guess_prefix, self.valid_words_str):
            reward = -5  # Penalize for forming an invalid prefix
            done = True  # End the episode if the prefix is invalid

        # guessed a full word so we reset our guess prefix to guess next round
        if len(self.guess_prefix) == len(self.target_word):
            self.guess_prefix = ''
            self.round += 1

        # End after the round counter reaches 2 (round starts at 1, so this
        # is two completed words). NOTE(review): the original comment said
        # "5 rounds" — the code enforces 2; confirm which is intended.
        if self.round == 2:
            # reward = 5
            done = True

        obs = self._get_obs()

        # Debug trap: rewards this low should be unreachable given the
        # per-step penalties above.
        if reward < -50:
            print(obs, reward, done)

        return obs, reward, done, False, {}

    def reset(self, seed=None):
        """Start a new episode: pick a fresh target word and clear all state.

        NOTE(review): `seed` is accepted but unused, and gymnasium's
        Env.reset(seed=...) is not called — confirm whether reproducible
        resets are required.
        """
        self.target_word = random.choice(self.valid_words)
        # self.target_word_encoded = self.encode_word(self.target_word)
        # Flags/positions start all-ones, meaning "no information yet".
        self.letter_flags = np.ones((26, 2), dtype=np.int32)
        self.letter_positions = np.ones((26, 4), dtype=np.int32)
        self.guessed_letters = set()
        self.guess_prefix = ""  # Reset the guess prefix for the new episode
        self.round = 1
        return self._get_obs(), {}

    def encode_word(self, word):
        """Return a 26-vector with 1s at the indices of letters present in word."""
        encoded = np.zeros((26,))
        for char in word:
            index = ord(char) - ord('a')
            encoded[index] = 1
        return encoded

    def _get_obs(self):
        # Flatten flags (26x2) and positions (26x4) into one 156-vector.
        return np.concatenate([self.letter_flags.flatten(), self.letter_positions.flatten()])

    def render(self, mode='human'):
        pass  # Optional: Implement rendering logic if needed
|
|
Reference in New Issue
Block a user