Mirror of https://github.com/ltcptgeneral/cse151b-final-project.git (synced 2025-09-08 08:17:21 +00:00)
started new letter guess environment
129  eric_wordle/.gitignore  vendored  Normal file
@@ -0,0 +1,129 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
11  eric_wordle/README.md  Normal file
@@ -0,0 +1,11 @@
# N-dle Solver

A solver designed to beat the New York Times' Wordle (link [here](https://www.nytimes.com/games/wordle/index.html)). If you are bored enough, you can extend it to solve the more general N-dle problem (quordle, octordle, etc.).

I originally made this out of frustration with the game (and my own lack of lingual talent). One day, my friend thought she could beat my bot. To her dismay, she learned that she is no better than a machine. Let's see if you can do any better (the bot's average number of attempts is 3.6).

## Usage:
1. Run `python main.py --n 1`
2. Follow the prompts

Currently only supports solving for 1 word at a time (i.e. Wordle).
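For a sense of what the prompts in step 2 look like, here is a hypothetical transcript (the actual first guess depends on the vocab file; `soare` is the opener referenced in `ai.py`'s commented-out lines). Enter 0 for a gray letter, 1 for a yellow letter, and 2 for a green letter:

```
-----------------------------------------------
Guess #1/6: soare
-----------------------------------------------
Performing arc consistency check on soare...
s: 0
o: 0
a: 2
r: 1
e: 2
```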
126  eric_wordle/ai.py  Normal file
@@ -0,0 +1,126 @@
import re
import string

import numpy as np


class AI:
    def __init__(self, vocab_file, num_letters=5, num_guesses=6):
        self.vocab_file = vocab_file
        self.num_letters = num_letters
        self.num_guesses = num_guesses

        self.vocab, self.vocab_scores, self.letter_scores = self.get_vocab(self.vocab_file)
        self.best_words = sorted(list(self.vocab_scores.items()), key=lambda tup: tup[1])[::-1]

        self.domains = None
        self.possible_letters = None

        self.reset()

    def solve(self):
        num_guesses = 0
        while any(len(domain) != 1 for domain in self.domains):
            num_guesses += 1
            word = self.sample()

            # # Always start with these two words
            # if num_guesses == 1:
            #     word = 'soare'
            # elif num_guesses == 2:
            #     word = 'culti'

            print('-----------------------------------------------')
            print(f'Guess #{num_guesses}/{self.num_guesses}: {word}')
            print('-----------------------------------------------')
            self.arc_consistency(word)

        print(f'You did it! The word is {"".join([e[0] for e in self.domains])}')

    def arc_consistency(self, word):
        print(f'Performing arc consistency check on {word}...')
        print('Enter 0 if the letter is not in the word, 1 if it is in the word but at the wrong index, and 2 if it is at the correct index.')
        results = []

        # Collect feedback for each letter
        for l in word:
            while True:
                result = input(f'{l}: ')
                if result not in ['0', '1', '2']:
                    print('Incorrect option. Try again.')
                    continue
                results.append(result)
                break

        # Letters marked 1 must appear somewhere in the word
        self.possible_letters += [word[i] for i in range(len(word)) if results[i] == '1']

        for i in range(len(word)):
            if results[i] == '0':
                if word[i] in self.possible_letters:
                    # The letter occurs elsewhere in the word, so only prune this index
                    if word[i] in self.domains[i]:
                        self.domains[i].remove(word[i])
                else:
                    # The letter does not occur at all, so prune it from every index
                    for j in range(len(self.domains)):
                        if word[i] in self.domains[j] and len(self.domains[j]) > 1:
                            self.domains[j].remove(word[i])
            if results[i] == '1':
                if word[i] in self.domains[i]:
                    self.domains[i].remove(word[i])
            if results[i] == '2':
                self.domains[i] = [word[i]]

    def reset(self):
        self.domains = [list(string.ascii_lowercase) for _ in range(self.num_letters)]
        self.possible_letters = []

    def sample(self):
        """
        Samples the best word given the current domains.
        """
        # Compile a regex of possible words from the current domains
        regex_string = ''
        for domain in self.domains:
            regex_string += ''.join(['[', ''.join(domain), ']'])
        pattern = re.compile(regex_string)

        # Of the highest-scoring words, return the first that matches the regex
        # and contains every letter known to be in the word
        for word, _ in self.best_words:
            if pattern.match(word) and False not in [e in word for e in self.possible_letters]:
                return word

    def get_vocab(self, vocab_file):
        vocab = []
        with open(vocab_file, 'r') as f:
            for l in f:
                vocab.append(l.strip())

        # Count letter frequencies at each index
        letter_freqs = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(self.num_letters)]
        for word in vocab:
            for i, l in enumerate(word):
                letter_freqs[i][l] += 1

        # Assign a score to each letter at each index by the probability of it appearing
        letter_scores = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(self.num_letters)]
        for i in range(len(letter_scores)):
            max_freq = np.max(list(letter_freqs[i].values()))
            for l in letter_scores[i].keys():
                letter_scores[i][l] = letter_freqs[i][l] / max_freq

        # Score each word by the sum of its per-index letter scores
        vocab_scores = {}  # word -> score
        for word in vocab:
            score = 0
            for i, l in enumerate(word):
                score += letter_scores[i][l]

            # # Optimization: if letters repeat, deduct a couple of points
            # if len(set(word)) < len(word):
            #     score -= 0.25 * (len(word) - len(set(word)))

            vocab_scores[word] = score

        return vocab, vocab_scores, letter_scores
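The 0/1/2 feedback that `arc_consistency` asks the user to type can also be generated programmatically, which is handy for testing the solver against a known answer. A minimal sketch (a hypothetical helper, not part of this commit; it ignores Wordle's duplicate-letter rule, where a repeated letter goes gray once the answer's copies are used up):

```python
def feedback(guess: str, answer: str) -> str:
    """Return the per-letter 0/1/2 string that ai.py's arc_consistency expects.

    0 = letter not in the word, 1 = in the word at another index, 2 = exact match.
    """
    return ''.join(
        '2' if g == a else ('1' if g in answer else '0')
        for g, a in zip(guess, answer)
    )

print(feedback('soare', 'crane'))  # -> '00212'
```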
37  eric_wordle/dist.py  Normal file
@@ -0,0 +1,37 @@
import string

import numpy as np

words = []
with open('words.txt', 'r') as f:
    for l in f:
        words.append(l.strip())

# Count letter frequencies at each index
letter_freqs = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(5)]
for word in words:
    for i, l in enumerate(word):
        letter_freqs[i][l] += 1

# Assign a score to each letter at each index by the probability of it appearing
letter_scores = [{letter: 0 for letter in string.ascii_lowercase} for _ in range(5)]
for i in range(len(letter_scores)):
    max_freq = np.max(list(letter_freqs[i].values()))
    for l in letter_scores[i].keys():
        letter_scores[i][l] = letter_freqs[i][l] / max_freq

# Rank words by the sum of their per-index letter scores, best first
word_scores = []  # (score, word)
for word in words:
    score = 0
    for i, l in enumerate(word):
        score += letter_scores[i][l]
    word_scores.append((score, word))

sorted_by_score = sorted(word_scores, key=lambda tup: tup[0])[::-1]
print(sorted_by_score[:10])

for i, (score, word) in enumerate(sorted_by_score):
    if word == 'soare':
        print(f'{word} with a score of {score} is found at index {i}')
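To see why this scoring favors letters that are common at each position, here is the same computation on a toy three-word vocabulary (a sketch with hypothetical words, independent of words.txt):

```python
import string

toy_words = ['crane', 'crate', 'slate']

# Positional frequency counts, exactly as in dist.py but over 3 words
freqs = [{c: 0 for c in string.ascii_lowercase} for _ in range(5)]
for w in toy_words:
    for i, c in enumerate(w):
        freqs[i][c] += 1

# Normalize by the max count at each index: at index 0, 'c' scores 1.0 and 's' 0.5
scores = [{c: f[c] / max(f.values()) for c in f} for f in freqs]

# 'crate' has the most common letter at every index, so it scores the maximum 5.0
print(sum(scores[i][c] for i, c in enumerate('crate')))  # -> 5.0
```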
18  eric_wordle/main.py  Normal file
@@ -0,0 +1,18 @@
import argparse

from ai import AI


def main(args):
    if args.n is None:
        raise Exception('Need to specify n (i.e. n = 1 for wordle, n = 4 for quordle, n = 16 for sedecordle).')

    ai = AI(args.vocab_file)
    ai.solve()


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--n', dest='n', type=int, default=None)
    parser.add_argument('--vocab_file', dest='vocab_file', type=str, default='wordle_words.txt')
    args = parser.parse_args()
    main(args)
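Equivalently, the solver can be driven from a Python shell, which is all `main.py` does after parsing arguments. A sketch (assuming a `wordle_words.txt` vocab file exists next to the script; note this default differs from the `words.txt` that `process.py` writes):

```python
from ai import AI

ai = AI('wordle_words.txt')  # main.py's default --vocab_file
ai.solve()                   # interactive loop: answer 0/1/2 for each letter
```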
15  eric_wordle/process.py  Normal file
@@ -0,0 +1,15 @@
import pandas

print('Loading in words dictionary; this may take a while...')
df = pandas.read_json('words_dictionary.json')
print('Done loading words dictionary.')
words = []
for word in df.axes[0].tolist():
    if len(word) != 5:
        continue
    words.append(word)
words.sort()

with open('words.txt', 'w') as f:
    for word in words:
        f.write(word + '\n')
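Since `process.py` only uses the row index of the loaded frame, the pandas dependency can be dropped. A sketch of the same filtering with the standard library (assuming `words_dictionary.json` maps each word to a dummy value, which is what reading its row axis implies):

```python
import json

with open('words_dictionary.json', 'r') as f:
    dictionary = json.load(f)

# Keep only the 5-letter words, sorted, one per line
words = sorted(w for w in dictionary if len(w) == 5)
with open('words.txt', 'w') as f:
    f.write('\n'.join(words) + '\n')
```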
15919  eric_wordle/words.txt  Normal file
File diff suppressed because it is too large
370104  eric_wordle/words_dictionary.json  Normal file
File diff suppressed because it is too large