Mirror of https://github.com/ltcptgeneral/cse151b-final-project.git, synced 2025-10-22 18:49:21 +00:00
Compare commits: ethan-test ... 83e81722d2

2 Commits

Author | SHA1 | Date
---|---|---
 | 83e81722d2 |
 | 320f2f81b7 |
.gitignore (vendored): 3 changes
@@ -1 +1,2 @@
**/data/*
**/*.zip
dqn_wordle.py (new file): 24 lines
@@ -0,0 +1,24 @@
import gym
import gym_wordle  # registers the Wordle-v0 environment
from stable_baselines3 import DQN

env = gym.make("Wordle-v0")
done = False

print(env)

# train a DQN agent with an MLP policy on the Wordle environment
model = DQN("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=10000, log_interval=100)
model.save("dqn_wordle")

del model  # remove to demonstrate saving and loading

model = DQN.load("dqn_wordle")

# roll out a single episode with the loaded policy
state = env.reset()

while not done:
    action, _states = model.predict(state, deterministic=True)
    state, reward, done, info = env.step(action)
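The new dqn_wordle.py trains the agent and then replays only a single greedy episode. A minimal evaluation sketch along the same lines, assuming the same gym/gym_wordle versions used above (env.reset() returning the observation and env.step() returning the old 4-tuple), could average the return over several episodes; the episode count below is an illustrative choice, not something from the commit:

# Evaluation sketch (assumptions: same gym/gym_wordle API as above;
# n_episodes is an illustrative value, not from the original script).
import gym
import gym_wordle
from stable_baselines3 import DQN

env = gym.make("Wordle-v0")
model = DQN.load("dqn_wordle")

n_episodes = 100  # hypothetical episode count
returns = []
for _ in range(n_episodes):
    state = env.reset()
    done = False
    episode_return = 0.0
    while not done:
        action, _states = model.predict(state, deterministic=True)
        state, reward, done, info = env.step(action)
        episode_return += reward
    returns.append(episode_return)

print(f"mean return over {n_episodes} episodes: {sum(returns) / n_episodes:.3f}")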
test.ipynb: 165 changes (file diff suppressed because one or more lines are too long)
test.py (deleted file): 61 lines
@@ -1,61 +0,0 @@
from torch.utils.data import Dataset
from transformers import BertGenerationEncoder, BertGenerationDecoder, EncoderDecoderModel, BertTokenizer
from tqdm import tqdm as progress_bar
import torch
import matplotlib.pyplot as plt
import json

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

encoder = BertGenerationEncoder.from_pretrained("google-bert/bert-base-uncased", bos_token_id=101, eos_token_id=102)
# add cross attention layers and use BERT's cls token as BOS token and sep token as EOS token
decoder = BertGenerationDecoder.from_pretrained("google-bert/bert-base-uncased", add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)

# create tokenizer
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-large-uncased")


class CodeDataset(Dataset):
    # CoNaLa intent -> snippet pairs
    def __init__(self):
        with open("data/conala-train.json") as f:
            self.data = json.load(f)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # fall back to the original intent when no rewritten intent is available
        intent = self.data[idx]["rewritten_intent"] if self.data[idx]["rewritten_intent"] else self.data[idx]["intent"]
        return intent, self.data[idx]["snippet"]


optimizer = torch.optim.AdamW(params=model.parameters(), lr=1e-3)
dataloader = CodeDataset()
model = model.to(device)

losses = []
epochs = 10
for i in range(epochs):
    epoch_loss = 0

    for idx, (question, answer) in progress_bar(enumerate(dataloader), total=len(dataloader)):
        input_ids = tokenizer(question, add_special_tokens=False, return_tensors="pt").input_ids.to(device)
        label_ids = tokenizer(answer, return_tensors="pt").input_ids.to(device)

        loss = model(input_ids=input_ids, decoder_input_ids=label_ids, labels=label_ids).loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        epoch_loss += loss.item()

    losses.append(epoch_loss)

plt.plot(losses, color="green", label="Training Loss")
plt.legend(loc="upper left")
plt.savefig("plot.png")
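The deleted test.py trains the encoder-decoder and plots the loss but never generates from the model. A minimal inference sketch, reusing the model, tokenizer, and device objects defined in the script above, could look like the following; the prompt string and generation settings are illustrative assumptions, not part of the original file:

# Inference sketch (assumptions: `model`, `tokenizer`, and `device` are the
# objects built in test.py above; prompt and generation settings are illustrative).
model.eval()

intent = "sort a list of tuples by the second element"  # hypothetical prompt
input_ids = tokenizer(intent, add_special_tokens=False, return_tensors="pt").input_ids.to(device)

with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=64,
        num_beams=4,
        early_stopping=True,
        decoder_start_token_id=tokenizer.cls_token_id,  # CLS (101) was used as BOS above
        eos_token_id=tokenizer.sep_token_id,            # SEP (102) was used as EOS above
        pad_token_id=tokenizer.pad_token_id,
    )

print(tokenizer.decode(output_ids[0], skip_special_tokens=True))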