Commit e98e66bdf0 by jlevine18, 2019-03-02 08:18:28 -06:00, committed by GitHub (parent 791c4e82a5)

#Titan Robotics Team 2022: ML Module
#Written by Arthur Lu & Jacob Levine
#Notes:
#   this should be imported as a python module using 'import titanlearn'
#   this should be included in the local directory or environment variable
#   this module has not been optimized for multithreaded computing
#   this module learns from its mistakes far faster than 2022's captains

#setup:
__version__ = "1.0.0.001"

#changelog should be viewed using print(titanlearn.__changelog__)
__changelog__ = """changelog:
1.0.0.xxx:
    -added generation of ANNs, basic SGD training"""

__author__ = (
    "Arthur Lu <arthurlu@ttic.edu>, "
    "Jacob Levine <jlevine@ttic.edu>"
)

__all__ = [
    'linear_nn',
    'train_sgd_minibatch',
    'train_sgd_simple'
]

#imports
import torch
import warnings
from collections import OrderedDict
from sklearn import metrics, datasets
import numpy as np
import matplotlib.pyplot as plt
import math

#enable CUDA if possible
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

#linear_nn: creates a fully connected network given params
def linear_nn(in_dim, hidden_dim, out_dim, num_hidden, act_fn="tanh", end="none"):
    #each branch seeds the OrderedDict with the input layer plus its activation,
    #so two Linear layers are never stacked back-to-back
    if act_fn.lower()=="tanh":
        k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim)), ("tanh0", torch.nn.Tanh())])
        for i in range(num_hidden):
            k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "tanh"+str(i+1): torch.nn.Tanh()})
    elif act_fn.lower()=="sigmoid":
        k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim)), ("sig0", torch.nn.Sigmoid())])
        for i in range(num_hidden):
            k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "sig"+str(i+1): torch.nn.Sigmoid()})
    elif act_fn.lower()=="relu":
        k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim)), ("relu0", torch.nn.ReLU())])
        for i in range(num_hidden):
            k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "relu"+str(i+1): torch.nn.ReLU()})
    elif act_fn.lower()=="leaky relu":
        k=OrderedDict([("in", torch.nn.Linear(in_dim,hidden_dim)), ("lre0", torch.nn.LeakyReLU())])
        for i in range(num_hidden):
            k.update({"lin"+str(i+1): torch.nn.Linear(hidden_dim,hidden_dim), "lre"+str(i+1): torch.nn.LeakyReLU()})
    else:
        warnings.warn("Did not specify a valid inner activation function. Returning nothing.")
        return None

    if end.lower()=="softmax":
        #dim=1 assumes batch-first 2D input; Softmax() without a dim is deprecated
        k.update({"out": torch.nn.Linear(hidden_dim,out_dim), "softmax": torch.nn.Softmax(dim=1)})
    elif end.lower()=="none":
        k.update({"out": torch.nn.Linear(hidden_dim,out_dim)})
    elif end.lower()=="sigmoid":
        k.update({"out": torch.nn.Linear(hidden_dim,out_dim), "sigmoid": torch.nn.Sigmoid()})
    else:
        warnings.warn("Did not specify a valid final activation function. Returning nothing.")
        return None

    return torch.nn.Sequential(k)
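
#demo_linear_nn: usage sketch for the builder above (an illustrative helper,
#not part of the original module); builds a 2-hidden-layer softmax classifier
#for 4 features and 3 classes, then prints the layer stack
def demo_linear_nn():
    net = linear_nn(4, 32, 3, 2, act_fn="relu", end="softmax")
    print(net)   #shows the named layers: in, relu0, lin1, relu1, ..., out, softmax
    return net
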
#train_sgd_simple: trains network using SGD
def train_sgd_simple(net, evalType, data, ground, dev=None, devg=None, iters=1000, learnrate=1e-4, testevery=1, graphsaveloc=None, modelsaveloc=None, loss="mse"):
    model=net.to(device)
    data=data.to(device)
    ground=ground.to(device)
    if dev is not None:   #'dev != None' triggers elementwise comparison on tensors
        dev=dev.to(device)
    losses=[]
    dev_losses=[]
    if loss.lower()=="mse":
        loss_fn = torch.nn.MSELoss()
    elif loss.lower()=="cross entropy":
        loss_fn = torch.nn.CrossEntropyLoss()
    elif loss.lower()=="nll":
        loss_fn = torch.nn.NLLLoss()
    elif loss.lower()=="poisson nll":
        loss_fn = torch.nn.PoissonNLLLoss()
    else:
        warnings.warn("Did not specify a valid loss function. Returning nothing.")
        return None
    optimizer=torch.optim.SGD(model.parameters(), lr=learnrate)
    for i in range(iters):
        if i%testevery==0:
            with torch.no_grad():
                output = model(data)
                if evalType == "ap":
                    ap = metrics.average_precision_score(ground.cpu().numpy(), output.cpu().numpy())
                elif evalType == "regression":
                    ap = metrics.explained_variance_score(ground.cpu().numpy(), output.cpu().numpy())
                losses.append(ap)
                print(str(i)+": "+str(ap))
                plt.plot(np.array(range(0,i+1,testevery)), np.array(losses), label="train AP")
                if dev is not None:
                    output = model(dev)
                    if evalType == "ap":
                        ap = metrics.average_precision_score(devg.cpu().numpy(), output.cpu().numpy())
                        dev_losses.append(ap)
                        plt.plot(np.array(range(0,i+1,testevery)), np.array(dev_losses), label="dev AP")
                    elif evalType == "regression":
                        ev = metrics.explained_variance_score(devg.cpu().numpy(), output.cpu().numpy())
                        dev_losses.append(ev)
                        plt.plot(np.array(range(0,i+1,testevery)), np.array(dev_losses), label="dev EV")
            if graphsaveloc is not None:
                plt.savefig(graphsaveloc+".pdf")
        with torch.enable_grad():
            optimizer.zero_grad()
            output = model(data)
            loss_val = loss_fn(output, ground)   #renamed so it no longer shadows the 'loss' argument
            print(loss_val.item())
            loss_val.backward()
            optimizer.step()
    if modelsaveloc is not None:
        torch.save(model, modelsaveloc)
    plt.show()
    return model
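
#demo_train_sgd_simple: usage sketch for the trainer above (an illustrative
#helper, not part of the original module); fits a small net to random
#regression targets, so the scores hover near zero, which is expected on noise
def demo_train_sgd_simple():
    x = torch.randn(64, 8)
    y = torch.randn(64, 1)
    net = linear_nn(8, 32, 1, 2, act_fn="tanh", end="none")
    return train_sgd_simple(net, "regression", x, y, iters=50, testevery=10)
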
#train_sgd_minibatch: same as above, but with minibatches
def train_sgd_minibatch(net, data, ground, dev=None, devg=None, epoch=100, batchsize=20, learnrate=1e-4, testevery=20, graphsaveloc=None, modelsaveloc=None, loss="mse"):
    model=net.to(device)
    data=data.to(device)
    ground=ground.to(device)
    if dev is not None:
        dev=dev.to(device)
    losses=[]
    dev_losses=[]
    if loss.lower()=="mse":
        loss_fn = torch.nn.MSELoss()
    elif loss.lower()=="cross entropy":
        loss_fn = torch.nn.CrossEntropyLoss()
    elif loss.lower()=="nll":
        loss_fn = torch.nn.NLLLoss()
    elif loss.lower()=="poisson nll":
        loss_fn = torch.nn.PoissonNLLLoss()
    else:
        warnings.warn("Did not specify a valid loss function. Returning nothing.")
        return None
    #plain SGD to match the function name; LBFGS, used here previously, requires a closure in step()
    optimizer=torch.optim.SGD(model.parameters(), lr=learnrate)
    itercount=0
    for i in range(epoch):
        print("EPOCH "+str(i)+" OF "+str(epoch-1))
        batches=math.ceil(data.size(0)/batchsize)   #size(0) is already an int, so no .item()
        for j in range(batches):
            batchdata=[]
            batchground=[]
            for k in range(j*batchsize, min((j+1)*batchsize, data.size(0))):
                batchdata.append(data[k])
                batchground.append(ground[k])
            batchdata=torch.stack(batchdata)
            batchground=torch.stack(batchground)
            if itercount%testevery==0:
                with torch.no_grad():
                    output = model(data)
                    ap = metrics.average_precision_score(ground.cpu().numpy(), output.cpu().numpy())
                    losses.append(ap)
                    print(str(i)+": "+str(ap))
                    #x-axis tracks itercount, not the epoch index, so it matches len(losses)
                    plt.plot(np.array(range(0,itercount+1,testevery)), np.array(losses), label="train AP")
                    if dev is not None:
                        output = model(dev)
                        ap = metrics.average_precision_score(devg.cpu().numpy(), output.cpu().numpy())
                        dev_losses.append(ap)
                        plt.plot(np.array(range(0,itercount+1,testevery)), np.array(dev_losses), label="dev AP")
                if graphsaveloc is not None:
                    plt.savefig(graphsaveloc+".pdf")
            with torch.enable_grad():
                optimizer.zero_grad()
                output = model(batchdata)
                loss_val = loss_fn(output, batchground)   #compare to the batch's targets, not the full ground set
                loss_val.backward()
                optimizer.step()
            itercount += 1
    if modelsaveloc is not None:
        torch.save(model, modelsaveloc)
    plt.show()
    return model
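
#demo_train_sgd_minibatch: usage sketch for the minibatch trainer (an
#illustrative helper, not part of the original module); targets are binary
#because the evaluation inside relies on average_precision_score
def demo_train_sgd_minibatch():
    x = torch.randn(100, 8)
    y = (torch.rand(100, 1) > 0.5).to(torch.float)
    net = linear_nn(8, 32, 1, 2, act_fn="relu", end="sigmoid")
    return train_sgd_minibatch(net, x, y, epoch=2, batchsize=20, testevery=5)
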
#test harness: trains a small regression net on the California housing dataset
def retyuoipufdyu():
    raw = datasets.fetch_california_housing()   #fetch once instead of twice
    data = torch.tensor(raw['data']).to(torch.float)
    ground = torch.tensor(raw['target']).to(torch.float).unsqueeze(1)   #(n, 1) so MSELoss shapes match the net output
    model = linear_nn(8, 100, 1, 20, act_fn="relu")
    print(model)
    return train_sgd_simple(model, "regression", data, ground, learnrate=1e-4, iters=1000)

retyuoipufdyu()