moved and renamed cudaregress.py to regression.py

ltcptgeneral 2019-09-23 09:58:08 -05:00
parent 3d77fc5290
commit 9fd5ca249c


@@ -1,180 +1,180 @@
# Titan Robotics Team 2022: CUDA-based Regressions Module
# Written by Arthur Lu & Jacob Levine
# Notes:
#   this should be imported as a python module using 'import regression'
#   this should be included in the local directory or environment variable
#   this module is cuda-optimized and vectorized (except for one small part)
# setup:

__version__ = "1.0.0.001"

# changelog should be viewed using print(regression.__changelog__)
__changelog__ = """
1.0.0.001:
    -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
    -already vectorized (except for polynomial generation) and CUDA-optimized
"""
__author__ = (
    "Jacob Levine <jlevine@imsa.edu>",
)

__all__ = [
    'factorial',
    'take_all_pwrs',
    'set_device',
    'LinearRegKernel',
    'SigmoidalRegKernel',
    'LogRegKernel',
    'PolyRegKernel',
    'ExpRegKernel',
    'SGDTrain',
    'CustomTrain'
]
# imports (just one for now):
import torch

# set device
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# todo: document completely
def factorial(n):
    if n == 0:
        return 1
    else:
        return n * factorial(n - 1)
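
# note (added): recursive n!; used by PolyRegKernel below to count the number of
# polynomial terms, e.g. factorial(4) == 24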
def take_all_pwrs(vec, pwr):
    #todo: vectorize (kinda)
    combins = torch.combinations(vec, r=pwr, with_replacement=True)
    # allocate the output on the same device as the input so this also works on CUDA tensors
    out = torch.ones(combins.size()[0], device=vec.device)
    for i in torch.t(combins):
        out *= i
    return out
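
# note (added): take_all_pwrs returns every degree-`pwr` product of the entries of `vec`,
# e.g. for vec = tensor([x1, x2]) and pwr = 2 it yields tensor([x1*x1, x1*x2, x2*x2])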
def set_device(new_device):
    global device
    device = new_device
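
# note (added): call set_device before constructing a kernel, e.g. set_device('cpu');
# weights and biases are allocated on whatever `device` holds at construction time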
class LinearRegKernel():
    parameters = []
    weights = None
    bias = None
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return torch.matmul(self.weights, mtx) + long_bias
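
# usage sketch (added; shapes are assumptions, not part of the original file):
# for a 2-variable kernel and a 2 x N data matrix, forward returns a 1 x N row
# of predictions weights @ mtx + bias
#   k = LinearRegKernel(2)
#   y = k.forward(torch.rand(2, 5, device=device))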
class SigmoidalRegKernel():
    parameters = []
    weights = None
    bias = None
    sigmoid = torch.nn.Sigmoid()
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return self.sigmoid(torch.matmul(self.weights, mtx) + long_bias)
class LogRegKernel():
    parameters = []
    weights = None
    bias = None
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return torch.log(torch.matmul(self.weights, mtx) + long_bias)
class ExpRegKernel():
    parameters = []
    weights = None
    bias = None
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return torch.exp(torch.matmul(self.weights, mtx) + long_bias)
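
# note (added): SigmoidalRegKernel, LogRegKernel, and ExpRegKernel mirror LinearRegKernel,
# differing only in the nonlinearity applied to weights @ mtx + bias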
class PolyRegKernel():
    parameters = []
    weights = None
    bias = None
    power = None
    def __init__(self, num_vars, power):
        self.power = power
        num_terms = int(factorial(num_vars + power - 1) / factorial(power) / factorial(num_vars - 1))
        self.weights = torch.rand(num_terms, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        #TODO: Vectorize the last part
        cols = []
        for i in torch.t(mtx):
            cols.append(take_all_pwrs(i, self.power))
        new_mtx = torch.t(torch.stack(cols))
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return torch.matmul(self.weights, new_mtx) + long_bias
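
# note (added): num_terms above is the multiset coefficient C(num_vars + power - 1, power),
# i.e. the number of degree-`power` products take_all_pwrs produces per column;
# e.g. num_vars = 2, power = 2 gives 3!/2!/1! = 3 terms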
def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
    optim = torch.optim.SGD(kernel.parameters, lr=learning_rate)
    data_cuda = data.to(device)
    ground_cuda = ground.to(device)
    if (return_losses):
        losses = []
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred = kernel.forward(data_cuda)
                ls = loss(pred, ground_cuda)
                losses.append(ls.item())
                ls.backward()
                optim.step()
        return [kernel, losses]
    else:
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred = kernel.forward(data_cuda)
                ls = loss(pred, ground_cuda)
                ls.backward()
                optim.step()
        return kernel
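
# usage sketch (added; the shapes and argument values are assumptions, not part of the original file):
#   kernel = LinearRegKernel(2)
#   kernel, losses = SGDTrain(kernel, data, ground, iterations=5000, learning_rate=0.01, return_losses=True)
# where data is a 2 x N tensor of inputs and ground is a 1 x N tensor of targets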
def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
    data_cuda = data.to(device)
    ground_cuda = ground.to(device)
    if (return_losses):
        losses = []
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                # use the on-device copies so the forward pass matches the kernel's device
                pred = kernel.forward(data_cuda)
                ls = loss(pred, ground_cuda)
                losses.append(ls.item())
                ls.backward()
                optim.step()
        return [kernel, losses]
    else:
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred = kernel.forward(data_cuda)
                ls = loss(pred, ground_cuda)
                ls.backward()
                optim.step()
        return kernel
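
# minimal end-to-end sketch (added); the synthetic data, optimizer choice, and
# hyperparameters are illustrative assumptions, not part of the original module
if __name__ == "__main__":
    data = torch.rand(1, 10)                 # one input variable, ten samples
    ground = 3 * data + 1                    # targets for y = 3x + 1
    kernel = LinearRegKernel(1)
    # CustomTrain accepts any optimizer built over kernel.parameters, e.g. Adam
    optim = torch.optim.Adam(kernel.parameters, lr=0.01)
    kernel, losses = CustomTrain(kernel, optim, data, ground, iterations=2000, return_losses=True)
    print("final loss:", losses[-1])
    print("weight, bias:", kernel.weights.item(), kernel.bias.item())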