cudaregress v 1.0.0.002

jlevine18 2019-09-26 13:35:37 -05:00 committed by GitHub
parent daab3e8c5b
commit e47b6efe71


# Titan Robotics Team 2022: CUDA-based Regressions Module
# Written by Arthur Lu & Jacob Levine
# Notes:
# this should be imported as a python module using 'import cudaregress'
# this should be included in the local directory or environment variable
# this module is cuda-optimized and vectorized (except for one small part)
# setup:

__version__ = "1.0.0.002"

# changelog should be viewed using print(cudaregress.__changelog__)
__changelog__ = """
1.0.0.002:
-Added more parameters to log, exponential, polynomial
-
1.0.0.001:
-initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-already vectorized (except for polynomial generation) and CUDA-optimized
"""

__author__ = (
    "Jacob Levine <jlevine@imsa.edu>",
)

__all__ = [
    'factorial',
    'take_all_pwrs',
    'num_poly_terms',
    'set_device',
    'LinearRegKernel',
    'SigmoidalRegKernel',
    'LogRegKernel',
    'PolyRegKernel',
    'ExpRegKernel',
    'SigmoidalRegKernelArthur',
    'SGDTrain',
    'CustomTrain'
]

# imports (just one for now):

import torch

# set device
device = "cuda:0" if torch.cuda.is_available() else "cpu"

#todo: document completely

def factorial(n):
    if n==0:
        return 1
    else:
        return n*factorial(n-1)

def num_poly_terms(num_vars, power):
    if power == 0:
        return 0
    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)

def take_all_pwrs(vec,pwr):
    #todo: vectorize (kinda)
    combins=torch.combinations(vec, r=pwr, with_replacement=True)
    out=torch.ones(combins.size()[0], device=device)
    for i in torch.t(combins):
        out *= i
    if pwr == 1:
        # base case: stop once the degree-1 terms have been produced
        return out
    return torch.cat([out, take_all_pwrs(vec, pwr-1)])

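# worked example for the two helpers above: with two variables and power 3,
# num_poly_terms(2, 3) = 4 + 3 + 2 = 9, matching the 9 monomials
# (4 of degree 3, 3 of degree 2, 2 of degree 1) that take_all_pwrs
# returns for a length-2 vector with pwr=3.
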
def set_device(new_device):
    global device
    device=new_device

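# note: set_device only affects kernels constructed and data moved after the call;
# tensors that already exist keep the device they were created on.
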
class LinearRegKernel():
    parameters= []
    weights=None
    bias=None
    def __init__(self, num_vars):
        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
        self.bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.bias]
    def forward(self,mtx):
        long_bias=self.bias.repeat([1,mtx.size()[1]])
        return torch.matmul(self.weights,mtx)+long_bias

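# note: forward expects mtx shaped (num_vars, num_observations); the matmul against the
# length-num_vars weight vector and bias.repeat([1, mtx.size()[1]]) both assume that layout.
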
class SigmoidalRegKernel():
    parameters= []
    weights=None
    bias=None
    sigmoid=torch.nn.Sigmoid()
    def __init__(self, num_vars):
        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
        self.bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.bias]
    def forward(self,mtx):
        long_bias=self.bias.repeat([1,mtx.size()[1]])
        return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)

class SigmoidalRegKernelArthur():
    parameters= []
    weights=None
    in_bias=None
    scal_mult=None
    out_bias=None
    sigmoid=torch.nn.Sigmoid()
    def __init__(self, num_vars):
        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
        self.in_bias=torch.rand(1, requires_grad=True, device=device)
        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
        self.out_bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
    def forward(self,mtx):
        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
        return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias

class LogRegKernel():
    parameters= []
    weights=None
    in_bias=None
    scal_mult=None
    out_bias=None
    def __init__(self, num_vars):
        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
        self.in_bias=torch.rand(1, requires_grad=True, device=device)
        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
        self.out_bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
    def forward(self,mtx):
        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
        return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias

class ExpRegKernel():
    parameters= []
    weights=None
    in_bias=None
    scal_mult=None
    out_bias=None
    def __init__(self, num_vars):
        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
        self.in_bias=torch.rand(1, requires_grad=True, device=device)
        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
        self.out_bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
    def forward(self,mtx):
        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
        return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias

class PolyRegKernel():
    parameters= []
    weights=None
    bias=None
    power=None
    def __init__(self, num_vars, power):
        self.power=power
        num_terms=num_poly_terms(num_vars, power)
        self.weights=torch.rand(num_terms, requires_grad=True, device=device)
        self.bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.bias]
    def forward(self,mtx):
        #TODO: Vectorize the last part
        cols=[]
        for i in torch.t(mtx):
            cols.append(take_all_pwrs(i,self.power))
        new_mtx=torch.t(torch.stack(cols))
        long_bias=self.bias.repeat([1,mtx.size()[1]])
        return torch.matmul(self.weights,new_mtx)+long_bias

def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
    optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
    data_cuda=data.to(device)
    ground_cuda=ground.to(device)
    if (return_losses):
        losses=[]
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred=kernel.forward(data_cuda)
                ls=loss(pred,ground_cuda)
                losses.append(ls.item())
                ls.backward()
                optim.step()
        return [kernel,losses]
    else:
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred=kernel.forward(data_cuda)
                ls=loss(pred,ground_cuda)
                ls.backward()
                optim.step()
        return kernel

def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
    data_cuda=data.to(device)
    ground_cuda=ground.to(device)
    if (return_losses):
        losses=[]
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred=kernel.forward(data_cuda)
                ls=loss(pred,ground_cuda)
                losses.append(ls.item())
                ls.backward()
                optim.step()
        return [kernel,losses]
    else:
        for i in range(iterations):
            with torch.set_grad_enabled(True):
                optim.zero_grad()
                pred=kernel.forward(data_cuda)
                ls=loss(pred,ground_cuda)
                ls.backward()
                optim.step()
        return kernel
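
# A minimal usage sketch, assuming data is laid out as (num_vars, num_observations) and
# ground truth as a (1, num_observations) row vector, which is what the kernels' matmul
# and bias.repeat([1, n]) expect. The toy data, iteration counts, and learning rates here
# are illustrative choices, not values from the module itself.
if __name__ == "__main__":
    # two input variables, 100 noisy observations of a known linear relationship
    true_w = torch.tensor([2.0, -1.0])
    data = torch.rand(2, 100)
    ground = (torch.matmul(true_w, data) + 0.5).unsqueeze(0) + 0.01*torch.randn(1, 100)

    # built-in SGD loop
    kernel = LinearRegKernel(num_vars=2)
    kernel, losses = SGDTrain(kernel, data, ground, iterations=2000, learning_rate=0.1, return_losses=True)
    print("SGDTrain final loss:", losses[-1])

    # same fit with a user-supplied optimizer via CustomTrain
    kernel2 = LinearRegKernel(num_vars=2)
    adam = torch.optim.Adam(kernel2.parameters, lr=0.05)
    kernel2 = CustomTrain(kernel2, adam, data, ground, iterations=2000)
    print("CustomTrain weights:", kernel2.weights, "bias:", kernel2.bias)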