Merge pull request #4 from titanscout2022/comp-edits

Comp edits merge
ltcptgeneral committed 2020-03-06 20:29:50 -06:00 (committed by GitHub)
commit 484f266659
22 changed files with 100 additions and 539 deletions

analysis-master/build.sh Normal file → Executable file

@@ -278,7 +278,6 @@ import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-import torch
 try:
     from analysis import trueskill as Trueskill
 except:
@@ -287,10 +286,6 @@ except:
 class error(ValueError):
     pass
-def _init_device(): # initiates computation device for ANNs
-    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-    return device
 def load_csv(filepath):
     with open(filepath, newline='') as csvfile:
         file_array = np.array(list(csv.reader(csvfile)))
@@ -700,225 +695,6 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite
     return kernel, RegressionMetrics(predictions, outputs_test)
-class Regression:
-
-    # Titan Robotics Team 2022: CUDA-based Regressions Module
-    # Written by Arthur Lu & Jacob Levine
-    # Notes:
-    #     this module has been automatically integrated into analysis.py, and should be callable as a class from the package
-    #     this module is cuda-optimized and vectorized (except for one small part)
-    # setup:
-
-    __version__ = "1.0.0.003"
-
-    # changelog should be viewed using print(analysis.regression.__changelog__)
-    __changelog__ = """
-    1.0.0.003:
-        - bug fixes
-    1.0.0.002:
-        -Added more parameters to log, exponential, polynomial
-        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-         to train the scaling and shifting of sigmoids
-    1.0.0.001:
-        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-        -already vectorized (except for polynomial generation) and CUDA-optimized
-    """
-
-    __author__ = (
-        "Jacob Levine <jlevine@imsa.edu>",
-        "Arthur Lu <learthurgo@gmail.com>"
-    )
-
-    __all__ = [
-        'factorial',
-        'take_all_pwrs',
-        'num_poly_terms',
-        'set_device',
-        'LinearRegKernel',
-        'SigmoidalRegKernel',
-        'LogRegKernel',
-        'PolyRegKernel',
-        'ExpRegKernel',
-        'SigmoidalRegKernelArthur',
-        'SGDTrain',
-        'CustomTrain'
-    ]
-
-    global device
-
-    device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
-
-    #todo: document completely
-
-    def set_device(self, new_device):
-        device=new_device
-
-    class LinearRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,mtx)+long_bias
-
-    class SigmoidalRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
-
-    class SigmoidalRegKernelArthur():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class LogRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class ExpRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class PolyRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        power=None
-        def __init__(self, num_vars, power):
-            self.power=power
-            num_terms=self.num_poly_terms(num_vars, power)
-            self.weights=torch.rand(num_terms, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def num_poly_terms(self,num_vars, power):
-            if power == 0:
-                return 0
-            return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
-        def factorial(self,n):
-            if n==0:
-                return 1
-            else:
-                return n*self.factorial(n-1)
-        def take_all_pwrs(self, vec, pwr):
-            #todo: vectorize (kinda)
-            combins=torch.combinations(vec, r=pwr, with_replacement=True)
-            out=torch.ones(combins.size()[0]).to(device).to(torch.float)
-            for i in torch.t(combins).to(device).to(torch.float):
-                out *= i
-            if pwr == 1:
-                return out
-            else:
-                return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
-        def forward(self,mtx):
-            #TODO: Vectorize the last part
-            cols=[]
-            for i in torch.t(mtx):
-                cols.append(self.take_all_pwrs(i,self.power))
-            new_mtx=torch.t(torch.stack(cols))
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,new_mtx)+long_bias
-
-    def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
-        optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
-    def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data)
-                    ls=loss(pred,ground)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
 class Glicko2:
     _tau = 0.5
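
For context, the Regression class removed above trained each kernel by plain gradient descent on tensors shaped (num_vars, num_samples). A minimal self-contained sketch of that training pattern, with made-up data (a hypothetical example, not part of the commit):

import torch

# device selection as in the removed module
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# a linear kernel's parameters: weights (num_vars,) and a scalar bias
weights = torch.rand(1, requires_grad=True, device=device)
bias = torch.rand(1, requires_grad=True, device=device)

# toy data: one variable, 100 samples of y = 3x + 2
data = torch.linspace(0, 1, 100, device=device).reshape(1, 100)
ground = 3 * data + 2

optim = torch.optim.SGD([weights, bias], lr=0.1)
loss_fn = torch.nn.MSELoss()

for _ in range(1000):
    optim.zero_grad()
    # LinearRegKernel.forward: weights @ mtx plus a bias repeated across samples
    pred = torch.matmul(weights, data) + bias.repeat([1, data.size()[1]])
    ls = loss_fn(pred, ground)
    ls.backward()
    optim.step()

print(weights.item(), bias.item())  # should approach 3 and 2
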

Binary file not shown.


@@ -7,10 +7,18 @@
 #    current benchmark of optimization: 1.33 times faster
 # setup:

-__version__ = "1.1.13.001"
+__version__ = "1.1.13.005"

 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.005:
+        - cleaned up package
+    1.1.13.004:
+        - small fixes to regression to improve performance
+    1.1.13.003:
+        - filtered nans from regression
+    1.1.13.002:
+        - removed torch requirement, and moved Regression back to regression.py
     1.1.13.001:
         - bug fix with linear regression not returning a proper value
         - cleaned up regression
@@ -239,7 +247,6 @@ __author__ = (
 )

 __all__ = [
-    '_init_device',
     'load_csv',
     'basic_stats',
     'z_score',
@@ -260,7 +267,6 @@ __all__ = [
     'SVM',
     'random_forest_classifier',
     'random_forest_regressor',
-    'Regression',
    'Glicko2',
     # all statistics functions left out due to integration in other functions
 ]
@@ -273,12 +279,10 @@ import csv
 import numba
 from numba import jit
 import numpy as np
-import math
 import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-import torch
 try:
     from analysis import trueskill as Trueskill
 except:
@@ -287,10 +291,6 @@ except:
 class error(ValueError):
     pass
-def _init_device(): # initiates computation device for ANNs
-    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-    return device
 def load_csv(filepath):
     with open(filepath, newline='') as csvfile:
         file_array = np.array(list(csv.reader(csvfile)))
@@ -349,15 +349,15 @@ def histo_analysis(hist_data):

 def regression(inputs, outputs, args): # inputs, outputs expects N-D array

+    X = np.array(inputs)
+    y = np.array(outputs)

     regressions = []

     if 'lin' in args: # formula: ax + b

         try:
-            X = np.array(inputs)
-            y = np.array(outputs)

             def func(x, a, b):

                 return a * x + b
@@ -374,9 +374,6 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array

         try:
-            X = np.array(inputs)
-            y = np.array(outputs)

             def func(x, a, b, c, d):

                 return a * np.log(b*(x + c)) + d
@@ -393,9 +390,6 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array

         try:
-            X = np.array(inputs)
-            y = np.array(outputs)

             def func(x, a, b, c, d):

                 return a * np.exp(b*(x + c)) + d
@@ -410,8 +404,8 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array

     if 'ply' in args: # formula: a + bx^1 + cx^2 + dx^3 + ...

-        inputs = [inputs]
-        outputs = [outputs]
+        inputs = np.array([inputs])
+        outputs = np.array([outputs])

         plys = []
         limit = len(outputs[0])
@@ -435,9 +429,6 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array

         try:
-            X = np.array(inputs)
-            y = np.array(outputs)

             def func(x, a, b, c, d):

                 return a * np.tanh(b*(x + c)) + d
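
Each branch of regression() fits one of these closed-form kernels with scipy.optimize.curve_fit; the edits above only hoist the X/y conversion out of the per-branch try blocks and drop the duplicates. A sketch of the underlying pattern, with made-up data:

import numpy as np
from scipy.optimize import curve_fit

inputs = [0, 1, 2, 3, 4]             # hypothetical match indices
outputs = [1.1, 2.9, 5.2, 6.8, 9.1]  # hypothetical scores

X = np.array(inputs)
y = np.array(outputs)

def func(x, a, b):  # the 'lin' kernel: ax + b
    return a * x + b

popt, pcov = curve_fit(func, X, y)
print(popt)  # roughly [2.0, 1.0] for this data
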
@@ -700,225 +691,6 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite
     return kernel, RegressionMetrics(predictions, outputs_test)
-class Regression:
-
-    # Titan Robotics Team 2022: CUDA-based Regressions Module
-    # Written by Arthur Lu & Jacob Levine
-    # Notes:
-    #     this module has been automatically integrated into analysis.py, and should be callable as a class from the package
-    #     this module is cuda-optimized and vectorized (except for one small part)
-    # setup:
-
-    __version__ = "1.0.0.003"
-
-    # changelog should be viewed using print(analysis.regression.__changelog__)
-    __changelog__ = """
-    1.0.0.003:
-        - bug fixes
-    1.0.0.002:
-        -Added more parameters to log, exponential, polynomial
-        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-         to train the scaling and shifting of sigmoids
-    1.0.0.001:
-        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-        -already vectorized (except for polynomial generation) and CUDA-optimized
-    """
-
-    __author__ = (
-        "Jacob Levine <jlevine@imsa.edu>",
-        "Arthur Lu <learthurgo@gmail.com>"
-    )
-
-    __all__ = [
-        'factorial',
-        'take_all_pwrs',
-        'num_poly_terms',
-        'set_device',
-        'LinearRegKernel',
-        'SigmoidalRegKernel',
-        'LogRegKernel',
-        'PolyRegKernel',
-        'ExpRegKernel',
-        'SigmoidalRegKernelArthur',
-        'SGDTrain',
-        'CustomTrain'
-    ]
-
-    global device
-
-    device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
-
-    #todo: document completely
-
-    def set_device(self, new_device):
-        device=new_device
-
-    class LinearRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,mtx)+long_bias
-
-    class SigmoidalRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
-
-    class SigmoidalRegKernelArthur():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class LogRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class ExpRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class PolyRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        power=None
-        def __init__(self, num_vars, power):
-            self.power=power
-            num_terms=self.num_poly_terms(num_vars, power)
-            self.weights=torch.rand(num_terms, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def num_poly_terms(self,num_vars, power):
-            if power == 0:
-                return 0
-            return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
-        def factorial(self,n):
-            if n==0:
-                return 1
-            else:
-                return n*self.factorial(n-1)
-        def take_all_pwrs(self, vec, pwr):
-            #todo: vectorize (kinda)
-            combins=torch.combinations(vec, r=pwr, with_replacement=True)
-            out=torch.ones(combins.size()[0]).to(device).to(torch.float)
-            for i in torch.t(combins).to(device).to(torch.float):
-                out *= i
-            if pwr == 1:
-                return out
-            else:
-                return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
-        def forward(self,mtx):
-            #TODO: Vectorize the last part
-            cols=[]
-            for i in torch.t(mtx):
-                cols.append(self.take_all_pwrs(i,self.power))
-            new_mtx=torch.t(torch.stack(cols))
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,new_mtx)+long_bias
-
-    def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
-        optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
-    def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data)
-                    ls=loss(pred,ground)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
 class Glicko2:
     _tau = 0.5
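
SGDTrain and CustomTrain (deleted above) differ only in whether the optimizer is constructed internally or passed in, so any torch.optim optimizer can drive the same loop. A minimal sketch of the injected-optimizer variant, assuming hypothetical data and Adam in place of SGD:

import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"

weights = torch.rand(1, requires_grad=True, device=device)
bias = torch.rand(1, requires_grad=True, device=device)

# toy data: 50 samples of y = 2x - 1
data = torch.linspace(0, 1, 50, device=device).reshape(1, 50)
ground = 2 * data - 1

optim = torch.optim.Adam([weights, bias], lr=0.05)  # caller-supplied, as in CustomTrain
loss_fn = torch.nn.MSELoss()

for _ in range(2000):
    optim.zero_grad()
    pred = torch.matmul(weights, data) + bias.repeat([1, data.size()[1]])
    ls = loss_fn(pred, ground)
    ls.backward()
    optim.step()

print(weights.item(), bias.item())  # should approach 2 and -1
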


@@ -1,27 +1,28 @@
 # Titan Robotics Team 2022: CUDA-based Regressions Module
 # Written by Arthur Lu & Jacob Levine
 # Notes:
-#    this should be imported as a python module using 'import regression'
-#    this should be included in the local directory or environment variable
-#    this module is cuda-optimized and vectorized (except for one small part)
+#    this module has been automatically integrated into analysis.py, and should be callable as a class from the package
+#    this module is cuda-optimized and vectorized (except for one small part)
 # setup:

-__version__ = "1.0.0.002"
+__version__ = "1.0.0.003"

-# changelog should be viewed using print(regression.__changelog__)
+# changelog should be viewed using print(analysis.regression.__changelog__)
 __changelog__ = """
+    1.0.0.003:
+        - bug fixes
     1.0.0.002:
         -Added more parameters to log, exponential, polynomial
         -Added SigmoidalRegKernelArthur, because Arthur apparently needs
         to train the scaling and shifting of sigmoids
     1.0.0.001:
         -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
         -already vectorized (except for polynomial generation) and CUDA-optimized
 """
 __author__ = (
     "Jacob Levine <jlevine@imsa.edu>",
+    "Arthur Lu <learthurgo@gmail.com>"
 )

 __all__ = [
@@ -39,35 +40,13 @@ __all__ = [
     'CustomTrain'
 ]

-global device
-
-# imports (just one for now):
-import torch
-
 device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"

 #todo: document completely

-def factorial(n):
-    if n==0:
-        return 1
-    else:
-        return n*factorial(n-1)
-def num_poly_terms(num_vars, power):
-    if power == 0:
-        return 0
-    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)
-def take_all_pwrs(vec,pwr):
-    #todo: vectorize (kinda)
-    combins=torch.combinations(vec, r=pwr, with_replacement=True)
-    out=torch.ones(combins.size()[0])
-    for i in torch.t(combins):
-        out *= i
-    return torch.cat(out,take_all_pwrs(vec, pwr-1))
-def set_device(new_device):
-    global device
+def set_device(self, new_device):
     device=new_device

 class LinearRegKernel():
@@ -154,20 +133,39 @@ class PolyRegKernel():
     power=None
     def __init__(self, num_vars, power):
         self.power=power
-        num_terms=num_poly_terms(num_vars, power)
+        num_terms=self.num_poly_terms(num_vars, power)
         self.weights=torch.rand(num_terms, requires_grad=True, device=device)
         self.bias=torch.rand(1, requires_grad=True, device=device)
         self.parameters=[self.weights,self.bias]
+    def num_poly_terms(self,num_vars, power):
+        if power == 0:
+            return 0
+        return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
+    def factorial(self,n):
+        if n==0:
+            return 1
+        else:
+            return n*self.factorial(n-1)
+    def take_all_pwrs(self, vec, pwr):
+        #todo: vectorize (kinda)
+        combins=torch.combinations(vec, r=pwr, with_replacement=True)
+        out=torch.ones(combins.size()[0]).to(device).to(torch.float)
+        for i in torch.t(combins).to(device).to(torch.float):
+            out *= i
+        if pwr == 1:
+            return out
+        else:
+            return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
     def forward(self,mtx):
         #TODO: Vectorize the last part
         cols=[]
         for i in torch.t(mtx):
-            cols.append(take_all_pwrs(i,self.power))
+            cols.append(self.take_all_pwrs(i,self.power))
         new_mtx=torch.t(torch.stack(cols))
         long_bias=self.bias.repeat([1,mtx.size()[1]])
         return torch.matmul(self.weights,new_mtx)+long_bias

-def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
+def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
     optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
     data_cuda=data.to(device)
     ground_cuda=ground.to(device)
@@ -192,7 +190,7 @@ def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, lea
             optim.step()
     return kernel

-def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
+def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
     data_cuda=data.to(device)
     ground_cuda=ground.to(device)
     if (return_losses):
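
The PolyRegKernel helpers moved into the class above implement standard combinatorics: num_poly_terms sums the multiset coefficient C(num_vars+power-1, power) over each degree, and take_all_pwrs builds the corresponding monomials with torch.combinations. A standalone check of both claims:

import math
import torch

def num_poly_terms(num_vars, power):
    # equivalent to the factorial form in the diff: C(n+p-1, p) per degree, summed
    if power == 0:
        return 0
    return math.comb(num_vars + power - 1, power) + num_poly_terms(num_vars, power - 1)

print(num_poly_terms(2, 2))  # 5 -> x, y, x^2, xy, y^2

vec = torch.tensor([2.0, 3.0])  # one column of the input matrix
combins = torch.combinations(vec, r=2, with_replacement=True)
print(combins.prod(dim=1))  # tensor([4., 6., 9.]) -> the x^2, xy, y^2 terms
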


@@ -1,9 +1,11 @@
 2020ilch
 balls-blocked,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
 balls-collected,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-lower,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
+balls-lower-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
+balls-lower-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
 balls-started,basic_stats,historical_analyss,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-upper,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
+balls-upper-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
+balls-upper-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
 wheel-mechanism
 low-balls
 high-balls
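
The config format above is one competition code followed by variable,test,... rows; bare rows such as wheel-mechanism carry no tests and are treated as pit variables. A hedged sketch of a parser for this format (the commit's actual loader is not shown in this diff):

def parse_config(text):
    lines = [line.strip() for line in text.splitlines() if line.strip()]
    competition = lines[0]
    tests = {}
    for line in lines[1:]:
        variable, *test_list = line.split(",")
        tests[variable] = test_list  # empty list -> pit-only variable
    return competition, tests

competition, tests = parse_config("2020ilch\nballs-blocked,basic_stats\nwheel-mechanism")
print(competition, tests)  # 2020ilch {'balls-blocked': ['basic_stats'], 'wheel-mechanism': []}
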



@@ -3,11 +3,19 @@
 # Notes:
 # setup:

-__version__ = "0.0.3.000"
+__version__ = "0.0.4.001"

 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
-    0.0.3.00:
+    0.0.4.001:
+        - fixed bug where X range for regression was determined before sanitization
+        - better sanitized data
+    0.0.4.000:
+        - fixed spelling issue in __changelog__
+        - addressed nan bug in regression
+        - fixed errors on line 335 with metrics calling incorrect key "glicko2"
+        - fixed errors in metrics computing
+    0.0.3.000:
         - added analysis to pit data
     0.0.2.001:
         - minor stability patches
@@ -71,6 +79,7 @@ __all__ = [
 from analysis import analysis as an
 import data as d
+import numpy as np
 import matplotlib.pyplot as plt
 import time
 import warnings
@@ -114,7 +123,7 @@ def main():
         print("  finished tests")
         print("  running metrics")
-        metrics = metricsloop(tbakey, apikey, competition, previous_time)
+        metricsloop(tbakey, apikey, competition, previous_time)
         print("  finished metrics")
         print("  running pit analysis")
@@ -124,7 +133,7 @@ def main():
         d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
         print("  pushing to database")
-        push_to_database(apikey, competition, results, metrics, pit)
+        push_to_database(apikey, competition, results, pit)
         print("  pushed to database")

 def load_config(file):
@@ -155,37 +164,37 @@ def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]

 def simplestats(data, test):

+    data = np.array(data)
+    data = data[np.isfinite(data)]
+    ranges = list(range(len(data)))

     if(test == "basic_stats"):
         return an.basic_stats(data)

     if(test == "historical_analysis"):
-        return an.histo_analysis([list(range(len(data))), data])
+        return an.histo_analysis([ranges, data])

     if(test == "regression_linear"):
-        return an.regression(list(range(len(data))), data, ['lin'])
+        return an.regression(ranges, data, ['lin'])

     if(test == "regression_logarithmic"):
-        return an.regression(list(range(len(data))), data, ['log'])
+        return an.regression(ranges, data, ['log'])

     if(test == "regression_exponential"):
-        return an.regression(list(range(len(data))), data, ['exp'])
+        return an.regression(ranges, data, ['exp'])

     if(test == "regression_polynomial"):
-        return an.regression(list(range(len(data))), data, ['ply'])
+        return an.regression(ranges, data, ['ply'])

     if(test == "regression_sigmoidal"):
-        return an.regression(list(range(len(data))), data, ['sig'])
+        return an.regression(ranges, data, ['sig'])

-def push_to_database(apikey, competition, results, metrics, pit):
+def push_to_database(apikey, competition, results, pit):

     for team in results:

         d.push_team_tests_data(apikey, competition, team, results[team])

-    for team in metrics:
-
-        d.push_team_metrics_data(apikey, competition, team, metrics[team])
-
     for variable in pit:

         d.push_team_pit_data(apikey, competition, variable, pit[variable])
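
The three lines added to simplestats are the sanitization fix called out in the 0.0.4.001 changelog: non-finite entries are dropped before the X range is built, so X and y stay the same length. In isolation:

import numpy as np

data = [3.0, float("nan"), 4.0, float("inf"), 5.0]  # hypothetical per-match values

data = np.array(data)
data = data[np.isfinite(data)]   # drop nan/inf first
ranges = list(range(len(data)))  # then build the X range

print(ranges, data.tolist())  # [0, 1, 2] [3.0, 4.0, 5.0]
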
@@ -279,6 +288,14 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
         blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
         blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

+    temp_vector = {}
+    temp_vector.update(red)
+    temp_vector.update(blu)
+
+    for team in temp_vector:
+
+        d.push_team_metrics_data(apikey, competition, team, temp_vector[team])
+
     """ not functional for now
     red_trueskill = []
     blu_trueskill = []
@@ -305,11 +322,6 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
     """

-    return_vector.update(red)
-    return_vector.update(blu)
-
-    return return_vector
-
 def load_metrics(apikey, competition, match, group_name):

     group = {}
@@ -324,16 +336,17 @@ def load_metrics(apikey, competition, match, group_name):
             gl2 = {"score": 1500, "rd": 250, "vol": 0.06}
             ts = {"mu": 25, "sigma": 25/3}

-            d.push_team_metrics_data(apikey, competition, team, {"elo":elo, "gliko2":gl2,"trueskill":ts})
+            #d.push_team_metrics_data(apikey, competition, team, {"elo":elo, "gl2":gl2,"trueskill":ts})

             group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

         else:

             metrics = db_data["metrics"]

             elo = metrics["elo"]
-            gl2 = metrics["gliko2"]
-            ts = metrics["trueskill"]
+            gl2 = metrics["gl2"]
+            ts = metrics["ts"]

             group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
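
load_metrics either seeds a team with default ratings or reads back what push_team_metrics_data stored; the fix above makes the stored keys ("gl2", "ts") match the keys read back. A minimal sketch of the load-or-default shape, with a hypothetical elo default since this hunk does not show one:

def load_or_default(db_data):
    if db_data is None:
        elo = 1500                                     # hypothetical starting score
        gl2 = {"score": 1500, "rd": 250, "vol": 0.06}  # defaults from the hunk above
        ts = {"mu": 25, "sigma": 25/3}
    else:
        metrics = db_data["metrics"]
        elo, gl2, ts = metrics["elo"], metrics["gl2"], metrics["ts"]
    return {"elo": elo, "gl2": gl2, "ts": ts}

print(load_or_default(None)["gl2"]["rd"])  # 250
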


@@ -34,7 +34,7 @@ import numpy as np
 # %%

-fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=(20,10))
+fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=(80,15))

 i = 0
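
The widened figsize stretches one shared-y subplot per pit variable across the figure. A sketch with stand-in pit data (the real dict comes from the database):

import matplotlib.pyplot as plt

pit = {"wheel-mechanism": {"yes": 12, "no": 5}, "low-balls": {"yes": 10, "no": 7}}

fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=(80, 15))

i = 0
for variable, counts in pit.items():
    ax[i].bar(list(counts.keys()), list(counts.values()))
    ax[i].set_title(variable)
    i += 1

plt.show()
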