From d38744438b85e6623ae27c492313b6d56d406ecd Mon Sep 17 00:00:00 2001
From: Dev Singh
Date: Fri, 6 Mar 2020 11:50:07 -0600
Subject: [PATCH] working

---
 data analysis/analysis/analysis.py   |  4 +-
 data analysis/analysis/regression.py | 82 ++++++++++++++--------------
 data analysis/superscript.py         | 11 +++-
 3 files changed, 51 insertions(+), 46 deletions(-)

diff --git a/data analysis/analysis/analysis.py b/data analysis/analysis/analysis.py
index 9297585b..3521862e 100644
--- a/data analysis/analysis/analysis.py
+++ b/data analysis/analysis/analysis.py
@@ -7,10 +7,12 @@
 #    current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.001"
+__version__ = "1.1.13.002"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.002:
+        - removed torch requirement, and moved Regression back to regression.py
     1.1.13.001:
         - bug fix with linear regression not returning a proper value
         - cleaned up regression
diff --git a/data analysis/analysis/regression.py b/data analysis/analysis/regression.py
index 6cbe7868..adf2a54e 100644
--- a/data analysis/analysis/regression.py
+++ b/data analysis/analysis/regression.py
@@ -1,27 +1,28 @@
 # Titan Robotics Team 2022: CUDA-based Regressions Module
 # Written by Arthur Lu & Jacob Levine
 # Notes:
-#    this should be imported as a python module using 'import regression'
-#    this should be included in the local directory or environment variable
-#    this module is cuda-optimized and vectorized (except for one small part)
+#    this module has been automatically inegrated into analysis.py, and should be callable as a class from the package
+#    this module is cuda-optimized and vectorized (except for one small part)
 # setup:
 
-__version__ = "1.0.0.002"
+__version__ = "1.0.0.003"
 
-# changelog should be viewed using print(regression.__changelog__)
+# changelog should be viewed using print(analysis.regression.__changelog__)
 __changelog__ = """
-    1.0.0.002:
-        -Added more parameters to log, exponential, polynomial
-        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-         to train the scaling and shifting of sigmoids
-
-    1.0.0.001:
-        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-        -already vectorized (except for polynomial generation) and CUDA-optimized
+1.0.0.003:
+    - bug fixes
+1.0.0.002:
+    -Added more parameters to log, exponential, polynomial
+    -Added SigmoidalRegKernelArthur, because Arthur apparently needs
+     to train the scaling and shifting of sigmoids
+1.0.0.001:
+    -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
+    -already vectorized (except for polynomial generation) and CUDA-optimized
 """
 
 __author__ = (
     "Jacob Levine ",
+    "Arthur Lu "
 )
 
 __all__ = [
@@ -39,35 +40,13 @@ __all__ = [
     'CustomTrain'
 ]
 
-
-# imports (just one for now):
-
-import torch
+global device
 
 device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
 
 #todo: document completely
 
-def factorial(n):
-    if n==0:
-        return 1
-    else:
-        return n*factorial(n-1)
-def num_poly_terms(num_vars, power):
-    if power == 0:
-        return 0
-    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)
-
-def take_all_pwrs(vec,pwr):
-    #todo: vectorize (kinda)
-    combins=torch.combinations(vec, r=pwr, with_replacement=True)
-    out=torch.ones(combins.size()[0])
-    for i in torch.t(combins):
-        out *= i
-    return torch.cat(out,take_all_pwrs(vec, pwr-1))
-
-def set_device(new_device):
-    global device
+def set_device(self, new_device):
     device=new_device
 
 class LinearRegKernel():
@@ -154,20 +133,39 @@ class PolyRegKernel():
     power=None
     def __init__(self, num_vars, power):
         self.power=power
-        num_terms=num_poly_terms(num_vars, power)
+        num_terms=self.num_poly_terms(num_vars, power)
         self.weights=torch.rand(num_terms, requires_grad=True, device=device)
         self.bias=torch.rand(1, requires_grad=True, device=device)
         self.parameters=[self.weights,self.bias]
+    def num_poly_terms(self,num_vars, power):
+        if power == 0:
+            return 0
+        return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
+    def factorial(self,n):
+        if n==0:
+            return 1
+        else:
+            return n*self.factorial(n-1)
+    def take_all_pwrs(self, vec, pwr):
+        #todo: vectorize (kinda)
+        combins=torch.combinations(vec, r=pwr, with_replacement=True)
+        out=torch.ones(combins.size()[0]).to(device).to(torch.float)
+        for i in torch.t(combins).to(device).to(torch.float):
+            out *= i
+        if pwr == 1:
+            return out
+        else:
+            return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
     def forward(self,mtx):
         #TODO: Vectorize the last part
         cols=[]
         for i in torch.t(mtx):
-            cols.append(take_all_pwrs(i,self.power))
+            cols.append(self.take_all_pwrs(i,self.power))
         new_mtx=torch.t(torch.stack(cols))
         long_bias=self.bias.repeat([1,mtx.size()[1]])
         return torch.matmul(self.weights,new_mtx)+long_bias
 
-def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
+def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
     optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
     data_cuda=data.to(device)
     ground_cuda=ground.to(device)
@@ -192,7 +190,7 @@ def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, lea
             optim.step()
         return kernel
 
-def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
+def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
     data_cuda=data.to(device)
     ground_cuda=ground.to(device)
     if (return_losses):
@@ -214,4 +212,4 @@ def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations
             ls=loss(pred,ground_cuda)
             ls.backward()
             optim.step()
-        return kernel
+        return kernel
\ No newline at end of file
diff --git a/data analysis/superscript.py b/data analysis/superscript.py
index 7ee79e5d..82dcd4f3 100644
--- a/data analysis/superscript.py
+++ b/data analysis/superscript.py
@@ -3,11 +3,16 @@
 # Notes:
 # setup:
 
-__version__ = "0.0.3.000"
+__version__ = "0.0.4.000"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
-    0.0.3.00:
+    0.0.4.000:
+        - fixed spelling issue in __changelog__
+        - addressed nan bug in regression
+        - fixed errors on line 335 with metrics calling incorrect key "glicko2"
+        - fixed errors in metrics computing
+    0.0.3.000:
         - added analysis to pit data
     0.0.2.001:
         - minor stability patches
@@ -124,7 +129,7 @@ def main():
         d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
 
         print(" pushing to database")
-        push_to_database(apikey, competition, results, metrics, pit)
+        push_to_database(apikey, competition, results, pit)
         print(" pushed to database")
 
 def load_config(file):
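For readers following the PolyRegKernel hunk above: num_poly_terms uses a stars-and-bars factorial ratio to count the monomials of degree 1 through `power` over `num_vars` variables, which is exactly how many feature products take_all_pwrs emits per input column. Below is an illustrative, torch-free re-derivation of that count; the helper name is hypothetical, not part of the patch, and it assumes Python 3.8+ for math.comb.

```python
from math import comb

def count_poly_terms(num_vars: int, power: int) -> int:
    # Monomials of exact degree d over num_vars variables (chosen with
    # replacement) number C(num_vars + d - 1, d); summing d = 1..power
    # mirrors the recursion in PolyRegKernel.num_poly_terms.
    return sum(comb(num_vars + d - 1, d) for d in range(1, power + 1))

# Two variables, power 2 -> x^2, x*y, y^2, x, y: five terms,
# matching the number of entries take_all_pwrs produces per column.
assert count_poly_terms(2, 2) == 5
```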
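The SGDTrain fragments visible in the hunks follow a plain gradient-descent loop: an SGD optimizer over the kernel's raw parameter tensors, a forward pass, an MSE loss, backward, step. A minimal sketch of that pattern, using a hypothetical toy kernel rather than the module's own classes (whose full definitions fall outside this diff) and omitting the `self` placeholder the patch adds to the training functions:

```python
import torch

class ToyLinearKernel:
    # shaped like the kernels in regression.py: raw tensors plus a parameters list
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True)
        self.bias = torch.rand(1, requires_grad=True)
        self.parameters = [self.weights, self.bias]

    def forward(self, mtx):
        # mtx is (num_vars, num_samples); the bias broadcasts across samples
        return torch.matmul(self.weights, mtx) + self.bias

def sgd_train(kernel, data, ground, iterations=1000, learning_rate=0.1):
    # same loop structure as the SGDTrain fragment:
    # zero_grad -> forward -> loss -> backward -> step
    loss_fn = torch.nn.MSELoss()
    optim = torch.optim.SGD(kernel.parameters, lr=learning_rate)
    for _ in range(iterations):
        optim.zero_grad()
        pred = kernel.forward(data)
        ls = loss_fn(pred, ground)
        ls.backward()
        optim.step()
    return kernel

# fit y = 2x + 1 on four points
data = torch.tensor([[0.0, 1.0, 2.0, 3.0]])
ground = torch.tensor([1.0, 3.0, 5.0, 7.0])
fitted = sgd_train(ToyLinearKernel(1), data, ground)
print(fitted.weights, fitted.bias)  # should approach 2 and 1
```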