diff --git a/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc b/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc
deleted file mode 100644
index dd1607ef..00000000
Binary files a/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc and /dev/null differ
diff --git a/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc b/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc
deleted file mode 100644
index 46f1441d..00000000
Binary files a/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc and /dev/null differ
diff --git a/analysis-master/build.sh b/analysis-master/build.sh
old mode 100644
new mode 100755
diff --git a/analysis-master/build/lib/analysis/analysis.py b/analysis-master/build/lib/analysis/analysis.py
index 03a1aa11..9297585b 100644
--- a/analysis-master/build/lib/analysis/analysis.py
+++ b/analysis-master/build/lib/analysis/analysis.py
@@ -278,7 +278,6 @@ import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-import torch
 try:
     from analysis import trueskill as Trueskill
 except:
@@ -287,10 +286,6 @@ except:
 class error(ValueError):
     pass
 
-def _init_device(): # initiates computation device for ANNs
-    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-    return device
-
 def load_csv(filepath):
     with open(filepath, newline='') as csvfile:
         file_array = np.array(list(csv.reader(csvfile)))
@@ -700,225 +695,6 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite
     return kernel, RegressionMetrics(predictions, outputs_test)
 
-class Regression:
-
-    # Titan Robotics Team 2022: CUDA-based Regressions Module
-    # Written by Arthur Lu & Jacob Levine
-    # Notes:
-    #    this module has been automatically inegrated into analysis.py, and should be callable as a class from the package
-    #    this module is cuda-optimized and vectorized (except for one small part)
-    # setup:
-
-    __version__ = "1.0.0.003"
-
-    # changelog should be viewed using print(analysis.regression.__changelog__)
-    __changelog__ = """
-    1.0.0.003:
-        - bug fixes
-    1.0.0.002:
-        -Added more parameters to log, exponential, polynomial
-        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-        to train the scaling and shifting of sigmoids
-
-    1.0.0.001:
-        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-        -already vectorized (except for polynomial generation) and CUDA-optimized
-    """
-
-    __author__ = (
-        "Jacob Levine ",
-        "Arthur Lu "
-    )
-
-    __all__ = [
-        'factorial',
-        'take_all_pwrs',
-        'num_poly_terms',
-        'set_device',
-        'LinearRegKernel',
-        'SigmoidalRegKernel',
-        'LogRegKernel',
-        'PolyRegKernel',
-        'ExpRegKernel',
-        'SigmoidalRegKernelArthur',
-        'SGDTrain',
-        'CustomTrain'
-    ]
-
-    global device
-
-    device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
-
-    #todo: document completely
-
-    def set_device(self, new_device):
-        device=new_device
-
-    class LinearRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,mtx)+long_bias
-
-    class SigmoidalRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
-
-    class SigmoidalRegKernelArthur():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class LogRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class ExpRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class PolyRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        power=None
-        def __init__(self, num_vars, power):
-            self.power=power
-            num_terms=self.num_poly_terms(num_vars, power)
-            self.weights=torch.rand(num_terms, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def num_poly_terms(self,num_vars, power):
-            if power == 0:
-                return 0
-            return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
-        def factorial(self,n):
-            if n==0:
-                return 1
-            else:
-                return n*self.factorial(n-1)
-        def take_all_pwrs(self, vec, pwr):
-            #todo: vectorize (kinda)
-            combins=torch.combinations(vec, r=pwr, with_replacement=True)
-            out=torch.ones(combins.size()[0]).to(device).to(torch.float)
-            for i in torch.t(combins).to(device).to(torch.float):
-                out *= i
-            if pwr == 1:
-                return out
-            else:
-                return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
-        def forward(self,mtx):
-            #TODO: Vectorize the last part
-            cols=[]
-            for i in torch.t(mtx):
-                cols.append(self.take_all_pwrs(i,self.power))
-            new_mtx=torch.t(torch.stack(cols))
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,new_mtx)+long_bias
-
-    def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
-        optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
-    def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data)
-                    ls=loss(pred,ground)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
 class Glicko2:
 
     _tau = 0.5
@@ -1016,4 +792,4 @@ class Glicko2:
 
     def did_not_compete(self):
 
-        self._preRatingRD()
\ No newline at end of file
+        self._preRatingRD()
diff --git a/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl b/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl
index bc4762c9..46500f86 100644
Binary files a/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl and b/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl differ
diff --git a/analysis-master/dist/analysis-1.0.0.6.tar.gz b/analysis-master/dist/analysis-1.0.0.6.tar.gz
index 2a848dc8..570b4f71 100644
Binary files a/analysis-master/dist/analysis-1.0.0.6.tar.gz and b/analysis-master/dist/analysis-1.0.0.6.tar.gz differ
diff --git a/data analysis/__pycache__/data.cpython-37.pyc b/data analysis/__pycache__/data.cpython-37.pyc
index 6b91e2a6..6d3e92a8 100644
Binary files a/data analysis/__pycache__/data.cpython-37.pyc and b/data analysis/__pycache__/data.cpython-37.pyc differ
diff --git a/analysis-master/analysis/__init__.py b/data analysis/analysis/__init__.py
similarity index 100%
rename from analysis-master/analysis/__init__.py
rename to data analysis/analysis/__init__.py
diff --git a/data analysis/analysis/__pycache__/__init__.cpython-37.pyc b/data analysis/analysis/__pycache__/__init__.cpython-37.pyc
new file mode 100644
index 00000000..fa1abb00
Binary files /dev/null and b/data analysis/analysis/__pycache__/__init__.cpython-37.pyc differ
diff --git a/analysis-master/analysis/__pycache__/analysis.cpython-36.pyc b/data analysis/analysis/__pycache__/analysis.cpython-36.pyc
similarity index 100%
rename from analysis-master/analysis/__pycache__/analysis.cpython-36.pyc
rename to data analysis/analysis/__pycache__/analysis.cpython-36.pyc
diff --git a/data analysis/analysis/__pycache__/analysis.cpython-37.pyc b/data analysis/analysis/__pycache__/analysis.cpython-37.pyc
new file mode 100644
index 00000000..b8a24f34
Binary files /dev/null and b/data analysis/analysis/__pycache__/analysis.cpython-37.pyc differ
diff --git a/analysis-master/analysis/__pycache__/regression.cpython-37.pyc b/data analysis/analysis/__pycache__/regression.cpython-37.pyc
similarity index 100%
rename from analysis-master/analysis/__pycache__/regression.cpython-37.pyc
rename to data analysis/analysis/__pycache__/regression.cpython-37.pyc
diff --git a/analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc b/data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc
similarity index 100%
rename from analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc
rename to data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc
diff --git a/analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc b/data analysis/analysis/__pycache__/trueskill.cpython-37.pyc
similarity index 70%
rename from analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc
rename to data analysis/analysis/__pycache__/trueskill.cpython-37.pyc
index 1d5c8c7b..15c7554d 100644
Binary files a/analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc and b/data analysis/analysis/__pycache__/trueskill.cpython-37.pyc differ
diff --git a/analysis-master/analysis/analysis.py b/data analysis/analysis/analysis.py
similarity index 75%
rename from analysis-master/analysis/analysis.py
rename to data analysis/analysis/analysis.py
index 03a1aa11..9297585b 100644
--- a/analysis-master/analysis/analysis.py
+++ b/data analysis/analysis/analysis.py
@@ -278,7 +278,6 @@ import scipy
 from scipy import *
 import sklearn
 from sklearn import *
-import torch
 try:
     from analysis import trueskill as Trueskill
 except:
@@ -287,10 +286,6 @@ except:
 class error(ValueError):
     pass
 
-def _init_device(): # initiates computation device for ANNs
-    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
-    return device
-
 def load_csv(filepath):
     with open(filepath, newline='') as csvfile:
         file_array = np.array(list(csv.reader(csvfile)))
@@ -700,225 +695,6 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite
     return kernel, RegressionMetrics(predictions, outputs_test)
 
-class Regression:
-
-    # Titan Robotics Team 2022: CUDA-based Regressions Module
-    # Written by Arthur Lu & Jacob Levine
-    # Notes:
-    #    this module has been automatically inegrated into analysis.py, and should be callable as a class from the package
-    #    this module is cuda-optimized and vectorized (except for one small part)
-    # setup:
-
-    __version__ = "1.0.0.003"
-
-    # changelog should be viewed using print(analysis.regression.__changelog__)
-    __changelog__ = """
-    1.0.0.003:
-        - bug fixes
-    1.0.0.002:
-        -Added more parameters to log, exponential, polynomial
-        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-        to train the scaling and shifting of sigmoids
-
-    1.0.0.001:
-        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-        -already vectorized (except for polynomial generation) and CUDA-optimized
-    """
-
-    __author__ = (
-        "Jacob Levine ",
-        "Arthur Lu "
-    )
-
-    __all__ = [
-        'factorial',
-        'take_all_pwrs',
-        'num_poly_terms',
-        'set_device',
-        'LinearRegKernel',
-        'SigmoidalRegKernel',
-        'LogRegKernel',
-        'PolyRegKernel',
-        'ExpRegKernel',
-        'SigmoidalRegKernelArthur',
-        'SGDTrain',
-        'CustomTrain'
-    ]
-
-    global device
-
-    device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
-
-    #todo: document completely
-
-    def set_device(self, new_device):
-        device=new_device
-
-    class LinearRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,mtx)+long_bias
-
-    class SigmoidalRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def forward(self,mtx):
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
-
-    class SigmoidalRegKernelArthur():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        sigmoid=torch.nn.Sigmoid()
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class LogRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class ExpRegKernel():
-        parameters= []
-        weights=None
-        in_bias=None
-        scal_mult=None
-        out_bias=None
-        def __init__(self, num_vars):
-            self.weights=torch.rand(num_vars, requires_grad=True, device=device)
-            self.in_bias=torch.rand(1, requires_grad=True, device=device)
-            self.scal_mult=torch.rand(1, requires_grad=True, device=device)
-            self.out_bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
-        def forward(self,mtx):
-            long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
-            long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
-            return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
-
-    class PolyRegKernel():
-        parameters= []
-        weights=None
-        bias=None
-        power=None
-        def __init__(self, num_vars, power):
-            self.power=power
-            num_terms=self.num_poly_terms(num_vars, power)
-            self.weights=torch.rand(num_terms, requires_grad=True, device=device)
-            self.bias=torch.rand(1, requires_grad=True, device=device)
-            self.parameters=[self.weights,self.bias]
-        def num_poly_terms(self,num_vars, power):
-            if power == 0:
-                return 0
-            return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
-        def factorial(self,n):
-            if n==0:
-                return 1
-            else:
-                return n*self.factorial(n-1)
-        def take_all_pwrs(self, vec, pwr):
-            #todo: vectorize (kinda)
-            combins=torch.combinations(vec, r=pwr, with_replacement=True)
-            out=torch.ones(combins.size()[0]).to(device).to(torch.float)
-            for i in torch.t(combins).to(device).to(torch.float):
-                out *= i
-            if pwr == 1:
-                return out
-            else:
-                return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
-        def forward(self,mtx):
-            #TODO: Vectorize the last part
-            cols=[]
-            for i in torch.t(mtx):
-                cols.append(self.take_all_pwrs(i,self.power))
-            new_mtx=torch.t(torch.stack(cols))
-            long_bias=self.bias.repeat([1,mtx.size()[1]])
-            return torch.matmul(self.weights,new_mtx)+long_bias
-
-    def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
-        optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
-    def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
-        data_cuda=data.to(device)
-        ground_cuda=ground.to(device)
-        if (return_losses):
-            losses=[]
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data)
-                    ls=loss(pred,ground)
-                    losses.append(ls.item())
-                    ls.backward()
-                    optim.step()
-            return [kernel,losses]
-        else:
-            for i in range(iterations):
-                with torch.set_grad_enabled(True):
-                    optim.zero_grad()
-                    pred=kernel.forward(data_cuda)
-                    ls=loss(pred,ground_cuda)
-                    ls.backward()
-                    optim.step()
-            return kernel
-
 class Glicko2:
 
     _tau = 0.5
@@ -1016,4 +792,4 @@ class Glicko2:
 
     def did_not_compete(self):
 
-        self._preRatingRD()
\ No newline at end of file
+        self._preRatingRD()
diff --git a/analysis-master/analysis/regression.py b/data analysis/analysis/regression.py
similarity index 100%
rename from analysis-master/analysis/regression.py
rename to data analysis/analysis/regression.py
diff --git a/analysis-master/analysis/titanlearn.py b/data analysis/analysis/titanlearn.py
similarity index 100%
rename from analysis-master/analysis/titanlearn.py
rename to data analysis/analysis/titanlearn.py
diff --git a/analysis-master/analysis/trueskill.py b/data analysis/analysis/trueskill.py
similarity index 100%
rename from analysis-master/analysis/trueskill.py
rename to data analysis/analysis/trueskill.py
diff --git a/analysis-master/analysis/visualization.py b/data analysis/analysis/visualization.py
similarity index 100%
rename from analysis-master/analysis/visualization.py
rename to data analysis/analysis/visualization.py
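Note on the deletion above: this patch removes the hard `torch` dependency from `analysis.py` by dropping the `_init_device()` helper and the embedded CUDA-based `Regression` class; the standalone `regression.py` module, which this same patch renames into `data analysis/analysis/`, still carries that functionality. For reference, here is a minimal, self-contained sketch of the pattern the deleted code implemented: select a device, build a kernel whose tensors carry `requires_grad=True`, and fit it with a plain SGD loop. This is an illustration, not the package API: the name `sgd_train` is invented for this sketch, and simple broadcasting stands in for the original's explicit `bias.repeat(...)`. (Anyone resurrecting the code should also note that the removed `CustomTrain` computed its loss-tracking branch against the un-moved `data`/`ground` tensors rather than `data_cuda`/`ground_cuda`, which would fail when `device` is a CUDA device.)

```python
import torch

# Device selection, equivalent to the deleted _init_device() helper.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

class LinearRegKernel:
    """Linear kernel in the style of the removed Regression.LinearRegKernel."""
    def __init__(self, num_vars):
        # Random initial weights/bias, created directly on the target device.
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]

    def forward(self, mtx):
        # mtx is (num_vars, num_samples); broadcasting adds the bias per sample.
        return torch.matmul(self.weights, mtx) + self.bias

def sgd_train(kernel, data, ground, iterations=1000, learning_rate=0.1):
    """SGD fitting loop in the style of the removed Regression.SGDTrain."""
    optim = torch.optim.SGD(kernel.parameters, lr=learning_rate)
    loss_fn = torch.nn.MSELoss()
    data, ground = data.to(device), ground.to(device)  # move inputs once, up front
    losses = []
    for _ in range(iterations):
        optim.zero_grad()
        pred = kernel.forward(data)
        ls = loss_fn(pred, ground)
        losses.append(ls.item())
        ls.backward()
        optim.step()
    return kernel, losses

# Usage: recover y = 3x + 1 from noisy samples of a single variable.
x = torch.linspace(0, 1, 64).unsqueeze(0)           # shape (1, 64): one var, 64 samples
y = 3 * x.squeeze(0) + 1 + 0.01 * torch.randn(64)   # targets, shape (64,)
kernel, losses = sgd_train(LinearRegKernel(1), x, y)
print(losses[0], "->", losses[-1])                  # loss should drop sharply
```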