diff --git a/analysis-master/analysis.egg-info/PKG-INFO b/analysis-master/analysis.egg-info/PKG-INFO
index ce78f47e..3d2c5284 100644
--- a/analysis-master/analysis.egg-info/PKG-INFO
+++ b/analysis-master/analysis.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: analysis
-Version: 1.0.0.6
+Version: 1.0.0.7
 Summary: analysis package developed by Titan Scouting for The Red Alliance
 Home-page: https://github.com/titanscout2022/tr2022-strategy
 Author: The Titan Scouting Team
diff --git a/data analysis/analysis/__init__.py b/analysis-master/analysis/__init__.py
similarity index 100%
rename from data analysis/analysis/__init__.py
rename to analysis-master/analysis/__init__.py
diff --git a/data analysis/analysis/__pycache__/__init__.cpython-37.pyc b/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/__init__.cpython-37.pyc
rename to analysis-master/analysis/__pycache__/__init__.cpython-37.pyc
diff --git a/data analysis/analysis/__pycache__/analysis.cpython-36.pyc b/analysis-master/analysis/__pycache__/analysis.cpython-36.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/analysis.cpython-36.pyc
rename to analysis-master/analysis/__pycache__/analysis.cpython-36.pyc
diff --git a/data analysis/analysis/__pycache__/analysis.cpython-37.pyc b/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/analysis.cpython-37.pyc
rename to analysis-master/analysis/__pycache__/analysis.cpython-37.pyc
diff --git a/data analysis/analysis/__pycache__/regression.cpython-37.pyc b/analysis-master/analysis/__pycache__/regression.cpython-37.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/regression.cpython-37.pyc
rename to analysis-master/analysis/__pycache__/regression.cpython-37.pyc
diff --git a/data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc b/analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc
rename to analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc
diff --git a/data analysis/analysis/__pycache__/trueskill.cpython-37.pyc b/analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc
similarity index 100%
rename from data analysis/analysis/__pycache__/trueskill.cpython-37.pyc
rename to analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc
diff --git a/data analysis/analysis/analysis.py b/analysis-master/analysis/analysis.py
similarity index 100%
rename from data analysis/analysis/analysis.py
rename to analysis-master/analysis/analysis.py
diff --git a/data analysis/analysis/regression.py b/analysis-master/analysis/regression.py
similarity index 100%
rename from data analysis/analysis/regression.py
rename to analysis-master/analysis/regression.py
diff --git a/data analysis/analysis/titanlearn.py b/analysis-master/analysis/titanlearn.py
similarity index 100%
rename from data analysis/analysis/titanlearn.py
rename to analysis-master/analysis/titanlearn.py
diff --git a/data analysis/analysis/trueskill.py b/analysis-master/analysis/trueskill.py
similarity index 100%
rename from data analysis/analysis/trueskill.py
rename to analysis-master/analysis/trueskill.py
diff --git a/data analysis/analysis/visualization.py b/analysis-master/analysis/visualization.py
similarity index 100%
rename from data analysis/analysis/visualization.py
rename to analysis-master/analysis/visualization.py
diff --git a/analysis-master/build/lib/analysis/regression.py b/analysis-master/build/lib/analysis/regression.py
index 6cbe7868..adf2a54e 100644
--- a/analysis-master/build/lib/analysis/regression.py
+++ b/analysis-master/build/lib/analysis/regression.py
@@ -1,27 +1,28 @@
 # Titan Robotics Team 2022: CUDA-based Regressions Module
 # Written by Arthur Lu & Jacob Levine
 # Notes:
-#    this should be imported as a python module using 'import regression'
-#    this should be included in the local directory or environment variable
-#    this module is cuda-optimized and vectorized (except for one small part)
+#    this module has been automatically inegrated into analysis.py, and should be callable as a class from the package
+#    this module is cuda-optimized and vectorized (except for one small part)
 # setup:
 
-__version__ = "1.0.0.002"
+__version__ = "1.0.0.003"
 
-# changelog should be viewed using print(regression.__changelog__)
+# changelog should be viewed using print(analysis.regression.__changelog__)
 __changelog__ = """
-   1.0.0.002:
-       -Added more parameters to log, exponential, polynomial
-       -Added SigmoidalRegKernelArthur, because Arthur apparently needs
-        to train the scaling and shifting of sigmoids
-
-   1.0.0.001:
-       -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
-       -already vectorized (except for polynomial generation) and CUDA-optimized
+1.0.0.003:
+    - bug fixes
+1.0.0.002:
+    -Added more parameters to log, exponential, polynomial
+    -Added SigmoidalRegKernelArthur, because Arthur apparently needs
+        to train the scaling and shifting of sigmoids
+1.0.0.001:
+    -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
+    -already vectorized (except for polynomial generation) and CUDA-optimized
 """
 
 __author__ = (
     "Jacob Levine ",
+    "Arthur Lu "
 )
 
 __all__ = [
@@ -39,35 +40,13 @@ __all__ = [
     'CustomTrain'
 ]
 
-
-# imports (just one for now):
-
-import torch
+global device
 
 device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
 
 #todo: document completely
 
-def factorial(n):
-    if n==0:
-        return 1
-    else:
-        return n*factorial(n-1)
-def num_poly_terms(num_vars, power):
-    if power == 0:
-        return 0
-    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)
-
-def take_all_pwrs(vec,pwr):
-    #todo: vectorize (kinda)
-    combins=torch.combinations(vec, r=pwr, with_replacement=True)
-    out=torch.ones(combins.size()[0])
-    for i in torch.t(combins):
-        out *= i
-    return torch.cat(out,take_all_pwrs(vec, pwr-1))
-
-def set_device(new_device):
-    global device
+def set_device(self, new_device):
     device=new_device
 
 class LinearRegKernel():
@@ -154,20 +133,39 @@ class PolyRegKernel():
     power=None
    def __init__(self, num_vars, power):
        self.power=power
-       num_terms=num_poly_terms(num_vars, power)
+       num_terms=self.num_poly_terms(num_vars, power)
        self.weights=torch.rand(num_terms, requires_grad=True, device=device)
        self.bias=torch.rand(1, requires_grad=True, device=device)
        self.parameters=[self.weights,self.bias]
+   def num_poly_terms(self,num_vars, power):
+       if power == 0:
+           return 0
+       return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1)
+   def factorial(self,n):
+       if n==0:
+           return 1
+       else:
+           return n*self.factorial(n-1)
+   def take_all_pwrs(self, vec, pwr):
+       #todo: vectorize (kinda)
+       combins=torch.combinations(vec, r=pwr, with_replacement=True)
+       out=torch.ones(combins.size()[0]).to(device).to(torch.float)
+       for i in torch.t(combins).to(device).to(torch.float):
+           out *= i
+       if pwr == 1:
+           return out
+       else:
+           return torch.cat((out,self.take_all_pwrs(vec, pwr-1)))
    def forward(self,mtx):
        #TODO: Vectorize the last part
        cols=[]
        for i in torch.t(mtx):
-           cols.append(take_all_pwrs(i,self.power))
+           cols.append(self.take_all_pwrs(i,self.power))
        new_mtx=torch.t(torch.stack(cols))
        long_bias=self.bias.repeat([1,mtx.size()[1]])
        return torch.matmul(self.weights,new_mtx)+long_bias
 
-def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
+def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
    optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
    data_cuda=data.to(device)
    ground_cuda=ground.to(device)
@@ -192,7 +190,7 @@ def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, lea
            optim.step()
    return kernel
 
-def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
+def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
    data_cuda=data.to(device)
    ground_cuda=ground.to(device)
    if (return_losses):
@@ -214,4 +212,4 @@ def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations
            ls=loss(pred,ground_cuda)
            ls.backward()
            optim.step()
-   return kernel
+   return kernel
\ No newline at end of file
diff --git a/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl b/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl
deleted file mode 100644
index 46500f86..00000000
Binary files a/analysis-master/dist/analysis-1.0.0.6-py3-none-any.whl and /dev/null differ
diff --git a/analysis-master/dist/analysis-1.0.0.6.tar.gz b/analysis-master/dist/analysis-1.0.0.6.tar.gz
deleted file mode 100644
index 570b4f71..00000000
Binary files a/analysis-master/dist/analysis-1.0.0.6.tar.gz and /dev/null differ
diff --git a/analysis-master/dist/analysis-1.0.0.7-py3-none-any.whl b/analysis-master/dist/analysis-1.0.0.7-py3-none-any.whl
new file mode 100644
index 00000000..8e08c31b
Binary files /dev/null and b/analysis-master/dist/analysis-1.0.0.7-py3-none-any.whl differ
diff --git a/analysis-master/dist/analysis-1.0.0.7.tar.gz b/analysis-master/dist/analysis-1.0.0.7.tar.gz
new file mode 100644
index 00000000..3f66ab93
Binary files /dev/null and b/analysis-master/dist/analysis-1.0.0.7.tar.gz differ
diff --git a/analysis-master/setup.py b/analysis-master/setup.py
index ff6d5c3e..f95cac6b 100644
--- a/analysis-master/setup.py
+++ b/analysis-master/setup.py
@@ -2,7 +2,7 @@ import setuptools
 
 setuptools.setup(
     name="analysis", # Replace with your own username
-    version="1.0.0.006",
+    version="1.0.0.007",
     author="The Titan Scouting Team",
     author_email="titanscout2022@gmail.com",
     description="analysis package developed by Titan Scouting for The Red Alliance",
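Note on the helpers this diff folds into PolyRegKernel: num_poly_terms counts the monomials the kernel will weight by summing, for each degree k from 1 to power, the multiset coefficient C(num_vars + k - 1, k). Below is a standalone sketch of the same recursion, using math.factorial in place of the class's recursive factorial; the example values are illustrative only.

# Standalone sketch of PolyRegKernel.num_poly_terms' term-count recursion.
# Each level adds the number of degree-`power` monomials over `num_vars`
# variables, i.e. C(num_vars + power - 1, power), then recurses on power - 1.
from math import factorial

def num_poly_terms(num_vars, power):
    if power == 0:
        return 0
    exact = factorial(num_vars + power - 1) // (factorial(power) * factorial(num_vars - 1))
    return exact + num_poly_terms(num_vars, power - 1)

print(num_poly_terms(2, 2))  # 5 -> x^2, xy, y^2, x, y
print(num_poly_terms(3, 2))  # 9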
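The changelog's "bug fixes" entry corresponds to the new take_all_pwrs method: the old module-level version had no pwr == 1 base case and passed two tensors to torch.cat instead of a tuple. Below is a standalone CPU copy of the fixed recursion (device handling dropped for brevity; the sample vector is illustrative) showing the feature vector it builds.

# Standalone copy of the fixed take_all_pwrs recursion, run on CPU.
# For each degree from pwr down to 1 it multiplies out every combination
# (with replacement) of the input variables and concatenates the results.
import torch

def take_all_pwrs(vec, pwr):
    combins = torch.combinations(vec, r=pwr, with_replacement=True)
    out = torch.ones(combins.size()[0]).to(torch.float)
    for i in torch.t(combins).to(torch.float):
        out *= i
    if pwr == 1:
        return out
    return torch.cat((out, take_all_pwrs(vec, pwr - 1)))

vec = torch.tensor([2.0, 3.0])
print(take_all_pwrs(vec, 2))  # tensor([4., 6., 9., 2., 3.]) -> x^2, xy, y^2, x, y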
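set_device, SGDTrain and CustomTrain also gain a self parameter here, in line with the note that regression.py is now integrated into analysis.py and meant to be called as a class; the integrated call path itself is not part of this diff. The self-contained sketch below only mirrors the kernel-plus-SGD training pattern those functions implement, in plain PyTorch; LinearKernelSketch and the toy data are assumptions for illustration, not package API.

# Minimal sketch of the kernel + SGD training pattern used in regression.py.
# It mirrors LinearRegKernel/SGDTrain rather than importing the package.
import torch

device = "cuda:0" if torch.cuda.is_available() else "cpu"

class LinearKernelSketch():
    # raw weight/bias tensors collected in a plain list, which is what
    # torch.optim.SGD(kernel.parameters, ...) expects
    def __init__(self, num_vars):
        self.weights = torch.rand(num_vars, requires_grad=True, device=device)
        self.bias = torch.rand(1, requires_grad=True, device=device)
        self.parameters = [self.weights, self.bias]
    def forward(self, mtx):
        # mtx is (num_vars, num_samples), matching the package's convention
        long_bias = self.bias.repeat([1, mtx.size()[1]])
        return torch.matmul(self.weights, mtx) + long_bias

# toy single-variable data: y = 2x + 1
data = torch.linspace(0, 1, 50, device=device).reshape(1, -1)
ground = 2 * data + 1

kernel = LinearKernelSketch(num_vars=1)
optim = torch.optim.SGD(kernel.parameters, lr=0.1)
loss_fn = torch.nn.MSELoss()
for _ in range(1000):  # same default iteration count as SGDTrain
    optim.zero_grad()
    pred = kernel.forward(data)
    loss = loss_fn(pred, ground)
    loss.backward()
    optim.step()

print(kernel.weights.item(), kernel.bias.item())  # approaches 2 and 1

The (num_vars, num_samples) data layout follows the forward() implementations in the diff, which transpose the input and repeat the bias across mtx.size()[1] columns.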