diff --git a/data analysis/analysis/__pycache__/analysis.cpython-37.pyc b/data analysis/analysis/__pycache__/analysis.cpython-37.pyc
index 312a3c47..c23175fb 100644
Binary files a/data analysis/analysis/__pycache__/analysis.cpython-37.pyc and b/data analysis/analysis/__pycache__/analysis.cpython-37.pyc differ
diff --git a/data analysis/analysis/__pycache__/regression.cpython-37.pyc b/data analysis/analysis/__pycache__/regression.cpython-37.pyc
new file mode 100644
index 00000000..cfd0b73f
Binary files /dev/null and b/data analysis/analysis/__pycache__/regression.cpython-37.pyc differ
diff --git a/data analysis/analysis/analysis.py b/data analysis/analysis/analysis.py
index 17c2cd55..1ed658ee 100644
--- a/data analysis/analysis/analysis.py
+++ b/data analysis/analysis/analysis.py
@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.1.000"
+__version__ = "1.1.1.001"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+1.1.1.001:
+    - regression_engine() bug fixes, now actually regresses
 1.1.1.000:
     - added regression_engine()
     - added all regressions except polynomial
@@ -157,7 +159,7 @@ import numba
 from numba import jit
 import numpy as np
 import math
-import regression
+from analysis import regression
 from sklearn import metrics
 from sklearn import preprocessing
 import torch
@@ -224,7 +226,7 @@ def histo_analysis(hist_data):
     return basic_stats(derivative)[0], basic_stats(derivative)[3]
 
 @jit(forceobj=True)
-def regression_engine(device, inputs, outputs, loss = torch.nn.MSELoss(), _iterations = 10000, lr = 0.1, *args):
+def regression_engine(device, inputs, outputs, args, loss = torch.nn.MSELoss(), _iterations = 10000, lr = 0.01):
 
     regressions = []
 
@@ -234,18 +236,18 @@ def regression_engine(device, inputs, outputs, loss = torch.nn.MSELoss(), _itera
 
         if 'linear' in args:
 
-            model = regression.SGDTrain(regression.LinearRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).cuda(), torch.tensor(outputs).to(torch.float).cuda(), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            model = regression.SGDTrain(regression.LinearRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).cuda(), torch.tensor([outputs]).to(torch.float).cuda(), iterations=_iterations, learning_rate=lr, return_losses=True)
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         if 'log' in args:
 
             model = regression.SGDTrain(regression.LogRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).cuda(), torch.tensor(outputs).to(torch.float).cuda(), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         if 'exp' in args:
 
             model = regression.SGDTrain(regression.ExpRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).cuda(), torch.tensor(outputs).to(torch.float).cuda(), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         #if 'poly' in args:
 
@@ -254,7 +256,7 @@ def regression_engine(device, inputs, outputs, loss = torch.nn.MSELoss(), _itera
         if 'sig' in args:
 
             model = regression.SGDTrain(regression.SigmoidalRegKernelArthur(len(inputs)), torch.tensor(inputs).to(torch.float).cuda(), torch.tensor(outputs).to(torch.float).cuda(), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
     else:
 
@@ -263,17 +265,17 @@ def regression_engine(device, inputs, outputs, loss = torch.nn.MSELoss(), _itera
         if 'linear' in args:
 
             model = regression.SGDTrain(regression.LinearRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float), torch.tensor(outputs).to(torch.float), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         if 'log' in args:
 
             model = regression.SGDTrain(regression.LogRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float), torch.tensor(outputs).to(torch.float), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         if 'exp' in args:
 
             model = regression.SGDTrain(regression.ExpRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float), torch.tensor(outputs).to(torch.float), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
 
         #if 'poly' in args:
 
@@ -282,7 +284,9 @@ def regression_engine(device, inputs, outputs, loss = torch.nn.MSELoss(), _itera
         if 'sig' in args:
 
             model = regression.SGDTrain(regression.SigmoidalRegKernelArthur(len(inputs)), torch.tensor(inputs).to(torch.float), torch.tensor(outputs).to(torch.float), iterations=_iterations, learning_rate=lr, return_losses=True)
-            regressions.append([model[0].parameter, model[1][::-1][0]])
+            regressions.append([model[0].parameters, model[1][::-1][0]])
+
+    return regressions
 
 @jit(forceobj=True)
 def r_squared(predictions, targets): # assumes equal size inputs
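
Reviewer note: below is a minimal, hypothetical usage sketch of the updated regression_engine() signature. The diff makes three caller-visible changes: `args` is promoted to an explicit positional parameter (previously a `*args` tail), the default learning rate drops from 0.1 to 0.01, and the function now actually returns its `regressions` list. The import path, toy data, and kernel-name strings here are illustrative assumptions, not taken from the repository.

```python
# Hypothetical usage sketch of regression_engine() after this change.
# Assumptions (not from the diff): the module is importable as
# analysis.analysis, and `inputs` is a list of feature rows matching
# what regression.LinearRegKernel(len(inputs)) expects.
import torch

from analysis import analysis

# toy, roughly linear data: one feature row, five samples
inputs = [[0.0, 1.0, 2.0, 3.0, 4.0]]
outputs = [0.1, 1.9, 4.2, 5.8, 8.1]

# take the CUDA code path only when a GPU is actually available
device = "cuda" if torch.cuda.is_available() else "cpu"

# `args` is now a single collection of requested kernel names
results = analysis.regression_engine(device, inputs, outputs,
                                     ["linear", "log", "exp", "sig"])

# each entry is [fitted kernel parameters, most recent loss]
# (model[1][::-1][0] in the diff picks the last recorded loss)
for params, last_loss in results:
    print(params, last_loss)
```

Promoting `args` to a real parameter is likely what makes the function "actually regress": with the old signature, any regression names passed positionally would have bound to the `loss`, `_iterations`, and `lr` defaults before ever reaching `*args`, leaving `args` empty and every `if ... in args` branch skipped.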