don't need the testing notebook up here anymore
This commit is contained in:
parent bda2db7003
commit c5d087dada
@@ -1,248 +0,0 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "device='cuda:0' if torch.cuda.is_available() else 'cpu'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "def factorial(n):\n",
    "    if n==0:\n",
    "        return 1\n",
    "    else:\n",
    "        return n*factorial(n-1)\n",
    "\n",
    "def take_all_pwrs(vec,pwr):\n",
    "    #todo: vectorize (kinda)\n",
    "    # all degree-pwr products of the entries of vec, via combinations with replacement\n",
    "    combins=torch.combinations(vec, r=pwr, with_replacement=True)\n",
    "    # allocate on the same device as vec so GPU inputs don't hit a cross-device multiply\n",
    "    out=torch.ones(combins.size()[0], device=vec.device)\n",
    "    for i in torch.t(combins):\n",
    "        out *= i\n",
    "    return out\n",
    "\n",
    "def set_device(new_device):\n",
    "    # 'global' is required here; a bare assignment would only bind a local variable\n",
    "    global device\n",
    "    device=new_device\n",
    "\n",
"class LinearRegKernel():\n",
|
|
||||||
" parameters= []\n",
|
|
||||||
" weights=None\n",
|
|
||||||
" bias=None\n",
|
|
||||||
" def __init__(self, num_vars):\n",
|
|
||||||
" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
|
|
||||||
" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
|
|
||||||
" self.parameters=[self.weights,self.bias]\n",
|
|
||||||
" def forward(self,mtx):\n",
|
|
||||||
" long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
|
|
||||||
" return torch.matmul(self.weights,mtx)+long_bias\n",
|
|
||||||
" \n",
|
|
||||||
"class SigmoidalRegKernel():\n",
|
|
||||||
" parameters= []\n",
|
|
||||||
" weights=None\n",
|
|
||||||
" bias=None\n",
|
|
||||||
" sigmoid=torch.nn.Sigmoid()\n",
|
|
||||||
" def __init__(self, num_vars):\n",
|
|
||||||
" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
|
|
||||||
" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
|
|
||||||
" self.parameters=[self.weights,self.bias]\n",
|
|
||||||
" def forward(self,mtx):\n",
|
|
||||||
" long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
|
|
||||||
" return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)\n",
|
|
||||||
"\n",
|
|
||||||
"class LogRegKernel():\n",
|
|
||||||
" parameters= []\n",
|
|
||||||
" weights=None\n",
|
|
||||||
" bias=None\n",
|
|
||||||
" def __init__(self, num_vars):\n",
|
|
||||||
" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
|
|
||||||
" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
|
|
||||||
" self.parameters=[self.weights,self.bias]\n",
|
|
||||||
" def forward(self,mtx):\n",
|
|
||||||
" long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
|
|
||||||
" return torch.log(torch.matmul(self.weights,mtx)+long_bias)\n",
|
|
||||||
"\n",
|
|
||||||
"class ExpRegKernel():\n",
|
|
||||||
" parameters= []\n",
|
|
||||||
" weights=None\n",
|
|
||||||
" bias=None\n",
|
|
||||||
" def __init__(self, num_vars):\n",
|
|
||||||
" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
|
|
||||||
" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
|
|
||||||
" self.parameters=[self.weights,self.bias]\n",
|
|
||||||
" def forward(self,mtx):\n",
|
|
||||||
" long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
|
|
||||||
" return torch.exp(torch.matmul(self.weights,mtx)+long_bias)\n",
|
|
||||||
"\n",
|
|
||||||
"class PolyRegKernel():\n",
|
|
||||||
" parameters= []\n",
|
|
||||||
" weights=None\n",
|
|
||||||
" bias=None\n",
|
|
||||||
" power=None\n",
|
|
||||||
" def __init__(self, num_vars, power):\n",
|
|
||||||
" self.power=power\n",
|
|
||||||
" num_terms=int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1))\n",
|
|
||||||
" self.weights=torch.rand(num_terms, requires_grad=True, device=device)\n",
|
|
||||||
" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
|
|
||||||
" self.parameters=[self.weights,self.bias]\n",
|
|
||||||
" def forward(self,mtx):\n",
|
|
||||||
" #TODO: Vectorize the last part\n",
|
|
||||||
" cols=[]\n",
|
|
||||||
" for i in torch.t(mtx):\n",
|
|
||||||
" cols.append(take_all_pwrs(i,self.power))\n",
|
|
||||||
" new_mtx=torch.t(torch.stack(cols))\n",
|
|
||||||
" long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
|
|
||||||
" return torch.matmul(self.weights,new_mtx)+long_bias\n",
|
|
||||||
"\n",
|
|
||||||
"def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):\n",
|
|
||||||
" optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)\n",
|
|
||||||
" data_cuda=data.to(device)\n",
|
|
||||||
" ground_cuda=ground.to(device)\n",
|
|
||||||
" if (return_losses):\n",
|
|
||||||
" losses=[]\n",
|
|
||||||
" for i in range(iterations):\n",
|
|
||||||
" with torch.set_grad_enabled(True):\n",
|
|
||||||
" optim.zero_grad()\n",
|
|
||||||
" pred=kernel.forward(data_cuda)\n",
|
|
||||||
" ls=loss(pred,ground_cuda)\n",
|
|
||||||
" losses.append(ls.item())\n",
|
|
||||||
" ls.backward()\n",
|
|
||||||
" optim.step()\n",
|
|
||||||
" return [kernel,losses]\n",
|
|
||||||
" else:\n",
|
|
||||||
" for i in range(iterations):\n",
|
|
||||||
" with torch.set_grad_enabled(True):\n",
|
|
||||||
" optim.zero_grad()\n",
|
|
||||||
" pred=kernel.forward(data_cuda)\n",
|
|
||||||
" ls=loss(pred,ground_cuda)\n",
|
|
||||||
" ls.backward()\n",
|
|
||||||
" optim.step() \n",
|
|
||||||
" return kernel\n",
|
|
||||||
"\n",
|
|
||||||
"def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):\n",
|
|
||||||
" data_cuda=data.to(device)\n",
|
|
||||||
" ground_cuda=ground.to(device)\n",
|
|
||||||
" if (return_losses):\n",
|
|
||||||
" losses=[]\n",
|
|
||||||
" for i in range(iterations):\n",
|
|
||||||
" with torch.set_grad_enabled(True):\n",
|
|
||||||
" optim.zero_grad()\n",
|
|
||||||
" pred=kernel.forward(data)\n",
|
|
||||||
" ls=loss(pred,ground)\n",
|
|
||||||
" losses.append(ls.item())\n",
|
|
||||||
" ls.backward()\n",
|
|
||||||
" optim.step()\n",
|
|
||||||
" return [kernel,losses]\n",
|
|
||||||
" else:\n",
|
|
||||||
" for i in range(iterations):\n",
|
|
||||||
" with torch.set_grad_enabled(True):\n",
|
|
||||||
" optim.zero_grad()\n",
|
|
||||||
" pred=kernel.forward(data_cuda)\n",
|
|
||||||
" ls=loss(pred,ground_cuda)\n",
|
|
||||||
" ls.backward()\n",
|
|
||||||
" optim.step() \n",
|
|
||||||
" return kernel"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000, 2.0000]], device='cuda:0', grad_fn=<AddBackward0>)"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model=SGDTrain(LinearRegKernel(3),torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda(),torch.tensor([[1,2]]).to(torch.float).cuda(),iterations=10000, learning_rate=.01, return_losses=True)\n",
    "model[0].forward(torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[tensor([0.2347, 0.4494, 0.3156], device='cuda:0', requires_grad=True),\n",
       " tensor([0.9541], device='cuda:0', requires_grad=True)]"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "kernel=LinearRegKernel(3)\n",
    "kernel.parameters\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
"metadata": {
|
|
||||||
"kernelspec": {
|
|
||||||
"display_name": "Python 3",
|
|
||||||
"language": "python",
|
|
||||||
"name": "python3"
|
|
||||||
},
|
|
||||||
"language_info": {
|
|
||||||
"codemirror_mode": {
|
|
||||||
"name": "ipython",
|
|
||||||
"version": 3
|
|
||||||
},
|
|
||||||
"file_extension": ".py",
|
|
||||||
"mimetype": "text/x-python",
|
|
||||||
"name": "python",
|
|
||||||
"nbconvert_exporter": "python",
|
|
||||||
"pygments_lexer": "ipython3",
|
|
||||||
"version": "3.7.3"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"nbformat": 4,
|
|
||||||
"nbformat_minor": 4
|
|
||||||
}
|
|