Mirror of https://github.com/titanscouting/tra-analysis.git (synced 2024-12-26 09:39:10 +00:00)
Added CUDA support to the cudaregress notebook
This commit is contained in:
parent f5b9a678fc
commit 3ec7e5fed5
@@ -6,12 +6,13 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import torch"
+"import torch\n",
+"device='cuda:0' if torch.cuda.is_available() else 'cpu'"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": 29,
 "metadata": {},
 "outputs": [],
 "source": [
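The first cell now selects a device string once and falls back to the CPU when no GPU is present. A minimal sketch of that idiom, runnable outside the notebook (variable names are illustrative):

    import torch

    # 'cuda:0' is the first visible GPU; torch also accepts plain 'cuda'.
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Tensors created with device=... are allocated on that device directly,
    # so no later copy is needed.
    w = torch.rand(3, requires_grad=True, device=device)
    print(w.device)  # cuda:0 on a CUDA machine, cpu otherwise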
@@ -29,13 +30,16 @@
 " out *= i\n",
 " return out\n",
 "\n",
+"def set_device(new_device):\n",
+" device=new_device\n",
+"\n",
 "class LinearRegKernel():\n",
 " parameters= []\n",
 " weights=None\n",
 " bias=None\n",
 " def __init__(self, num_vars):\n",
-" self.weights=torch.rand(num_vars, requires_grad=True)\n",
-" self.bias=torch.rand(1, requires_grad=True)\n",
+" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
 " self.parameters=[self.weights,self.bias]\n",
 " def forward(self,mtx):\n",
 " long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
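One caveat with the set_device helper added in this hunk: assigning to device inside the function only rebinds a local name, so the module-level device read by the kernel classes stays unchanged unless it is declared global. A small sketch of that global-variable variant (a hypothetical fix, not part of the diff):

    device = 'cpu'

    def set_device(new_device):
        # Without the global declaration the assignment below would create a
        # local variable and leave the module-level `device` untouched.
        global device
        device = new_device

    set_device('cuda:0')
    print(device)  # cuda:0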
@@ -47,8 +51,8 @@
 " bias=None\n",
 " sigmoid=torch.nn.Sigmoid()\n",
 " def __init__(self, num_vars):\n",
-" self.weights=torch.rand(num_vars)\n",
-" self.bias=torch.rand(1)\n",
+" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
 " self.parameters=[self.weights,self.bias]\n",
 " def forward(self,mtx):\n",
 " long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -59,8 +63,8 @@
 " weights=None\n",
 " bias=None\n",
 " def __init__(self, num_vars):\n",
-" self.weights=torch.rand(num_vars)\n",
-" self.bias=torch.rand(1)\n",
+" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
 " self.parameters=[self.weights,self.bias]\n",
 " def forward(self,mtx):\n",
 " long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -71,8 +75,8 @@
 " weights=None\n",
 " bias=None\n",
 " def __init__(self, num_vars):\n",
-" self.weights=torch.rand(num_vars)\n",
-" self.bias=torch.rand(1)\n",
+" self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
 " self.parameters=[self.weights,self.bias]\n",
 " def forward(self,mtx):\n",
 " long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -86,8 +90,8 @@
 " def __init__(self, num_vars, power):\n",
 " self.power=power\n",
 " num_terms=int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1))\n",
-" self.weights=torch.rand(num_terms)\n",
-" self.bias=torch.rand(1)\n",
+" self.weights=torch.rand(num_terms, requires_grad=True, device=device)\n",
+" self.bias=torch.rand(1, requires_grad=True, device=device)\n",
 " self.parameters=[self.weights,self.bias]\n",
 " def forward(self,mtx):\n",
 " #TODO: Vectorize the last part\n",
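The num_terms expression kept as context in this hunk counts the monomials of total degree power in num_vars variables (combinations with repetition, C(num_vars+power-1, power)). A quick check of that arithmetic with assumed values num_vars=3, power=2:

    from math import factorial

    num_vars, power = 3, 2  # illustrative values
    num_terms = int(factorial(num_vars + power - 1) / factorial(power) / factorial(num_vars - 1))
    print(num_terms)  # 4! / 2! / 2! = 6, i.e. x^2, y^2, z^2, xy, xz, yz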
@@ -100,13 +104,15 @@
 "\n",
 "def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):\n",
 " optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)\n",
+" data_cuda=data.to(device)\n",
+" ground_cuda=ground.to(device)\n",
 " if (return_losses):\n",
 " losses=[]\n",
 " for i in range(iterations):\n",
 " with torch.set_grad_enabled(True):\n",
 " optim.zero_grad()\n",
-" pred=kernel.forward(data)\n",
-" ls=loss(pred,ground)\n",
+" pred=kernel.forward(data_cuda)\n",
+" ls=loss(pred,ground_cuda)\n",
 " losses.append(ls.item())\n",
 " ls.backward()\n",
 " optim.step()\n",
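SGDTrain now copies data and ground to the active device once, before the training loop, rather than inside it; .to(device) is a no-op when the tensor already lives there. A minimal sketch of the same pattern, with illustrative tensor shapes:

    data = torch.rand(3, 2)        # built on the CPU
    data_cuda = data.to(device)    # one host-to-device copy (no-op if already on `device`)

    for _ in range(1000):
        pred = (data_cuda * 2).sum()   # every iteration now stays on `device`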
@@ -115,13 +121,15 @@
 " for i in range(iterations):\n",
 " with torch.set_grad_enabled(True):\n",
 " optim.zero_grad()\n",
-" pred=kernel.forward(data)\n",
-" ls=loss(pred,ground)\n",
+" pred=kernel.forward(data_cuda)\n",
+" ls=loss(pred,ground_cuda)\n",
 " ls.backward()\n",
 " optim.step() \n",
 " return kernel\n",
 "\n",
 "def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):\n",
+" data_cuda=data.to(device)\n",
+" ground_cuda=ground.to(device)\n",
 " if (return_losses):\n",
 " losses=[]\n",
 " for i in range(iterations):\n",
@@ -137,8 +145,8 @@
 " for i in range(iterations):\n",
 " with torch.set_grad_enabled(True):\n",
 " optim.zero_grad()\n",
-" pred=kernel.forward(data)\n",
-" ls=loss(pred,ground)\n",
+" pred=kernel.forward(data_cuda)\n",
+" ls=loss(pred,ground_cuda)\n",
 " ls.backward()\n",
 " optim.step() \n",
 " return kernel"
@@ -146,31 +154,46 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": 31,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"tensor([[1.0000, 2.0000]], grad_fn=<AddBackward0>)"
+"tensor([[1.0000, 2.0000]], device='cuda:0', grad_fn=<AddBackward0>)"
 ]
 },
-"execution_count": 3,
+"execution_count": 31,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"model=SGDTrain(LinearRegKernel(3),torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float),torch.tensor([[1,2]]).to(torch.float),iterations=10000, learning_rate=.01, return_losses=True)\n",
-"model[0].forward(torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float))"
+"model=SGDTrain(LinearRegKernel(3),torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda(),torch.tensor([[1,2]]).to(torch.float).cuda(),iterations=10000, learning_rate=.01, return_losses=True)\n",
+"model[0].forward(torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda())"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 30,
 "metadata": {},
-"outputs": [],
-"source": []
+"outputs": [
+{
+"data": {
+"text/plain": [
+"[tensor([0.2347, 0.4494, 0.3156], device='cuda:0', requires_grad=True),\n",
+" tensor([0.9541], device='cuda:0', requires_grad=True)]"
+]
+},
+"execution_count": 30,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"kernel=LinearRegKernel(3)\n",
+"kernel.parameters\n"
+]
 },
 {
 "cell_type": "code",
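The demo cell above now calls .cuda() on its input tensors, which fails on a CPU-only machine. A CPU-safe sketch of the same cell, assuming the device variable from the first cell and the SGDTrain/LinearRegKernel definitions above (with return_losses=True the notebook indexes model[0], which appears to be the trained kernel):

    x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=torch.float, device=device)
    y = torch.tensor([[1, 2]], dtype=torch.float, device=device)
    model = SGDTrain(LinearRegKernel(3), x, y, iterations=10000, learning_rate=.01, return_losses=True)
    model[0].forward(x)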