Added CUDA support to the cudaregress notebook

Author: jlevine18
Date: 2019-09-22 23:05:49 -05:00
Committed by: GitHub
Parent: f5b9a678fc
Commit: 3ec7e5fed5


@@ -6,12 +6,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import torch"
+    "import torch\n",
+    "device='cuda:0' if torch.cuda.is_available() else 'cpu'"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 29,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -29,13 +30,16 @@
    "        out *= i\n",
    "    return out\n",
    "\n",
+   "def set_device(new_device):\n",
+   "    device=new_device\n",
+   "\n",
    "class LinearRegKernel():\n",
    "    parameters= []\n",
    "    weights=None\n",
    "    bias=None\n",
    "    def __init__(self, num_vars):\n",
-   "        self.weights=torch.rand(num_vars, requires_grad=True)\n",
-   "        self.bias=torch.rand(1, requires_grad=True)\n",
+   "        self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+   "        self.bias=torch.rand(1, requires_grad=True, device=device)\n",
    "        self.parameters=[self.weights,self.bias]\n",
    "    def forward(self,mtx):\n",
    "        long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -47,8 +51,8 @@
    "    bias=None\n",
    "    sigmoid=torch.nn.Sigmoid()\n",
    "    def __init__(self, num_vars):\n",
-   "        self.weights=torch.rand(num_vars)\n",
-   "        self.bias=torch.rand(1)\n",
+   "        self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+   "        self.bias=torch.rand(1, requires_grad=True, device=device)\n",
    "        self.parameters=[self.weights,self.bias]\n",
    "    def forward(self,mtx):\n",
    "        long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -59,8 +63,8 @@
    "    weights=None\n",
    "    bias=None\n",
    "    def __init__(self, num_vars):\n",
-   "        self.weights=torch.rand(num_vars)\n",
-   "        self.bias=torch.rand(1)\n",
+   "        self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+   "        self.bias=torch.rand(1, requires_grad=True, device=device)\n",
    "        self.parameters=[self.weights,self.bias]\n",
    "    def forward(self,mtx):\n",
    "        long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -71,8 +75,8 @@
    "    weights=None\n",
    "    bias=None\n",
    "    def __init__(self, num_vars):\n",
-   "        self.weights=torch.rand(num_vars)\n",
-   "        self.bias=torch.rand(1)\n",
+   "        self.weights=torch.rand(num_vars, requires_grad=True, device=device)\n",
+   "        self.bias=torch.rand(1, requires_grad=True, device=device)\n",
    "        self.parameters=[self.weights,self.bias]\n",
    "    def forward(self,mtx):\n",
    "        long_bias=self.bias.repeat([1,mtx.size()[1]])\n",
@@ -86,8 +90,8 @@
    "    def __init__(self, num_vars, power):\n",
    "        self.power=power\n",
    "        num_terms=int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1))\n",
-   "        self.weights=torch.rand(num_terms)\n",
-   "        self.bias=torch.rand(1)\n",
+   "        self.weights=torch.rand(num_terms, requires_grad=True, device=device)\n",
+   "        self.bias=torch.rand(1, requires_grad=True, device=device)\n",
    "        self.parameters=[self.weights,self.bias]\n",
    "    def forward(self,mtx):\n",
    "        #TODO: Vectorize the last part\n",
@@ -100,13 +104,15 @@
    "\n",
    "def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):\n",
    "    optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)\n",
+   "    data_cuda=data.to(device)\n",
+   "    ground_cuda=ground.to(device)\n",
    "    if (return_losses):\n",
    "        losses=[]\n",
    "        for i in range(iterations):\n",
    "            with torch.set_grad_enabled(True):\n",
    "                optim.zero_grad()\n",
-   "                pred=kernel.forward(data)\n",
-   "                ls=loss(pred,ground)\n",
+   "                pred=kernel.forward(data_cuda)\n",
+   "                ls=loss(pred,ground_cuda)\n",
    "                losses.append(ls.item())\n",
    "                ls.backward()\n",
    "                optim.step()\n",
@@ -115,13 +121,15 @@
    "        for i in range(iterations):\n",
    "            with torch.set_grad_enabled(True):\n",
    "                optim.zero_grad()\n",
-   "                pred=kernel.forward(data)\n",
-   "                ls=loss(pred,ground)\n",
+   "                pred=kernel.forward(data_cuda)\n",
+   "                ls=loss(pred,ground_cuda)\n",
    "                ls.backward()\n",
    "                optim.step() \n",
    "    return kernel\n",
    "\n",
    "def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):\n",
+   "    data_cuda=data.to(device)\n",
+   "    ground_cuda=ground.to(device)\n",
    "    if (return_losses):\n",
    "        losses=[]\n",
    "        for i in range(iterations):\n",
@@ -137,8 +145,8 @@
    "        for i in range(iterations):\n",
    "            with torch.set_grad_enabled(True):\n",
    "                optim.zero_grad()\n",
-   "                pred=kernel.forward(data)\n",
-   "                ls=loss(pred,ground)\n",
+   "                pred=kernel.forward(data_cuda)\n",
+   "                ls=loss(pred,ground_cuda)\n",
    "                ls.backward()\n",
    "                optim.step() \n",
    "    return kernel"
@@ -146,31 +154,46 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 31,
    "metadata": {},
    "outputs": [
     {
      "data": {
       "text/plain": [
-       "tensor([[1.0000, 2.0000]], grad_fn=<AddBackward0>)"
+       "tensor([[1.0000, 2.0000]], device='cuda:0', grad_fn=<AddBackward0>)"
       ]
      },
-     "execution_count": 3,
+     "execution_count": 31,
      "metadata": {},
      "output_type": "execute_result"
     }
    ],
    "source": [
-    "model=SGDTrain(LinearRegKernel(3),torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float),torch.tensor([[1,2]]).to(torch.float),iterations=10000, learning_rate=.01, return_losses=True)\n",
-    "model[0].forward(torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float))"
+    "model=SGDTrain(LinearRegKernel(3),torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda(),torch.tensor([[1,2]]).to(torch.float).cuda(),iterations=10000, learning_rate=.01, return_losses=True)\n",
+    "model[0].forward(torch.tensor([[1,2],[3,4],[5,6]]).to(torch.float).cuda())"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 30,
    "metadata": {},
-   "outputs": [],
-   "source": []
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[tensor([0.2347, 0.4494, 0.3156], device='cuda:0', requires_grad=True),\n",
+       " tensor([0.9541], device='cuda:0', requires_grad=True)]"
+      ]
+     },
+     "execution_count": 30,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "kernel=LinearRegKernel(3)\n",
+    "kernel.parameters\n"
+   ]
   },
   {
    "cell_type": "code",