titanlearn.py v 2.0.0.000

art 2019-10-29 09:41:49 -05:00
parent 9f0d366408
commit 68006de8c0
3 changed files with 59 additions and 7 deletions


@@ -1,10 +1,10 @@
# Titan Robotics Team 2022: Data Analysis Module
# Written by Arthur Lu & Jacob Levine
# Notes:
# this should be imported as a python module using 'import analysis'
# this should be included in the local directory or environment variable
# this module has been optimized for multithreaded computing
# current benchmark of optimization: 1.33 times faster
# setup:
__version__ = "1.1.5.001"


@@ -1,9 +1,9 @@
# Titan Robotics Team 2022: CUDA-based Regressions Module
# Written by Arthur Lu & Jacob Levine
# Notes:
# this should be imported as a python module using 'import regression'
# this should be included in the local directory or environment variable
# this module is cuda-optimized and vectorized (except for one small part)
# setup:
__version__ = "1.0.0.002"


@@ -0,0 +1,52 @@
# Titan Robotics Team 2022: ML Module
# Written by Arthur Lu & Jacob Levine
# Notes:
# this should be imported as a python module using 'import titanlearn'
# this should be included in the local directory or environment variable
# this module is optimized for multithreaded computing
# this module learns from its mistakes far faster than 2022's captains
# setup:
__version__ = "2.0.0.000"
# changelog should be viewed using print(titanlearn.__changelog__)
__changelog__ = """changelog:
    2.0.0.000:
        - complete rewrite planned
        - deprecated 1.0.0.xxx versions
        - added simple training loop
    1.0.0.xxx:
        - added generation of ANNs, basic SGD training
"""
__author__ = (
    "Arthur Lu <arthurlu@ttic.edu>, "
    "Jacob Levine <jlevine@ttic.edu>,"
)

__all__ = [
    'train',
]
import torch
import torch.optim as optim


def train(device, net, epochs, trainloader, optimizer, criterion):
    for epoch in range(epochs):  # loop over the dataset multiple times
        for i, data in enumerate(trainloader, 0):
            # move the batch to the target device (CPU or GPU)
            inputs = data[0].to(device)
            labels = data[1].to(device)
            optimizer.zero_grad()  # clear gradients from the previous step
            outputs = net(inputs)  # forward pass
            loss = criterion(outputs, labels.to(torch.float))
            loss.backward()        # backpropagate
            optimizer.step()       # update the weights
    print("finished training")
    return net