From 2aa5b376b42cea9f4a4785f227b6f80c50120613 Mon Sep 17 00:00:00 2001
From: art
Date: Tue, 29 Oct 2019 14:21:53 -0500
Subject: [PATCH] titanlearn v 2.0.1.000

---
 .../__pycache__/titanlearn.cpython-37.pyc |  Bin 1213 -> 3058 bytes
 data analysis/analysis/titanlearn.py      |   69 +++++++++++++++++-
 2 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc b/data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc
index 1387547ef85980ae346bc74caec5799affa92504..20d728b4f689d96a58a67d5efa62afb6878a2386 100644
GIT binary patch
(base85 blob for the recompiled bytecode omitted)

diff --git a/data analysis/analysis/titanlearn.py b/data analysis/analysis/titanlearn.py
index 3e0cdd38..1ab327fa 100644
--- a/data analysis/analysis/titanlearn.py
+++ b/data analysis/analysis/titanlearn.py
@@ -7,10 +7,13 @@
 # this module learns from its mistakes far faster than 2022's captains
 
 # setup:
-__version__ = "2.0.0.001"
+__version__ = "2.0.1.000"
 
 #changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+2.0.1.000:
+    - added net, dataset, dataloader, and stdtrainer template definitions
+    - added graphloss function
 2.0.0.001:
     - added clear functions
 2.0.0.000:
@@ -33,6 +36,8 @@ __all__ = [
 
 import torch
 from os import system, name
+import matplotlib.pyplot as plt
+import numpy as np
 
 def clear():
     if name == 'nt':
@@ -40,7 +45,34 @@ def clear():
     else:
         _ = system('clear')
 
-def train(device, net, epochs, trainloader, optimizer, criterion):
+class net(torch.nn.Module): #template for standard neural net
+
+    def __init__(self):
+        super(net, self).__init__()
+
+    def forward(self, input):
+        pass
+
+class dataset(torch.utils.data.Dataset): #template for standard dataset
+
+    def __init__(self):
+        super(dataset, self).__init__()
+
+    def __getitem__(self, index):
+        pass
+
+    def __len__(self):
+        pass
+
+def dataloader(dataset, batch_size, num_workers, shuffle = True):
+
+    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+def train(device, net, epochs, trainloader, optimizer, criterion): #expects standard dataloader, which returns (inputs, labels)
+
+    dataset_len = trainloader.dataset.__len__()
+    iter_count = 0
+    running_loss = 0
+    running_loss_list = []
 
     for epoch in range(epochs): # loop over the dataset multiple times
 
@@ -56,6 +88,35 @@ def train(device, net, epochs, trainloader, optimizer, criterion):
 
             loss.backward()
             optimizer.step()
+
+            # monitoring steps below
+
+            iter_count += 1
+            running_loss += loss.item()
+            running_loss_list.append(running_loss / iter_count) #store the running average so graphloss plots a smoothed curve
+            clear()
+
+            print("training on: " + str(device))
+            print("iteration: " + str(i) + "/" + str(int(dataset_len / trainloader.batch_size)) + " | " + "epoch: " + str(epoch) + "/" + str(epochs))
+            print("current batch loss: " + str(loss.item()))
+            print("running loss: " + str(running_loss / iter_count))
 
-    return net
-    print("finished training")
\ No newline at end of file
+    print("finished training")
+
+    return net, running_loss_list
+
+def stdtrainer(net, criterion, optimizer, dataloader, epochs, batch_size):
+
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    net = net.to(device)
+    criterion = criterion.to(device)
+    #optimizers have no .to(); net.to(device) moves the parameters in place, so an optimizer built on them still works
+    trainloader = dataloader
+
+    return train(device, net, epochs, trainloader, optimizer, criterion)
+
+def graphloss(losses):
+
+    x = range(0, len(losses))
+    plt.plot(x, losses)
+    plt.show()
\ No newline at end of file
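
Usage sketch (not part of the patch): the templates above are meant to be subclassed and handed to stdtrainer. LinearNet, RandomSet, and every size and hyperparameter below are made-up placeholders, and the snippet assumes titanlearn.py is importable from the working directory.

    import torch
    import titanlearn as tl

    class LinearNet(tl.net): #hypothetical subclass of the net template
        def __init__(self):
            super(LinearNet, self).__init__()
            self.fc = torch.nn.Linear(4, 1)

        def forward(self, input):
            return self.fc(input)

    class RandomSet(tl.dataset): #hypothetical dataset: 4 random features -> their sum
        def __init__(self, n = 256):
            super(RandomSet, self).__init__()
            self.x = torch.randn(n, 4)
            self.y = self.x.sum(dim = 1, keepdim = True)

        def __getitem__(self, index):
            return self.x[index], self.y[index]

        def __len__(self):
            return len(self.x)

    model = LinearNet()
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr = 0.01)
    trainloader = tl.dataloader(RandomSet(), batch_size = 16, num_workers = 0) #num_workers = 0 avoids the __main__ guard that multiprocess loading would need

    model, losses = tl.stdtrainer(model, criterion, optimizer, trainloader, epochs = 5, batch_size = 16)
    tl.graphloss(losses) #plots the running average loss returned by train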