From 5e9e90507bf4ac078d83f653ad3061dfeddb5299 Mon Sep 17 00:00:00 2001 From: art Date: Tue, 3 Mar 2020 20:30:54 -0600 Subject: [PATCH] packagefied analysis (finally) --- data analysis/__pycache__/data.cpython-37.pyc | Bin 2703 -> 0 bytes .../analysis.egg-info/PKG-INFO | 15 + .../analysis.egg-info/SOURCES.txt | 11 + .../analysis.egg-info/dependency_links.txt | 1 + .../analysis.egg-info/top_level.txt | 1 + .../.ipynb_checkpoints/analysis-checkpoint.py | 0 .../analysis/__init__.py | 0 .../__pycache__/__init__.cpython-37.pyc | Bin .../__pycache__/analysis.cpython-36.pyc | Bin .../__pycache__/analysis.cpython-37.pyc | Bin .../__pycache__/regression.cpython-37.pyc | Bin .../__pycache__/titanlearn.cpython-37.pyc | Bin .../__pycache__/trueskill.cpython-37.pyc | Bin .../analysis/analysis.py | 0 .../analysis/regression.py | 434 ++++---- .../analysis/titanlearn.py | 0 .../analysis/trueskill.py | 0 .../analysis/visualization.py | 0 .../build/lib/analysis/__init__.py | 0 .../build/lib/analysis/analysis.py | 952 ++++++++++++++++++ .../build/lib/analysis/regression.py | 217 ++++ .../build/lib/analysis/titanlearn.py | 122 +++ .../build/lib/analysis/trueskill.py | 907 +++++++++++++++++ .../build/lib/analysis/visualization.py | 34 + .../dist/analysis-1.0.0.0-py3-none-any.whl | Bin 0 -> 21422 bytes .../dist/analysis-1.0.0.0.tar.gz | Bin 0 -> 19298 bytes data analysis/{ => analysis-master}/setup.py | 0 data analysis/superscript.py | 4 - 28 files changed, 2477 insertions(+), 221 deletions(-) delete mode 100644 data analysis/__pycache__/data.cpython-37.pyc create mode 100644 data analysis/analysis-master/analysis.egg-info/PKG-INFO create mode 100644 data analysis/analysis-master/analysis.egg-info/SOURCES.txt create mode 100644 data analysis/analysis-master/analysis.egg-info/dependency_links.txt create mode 100644 data analysis/analysis-master/analysis.egg-info/top_level.txt rename data analysis/{ => analysis-master}/analysis/.ipynb_checkpoints/analysis-checkpoint.py (100%) rename data analysis/{ => analysis-master}/analysis/__init__.py (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/__init__.cpython-37.pyc (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/analysis.cpython-36.pyc (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/analysis.cpython-37.pyc (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/regression.cpython-37.pyc (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/titanlearn.cpython-37.pyc (100%) rename data analysis/{ => analysis-master}/analysis/__pycache__/trueskill.cpython-37.pyc (100%) rename data analysis/{ => analysis-master}/analysis/analysis.py (100%) rename data analysis/{ => analysis-master}/analysis/regression.py (97%) rename data analysis/{ => analysis-master}/analysis/titanlearn.py (100%) rename data analysis/{ => analysis-master}/analysis/trueskill.py (100%) rename data analysis/{ => analysis-master}/analysis/visualization.py (100%) create mode 100644 data analysis/analysis-master/build/lib/analysis/__init__.py create mode 100644 data analysis/analysis-master/build/lib/analysis/analysis.py create mode 100644 data analysis/analysis-master/build/lib/analysis/regression.py create mode 100644 data analysis/analysis-master/build/lib/analysis/titanlearn.py create mode 100644 data analysis/analysis-master/build/lib/analysis/trueskill.py create mode 100644 data analysis/analysis-master/build/lib/analysis/visualization.py create mode 100644 data 
analysis/analysis-master/dist/analysis-1.0.0.0-py3-none-any.whl
 create mode 100644 data analysis/analysis-master/dist/analysis-1.0.0.0.tar.gz
 rename data analysis/{ => analysis-master}/setup.py (100%)

diff --git a/data analysis/__pycache__/data.cpython-37.pyc b/data analysis/__pycache__/data.cpython-37.pyc
deleted file mode 100644
index fb5f86d2bd71756539e47ba71cd851ca7e340c99..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 2703: base85-encoded contents of the deleted bytecode cache omitted]

diff --git a/data analysis/analysis-master/analysis.egg-info/PKG-INFO b/data analysis/analysis-master/analysis.egg-info/PKG-INFO
new file mode 100644
index 00000000..a83498a0
--- /dev/null
+++ b/data analysis/analysis-master/analysis.egg-info/PKG-INFO
@@ -0,0 +1,15 @@
+Metadata-Version: 2.1
+Name: analysis
+Version: 1.0.0.0
+Summary: analysis package developed by TitanScouting and The Red Alliance
+Home-page: https://github.com/titanscout2022/tr2022-strategy
+Author: 
+Author-email: 
+License: UNKNOWN
+Description: analysis package developed by TitanScouting and The Red Alliance
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: GNU General Public License v3.0
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
diff --git a/data analysis/analysis-master/analysis.egg-info/SOURCES.txt b/data analysis/analysis-master/analysis.egg-info/SOURCES.txt
new file mode 100644
index 00000000..ea473c34
--- /dev/null
+++ b/data analysis/analysis-master/analysis.egg-info/SOURCES.txt
@@ -0,0 +1,11 @@
+setup.py
+analysis/__init__.py
+analysis/analysis.py
+analysis/regression.py
+analysis/titanlearn.py
+analysis/trueskill.py
+analysis/visualization.py
+analysis.egg-info/PKG-INFO
+analysis.egg-info/SOURCES.txt
+analysis.egg-info/dependency_links.txt
+analysis.egg-info/top_level.txt
\ No newline at end of file
diff --git a/data analysis/analysis-master/analysis.egg-info/dependency_links.txt b/data analysis/analysis-master/analysis.egg-info/dependency_links.txt
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/data analysis/analysis-master/analysis.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/data analysis/analysis-master/analysis.egg-info/top_level.txt b/data analysis/analysis-master/analysis.egg-info/top_level.txt
new file mode 100644
index 00000000..09ad3be3
--- /dev/null
+++ b/data analysis/analysis-master/analysis.egg-info/top_level.txt
@@ -0,0 +1 @@
+analysis
diff --git a/data
analysis/analysis/.ipynb_checkpoints/analysis-checkpoint.py b/data analysis/analysis-master/analysis/.ipynb_checkpoints/analysis-checkpoint.py similarity index 100% rename from data analysis/analysis/.ipynb_checkpoints/analysis-checkpoint.py rename to data analysis/analysis-master/analysis/.ipynb_checkpoints/analysis-checkpoint.py diff --git a/data analysis/analysis/__init__.py b/data analysis/analysis-master/analysis/__init__.py similarity index 100% rename from data analysis/analysis/__init__.py rename to data analysis/analysis-master/analysis/__init__.py diff --git a/data analysis/analysis/__pycache__/__init__.cpython-37.pyc b/data analysis/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc similarity index 100% rename from data analysis/analysis/__pycache__/__init__.cpython-37.pyc rename to data analysis/analysis-master/analysis/__pycache__/__init__.cpython-37.pyc diff --git a/data analysis/analysis/__pycache__/analysis.cpython-36.pyc b/data analysis/analysis-master/analysis/__pycache__/analysis.cpython-36.pyc similarity index 100% rename from data analysis/analysis/__pycache__/analysis.cpython-36.pyc rename to data analysis/analysis-master/analysis/__pycache__/analysis.cpython-36.pyc diff --git a/data analysis/analysis/__pycache__/analysis.cpython-37.pyc b/data analysis/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc similarity index 100% rename from data analysis/analysis/__pycache__/analysis.cpython-37.pyc rename to data analysis/analysis-master/analysis/__pycache__/analysis.cpython-37.pyc diff --git a/data analysis/analysis/__pycache__/regression.cpython-37.pyc b/data analysis/analysis-master/analysis/__pycache__/regression.cpython-37.pyc similarity index 100% rename from data analysis/analysis/__pycache__/regression.cpython-37.pyc rename to data analysis/analysis-master/analysis/__pycache__/regression.cpython-37.pyc diff --git a/data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc b/data analysis/analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc similarity index 100% rename from data analysis/analysis/__pycache__/titanlearn.cpython-37.pyc rename to data analysis/analysis-master/analysis/__pycache__/titanlearn.cpython-37.pyc diff --git a/data analysis/analysis/__pycache__/trueskill.cpython-37.pyc b/data analysis/analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc similarity index 100% rename from data analysis/analysis/__pycache__/trueskill.cpython-37.pyc rename to data analysis/analysis-master/analysis/__pycache__/trueskill.cpython-37.pyc diff --git a/data analysis/analysis/analysis.py b/data analysis/analysis-master/analysis/analysis.py similarity index 100% rename from data analysis/analysis/analysis.py rename to data analysis/analysis-master/analysis/analysis.py diff --git a/data analysis/analysis/regression.py b/data analysis/analysis-master/analysis/regression.py similarity index 97% rename from data analysis/analysis/regression.py rename to data analysis/analysis-master/analysis/regression.py index 4ebc101a..6cbe7868 100644 --- a/data analysis/analysis/regression.py +++ b/data analysis/analysis-master/analysis/regression.py @@ -1,217 +1,217 @@ -# Titan Robotics Team 2022: CUDA-based Regressions Module -# Written by Arthur Lu & Jacob Levine -# Notes: -# this should be imported as a python module using 'import regression' -# this should be included in the local directory or environment variable -# this module is cuda-optimized and vectorized (except for one small part) -# setup: - -__version__ = "1.0.0.002" - -# changelog 
should be viewed using print(regression.__changelog__) -__changelog__ = """ - 1.0.0.002: - -Added more parameters to log, exponential, polynomial - -Added SigmoidalRegKernelArthur, because Arthur apparently needs - to train the scaling and shifting of sigmoids - - 1.0.0.001: - -initial release, with linear, log, exponential, polynomial, and sigmoid kernels - -already vectorized (except for polynomial generation) and CUDA-optimized -""" - -__author__ = ( - "Jacob Levine ", -) - -__all__ = [ - 'factorial', - 'take_all_pwrs', - 'num_poly_terms', - 'set_device', - 'LinearRegKernel', - 'SigmoidalRegKernel', - 'LogRegKernel', - 'PolyRegKernel', - 'ExpRegKernel', - 'SigmoidalRegKernelArthur', - 'SGDTrain', - 'CustomTrain' -] - - -# imports (just one for now): - -import torch - -device = "cuda:0" if torch.torch.cuda.is_available() else "cpu" - -#todo: document completely - -def factorial(n): - if n==0: - return 1 - else: - return n*factorial(n-1) -def num_poly_terms(num_vars, power): - if power == 0: - return 0 - return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1) - -def take_all_pwrs(vec,pwr): - #todo: vectorize (kinda) - combins=torch.combinations(vec, r=pwr, with_replacement=True) - out=torch.ones(combins.size()[0]) - for i in torch.t(combins): - out *= i - return torch.cat(out,take_all_pwrs(vec, pwr-1)) - -def set_device(new_device): - global device - device=new_device - -class LinearRegKernel(): - parameters= [] - weights=None - bias=None - def __init__(self, num_vars): - self.weights=torch.rand(num_vars, requires_grad=True, device=device) - self.bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.bias] - def forward(self,mtx): - long_bias=self.bias.repeat([1,mtx.size()[1]]) - return torch.matmul(self.weights,mtx)+long_bias - -class SigmoidalRegKernel(): - parameters= [] - weights=None - bias=None - sigmoid=torch.nn.Sigmoid() - def __init__(self, num_vars): - self.weights=torch.rand(num_vars, requires_grad=True, device=device) - self.bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.bias] - def forward(self,mtx): - long_bias=self.bias.repeat([1,mtx.size()[1]]) - return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias) - -class SigmoidalRegKernelArthur(): - parameters= [] - weights=None - in_bias=None - scal_mult=None - out_bias=None - sigmoid=torch.nn.Sigmoid() - def __init__(self, num_vars): - self.weights=torch.rand(num_vars, requires_grad=True, device=device) - self.in_bias=torch.rand(1, requires_grad=True, device=device) - self.scal_mult=torch.rand(1, requires_grad=True, device=device) - self.out_bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] - def forward(self,mtx): - long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) - long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) - return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias - -class LogRegKernel(): - parameters= [] - weights=None - in_bias=None - scal_mult=None - out_bias=None - def __init__(self, num_vars): - self.weights=torch.rand(num_vars, requires_grad=True, device=device) - self.in_bias=torch.rand(1, requires_grad=True, device=device) - self.scal_mult=torch.rand(1, requires_grad=True, device=device) - self.out_bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] - def 
forward(self,mtx): - long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) - long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) - return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias - -class ExpRegKernel(): - parameters= [] - weights=None - in_bias=None - scal_mult=None - out_bias=None - def __init__(self, num_vars): - self.weights=torch.rand(num_vars, requires_grad=True, device=device) - self.in_bias=torch.rand(1, requires_grad=True, device=device) - self.scal_mult=torch.rand(1, requires_grad=True, device=device) - self.out_bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] - def forward(self,mtx): - long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) - long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) - return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias - -class PolyRegKernel(): - parameters= [] - weights=None - bias=None - power=None - def __init__(self, num_vars, power): - self.power=power - num_terms=num_poly_terms(num_vars, power) - self.weights=torch.rand(num_terms, requires_grad=True, device=device) - self.bias=torch.rand(1, requires_grad=True, device=device) - self.parameters=[self.weights,self.bias] - def forward(self,mtx): - #TODO: Vectorize the last part - cols=[] - for i in torch.t(mtx): - cols.append(take_all_pwrs(i,self.power)) - new_mtx=torch.t(torch.stack(cols)) - long_bias=self.bias.repeat([1,mtx.size()[1]]) - return torch.matmul(self.weights,new_mtx)+long_bias - -def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False): - optim=torch.optim.SGD(kernel.parameters, lr=learning_rate) - data_cuda=data.to(device) - ground_cuda=ground.to(device) - if (return_losses): - losses=[] - for i in range(iterations): - with torch.set_grad_enabled(True): - optim.zero_grad() - pred=kernel.forward(data_cuda) - ls=loss(pred,ground_cuda) - losses.append(ls.item()) - ls.backward() - optim.step() - return [kernel,losses] - else: - for i in range(iterations): - with torch.set_grad_enabled(True): - optim.zero_grad() - pred=kernel.forward(data_cuda) - ls=loss(pred,ground_cuda) - ls.backward() - optim.step() - return kernel - -def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False): - data_cuda=data.to(device) - ground_cuda=ground.to(device) - if (return_losses): - losses=[] - for i in range(iterations): - with torch.set_grad_enabled(True): - optim.zero_grad() - pred=kernel.forward(data) - ls=loss(pred,ground) - losses.append(ls.item()) - ls.backward() - optim.step() - return [kernel,losses] - else: - for i in range(iterations): - with torch.set_grad_enabled(True): - optim.zero_grad() - pred=kernel.forward(data_cuda) - ls=loss(pred,ground_cuda) - ls.backward() - optim.step() - return kernel +# Titan Robotics Team 2022: CUDA-based Regressions Module +# Written by Arthur Lu & Jacob Levine +# Notes: +# this should be imported as a python module using 'import regression' +# this should be included in the local directory or environment variable +# this module is cuda-optimized and vectorized (except for one small part) +# setup: + +__version__ = "1.0.0.002" + +# changelog should be viewed using print(regression.__changelog__) +__changelog__ = """ + 1.0.0.002: + -Added more parameters to log, exponential, polynomial + -Added SigmoidalRegKernelArthur, because Arthur apparently needs + to train the scaling and shifting of sigmoids 
+
+    1.0.0.001:
+    -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
+    -already vectorized (except for polynomial generation) and CUDA-optimized
+"""
+
+__author__ = (
+    "Jacob Levine ",
+)
+
+__all__ = [
+    'factorial',
+    'take_all_pwrs',
+    'num_poly_terms',
+    'set_device',
+    'LinearRegKernel',
+    'SigmoidalRegKernel',
+    'LogRegKernel',
+    'PolyRegKernel',
+    'ExpRegKernel',
+    'SigmoidalRegKernelArthur',
+    'SGDTrain',
+    'CustomTrain'
+]
+
+
+# imports (just one for now):
+
+import torch
+
+device = "cuda:0" if torch.torch.cuda.is_available() else "cpu"
+
+#todo: document completely
+
+def factorial(n):
+    if n==0:
+        return 1
+    else:
+        return n*factorial(n-1)
+def num_poly_terms(num_vars, power):
+    if power == 0:
+        return 0
+    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)
+
+def take_all_pwrs(vec, pwr):
+    #todo: vectorize (kinda)
+    combins=torch.combinations(vec, r=pwr, with_replacement=True)
+    out=torch.ones(combins.size()[0]).to(device).to(torch.float)
+    for i in torch.t(combins).to(device).to(torch.float):
+        out *= i
+    if pwr == 1:
+        return out
+    else:
+        return torch.cat((out, take_all_pwrs(vec, pwr-1)))
+
+def set_device(new_device):
+    global device
+    device=new_device
+
+class LinearRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return torch.matmul(self.weights,mtx)+long_bias
+
+class SigmoidalRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    sigmoid=torch.nn.Sigmoid()
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
+
+class SigmoidalRegKernelArthur():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    sigmoid=torch.nn.Sigmoid()
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class LogRegKernel():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class ExpRegKernel():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class PolyRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    power=None
+    def __init__(self, num_vars, power):
+        self.power=power
+        num_terms=num_poly_terms(num_vars, power)
+        self.weights=torch.rand(num_terms, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        #TODO: Vectorize the last part
+        cols=[]
+        for i in torch.t(mtx):
+            cols.append(take_all_pwrs(i,self.power))
+        new_mtx=torch.t(torch.stack(cols))
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return torch.matmul(self.weights,new_mtx)+long_bias
+
+def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
+    optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
+    data_cuda=data.to(device)
+    ground_cuda=ground.to(device)
+    if (return_losses):
+        losses=[]
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                losses.append(ls.item())
+                ls.backward()
+                optim.step()
+        return [kernel,losses]
+    else:
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                ls.backward()
+                optim.step()
+        return kernel
+
+def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
+    data_cuda=data.to(device)
+    ground_cuda=ground.to(device)
+    if (return_losses):
+        losses=[]
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                losses.append(ls.item())
+                ls.backward()
+                optim.step()
+        return [kernel,losses]
+    else:
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                ls.backward()
+                optim.step()
+        return kernel
diff --git a/data analysis/analysis/titanlearn.py b/data analysis/analysis-master/analysis/titanlearn.py
similarity index 100%
rename from data analysis/analysis/titanlearn.py
rename to data analysis/analysis-master/analysis/titanlearn.py
diff --git a/data analysis/analysis/trueskill.py b/data analysis/analysis-master/analysis/trueskill.py
similarity index 100%
rename from data analysis/analysis/trueskill.py
rename to data analysis/analysis-master/analysis/trueskill.py
diff --git a/data analysis/analysis/visualization.py b/data analysis/analysis-master/analysis/visualization.py
similarity index 100%
rename from data analysis/analysis/visualization.py
rename to data analysis/analysis-master/analysis/visualization.py
diff --git a/data analysis/analysis-master/build/lib/analysis/__init__.py b/data analysis/analysis-master/build/lib/analysis/__init__.py
new file mode 100644
index 00000000..e69de29b
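
A quick usage sketch for the regression module above (an editor's illustration, not part of the commit; it assumes `regression.py` is importable, torch is installed, and the data and hyperparameter values are arbitrary). SGDTrain() moves the inputs to the active device, runs plain SGD over the kernel's parameter list, and with return_losses=True also returns the per-iteration loss history:

    import torch
    import regression

    regression.set_device("cpu")            # or "cuda:0" when available
    x = torch.rand(1, 100)                  # shape (num_vars, n_samples)
    y = 3 * x + 1                           # noiseless linear ground truth
    kernel = regression.LinearRegKernel(num_vars=1)
    kernel, losses = regression.SGDTrain(kernel, x, y, iterations=5000, learning_rate=0.01, return_losses=True)
    print(kernel.weights.item(), kernel.bias.item(), losses[-1])

CustomTrain() is the same loop with a caller-supplied torch optimizer in place of the built-in SGD.
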
diff --git a/data analysis/analysis-master/build/lib/analysis/analysis.py b/data analysis/analysis-master/build/lib/analysis/analysis.py
new file mode 100644
index 00000000..40c12eac
--- /dev/null
+++ b/data analysis/analysis-master/build/lib/analysis/analysis.py
@@ -0,0 +1,952 @@
+# Titan Robotics Team 2022: Data Analysis Module
+# Written by Arthur Lu & Jacob Levine
+# Notes:
+#    this should be imported as a python module using 'import analysis'
+#    this should be included in the local directory or environment variable
+#    this module has been optimized for multithreaded computing
+#    current benchmark of optimization: 1.33 times faster
+# setup:
+
+__version__ = "1.1.12.003"
+
+# changelog should be viewed using print(analysis.__changelog__)
+__changelog__ = """changelog:
+    1.1.12.003:
+        - removed deprecated code
+    1.1.12.002:
+        - removed team first time trueskill instantiation in favor of integration in superscript.py
+    1.1.12.001:
+        - improved readability of regression outputs by stripping tensor data
+        - used map with lambda to achieve the improved readability
+        - lost numba jit support with regression, and generated_jit hangs at execution
+        - TODO: reimplement correct numba integration in regression
+    1.1.12.000:
+        - temporarily fixed polynomial regressions by using sklearn's PolynomialFeatures
+    1.1.11.010:
+        - alphabetically ordered import lists
+    1.1.11.009:
+        - bug fixes
+    1.1.11.008:
+        - bug fixes
+    1.1.11.007:
+        - bug fixes
+    1.1.11.006:
+        - tested min and max
+        - bug fixes
+    1.1.11.005:
+        - added min and max in basic_stats
+    1.1.11.004:
+        - bug fixes
+    1.1.11.003:
+        - bug fixes
+    1.1.11.002:
+        - consolidated metrics
+        - fixed __all__
+    1.1.11.001:
+        - added test/train split to RandomForestClassifier and RandomForestRegressor
+    1.1.11.000:
+        - added RandomForestClassifier and RandomForestRegressor
+        - note: untested
+    1.1.10.000:
+        - added numba.jit to remaining functions
+    1.1.9.002:
+        - kernelized PCA and KNN
+    1.1.9.001:
+        - fixed bugs with SVM and NaiveBayes
+    1.1.9.000:
+        - added SVM class, subclasses, and functions
+        - note: untested
+    1.1.8.000:
+        - added NaiveBayes classification engine
+        - note: untested
+    1.1.7.000:
+        - added knn()
+        - added confusion matrix to decisiontree()
+    1.1.6.002:
+        - changed layout of __changelog to be vscode friendly
+    1.1.6.001:
+        - added additional hyperparameters to decisiontree()
+    1.1.6.000:
+        - fixed __version__
+        - fixed __all__ order
+        - added decisiontree()
+    1.1.5.003:
+        - added pca
+    1.1.5.002:
+        - reduced import list
+        - added kmeans clustering engine
+    1.1.5.001:
+        - simplified regression by using .to(device)
+    1.1.5.000:
+        - added polynomial regression to regression(); untested
+    1.1.4.000:
+        - added trueskill()
+    1.1.3.002:
+        - renamed regression class to Regression, regression_engine() to regression, gliko2_engine class to Gliko2
+    1.1.3.001:
+        - changed glicko2() to return tuple instead of array
+    1.1.3.000:
+        - added glicko2_engine class and glicko()
+        - verified glicko2() accuracy
+    1.1.2.003:
+        - fixed elo()
+    1.1.2.002:
+        - added elo()
+        - elo() has bugs to be fixed
+    1.1.2.001:
+        - readded regression import
+    1.1.2.000:
+        - integrated regression.py as regression class
+        - removed regression import
+        - fixed metadata for regression class
+        - fixed metadata for analysis class
+    1.1.1.001:
+        - regression_engine() bug fixes, now actually regresses
+    1.1.1.000:
+        - added regression_engine()
+        - added all regressions except polynomial
+    1.1.0.007:
+        - updated _init_device()
+    1.1.0.006:
+        - removed useless try statements
+    1.1.0.005:
+        - removed impossible outcomes
+    1.1.0.004:
+        - added performance metrics (r^2, mse, rms)
+    1.1.0.003:
+        - resolved nopython mode for mean, median, stdev, variance
+    1.1.0.002:
+        - snapped (removed) majority of unneeded imports
+        - forced object mode (bad) on all jit
+        - TODO: stop numba complaining about not being able to compile in nopython mode
+    1.1.0.001:
+        - removed from sklearn import * to resolve unneeded wildcard imports
+    1.1.0.000:
+        - removed c_entities,nc_entities,obstacles,objectives from __all__
+        - applied numba.jit to all functions
+        - deprecated and removed stdev_z_split
+        - cleaned up histo_analysis to include numpy and numba.jit optimizations
+        - deprecated and removed all regression functions in favor of future pytorch optimizer
+        - deprecated and removed all nonessential functions (basic_analysis, benchmark, strip_data)
+        - optimized z_normalize using sklearn.preprocessing.normalize
+        - TODO: implement kernel/function based pytorch regression optimizer
+    1.0.9.000:
+        - refactored
+        - numpyed everything
+        - removed stats in favor of numpy functions
+    1.0.8.005:
+        - minor fixes
+    1.0.8.004:
+        - removed a few unused dependencies
+    1.0.8.003:
+        - added p_value function
+    1.0.8.002:
+        - updated __all__ correctly to contain changes made in v 1.0.8.000 and v 1.0.8.001
+    1.0.8.001:
+        - refactors
+        - bugfixes
+    1.0.8.000:
+        - deprecated histo_analysis_old
+        - deprecated debug
+        - altered basic_analysis to take array data instead of filepath
+        - refactor
+        - optimization
+    1.0.7.002:
+        - bug fixes
+    1.0.7.001:
+        - bug fixes
+    1.0.7.000:
+        - added tanh_regression (logistic regression)
+        - bug fixes
+    1.0.6.005:
+        - added z_normalize function to normalize dataset
+        - bug fixes
+    1.0.6.004:
+        - bug fixes
+    1.0.6.003:
+        - bug fixes
+    1.0.6.002:
+        - bug fixes
+    1.0.6.001:
+        - corrected __all__ to contain all of the functions
+    1.0.6.000:
+        - added calc_overfit, which calculates two measures of overfit, error and performance
+        - added calculating overfit to optimize_regression
+    1.0.5.000:
+        - added optimize_regression function, which is a sample function to find the optimal regressions
+        - optimize_regression function filters out some overfit functions (functions with r^2 = 1)
+        - planned addition: overfit detection in the optimize_regression function
+    1.0.4.002:
+        - added __changelog__
+        - updated debug function with log and exponential regressions
+    1.0.4.001:
+        - added log regressions
+        - added exponential regressions
+        - added log_regression and exp_regression to __all__
+    1.0.3.008:
+        - added debug function to further consolidate functions
+    1.0.3.007:
+        - added builtin benchmark function
+        - added builtin random (linear) data generation function
+        - added device initialization (_init_device)
+    1.0.3.006:
+        - reorganized the imports list to be in alphabetical order
+        - added search and regurgitate functions to c_entities, nc_entities, obstacles, objectives
+    1.0.3.005:
+        - major bug fixes
+        - updated historical analysis
+        - deprecated old historical analysis
+    1.0.3.004:
+        - added __version__, __author__, __all__
+        - added polynomial regression
+        - added root mean squared function
+        - added r squared function
+    1.0.3.003:
+        - bug fixes
+        - added c_entities
+    1.0.3.002:
+        - bug fixes
+        - added nc_entities, obstacles, objectives
+        - consolidated statistics.py to analysis.py
+    1.0.3.001:
+        - compiled 1d, column, and row basic stats into basic stats function
+    1.0.3.000:
+        - added historical analysis function
+    1.0.2.xxx:
+        - added z score test
+    1.0.1.xxx:
+        - major bug fixes
+ 1.0.0.xxx: + - added loading csv + - added 1d, column, row basic stats +""" + +__author__ = ( + "Arthur Lu ", + "Jacob Levine ", +) + +__all__ = [ + '_init_device', + 'load_csv', + 'basic_stats', + 'z_score', + 'z_normalize', + 'histo_analysis', + 'regression', + 'elo', + 'gliko2', + 'trueskill', + 'RegressionMetrics', + 'ClassificationMetrics', + 'kmeans', + 'pca', + 'decisiontree', + 'knn_classifier', + 'knn_regressor', + 'NaiveBayes', + 'SVM', + 'random_forest_classifier', + 'random_forest_regressor', + 'Regression', + 'Gliko2', + # all statistics functions left out due to integration in other functions +] + +# now back to your regularly scheduled programming: + +# imports (now in alphabetical order! v 1.0.3.006): + +import csv +import numba +from numba import jit +import numpy as np +import math +import sklearn +from sklearn import * +import torch +try: + from analysis import trueskill as Trueskill +except: + import trueskill as Trueskill + +class error(ValueError): + pass + +def _init_device(): # initiates computation device for ANNs + device = 'cuda:0' if torch.cuda.is_available() else 'cpu' + return device + +def load_csv(filepath): + with open(filepath, newline='') as csvfile: + file_array = np.array(list(csv.reader(csvfile))) + csvfile.close() + return file_array + +# expects 1d array +@jit(forceobj=True) +def basic_stats(data): + + data_t = np.array(data).astype(float) + + _mean = mean(data_t) + _median = median(data_t) + _stdev = stdev(data_t) + _variance = variance(data_t) + _min = npmin(data_t) + _max = npmax(data_t) + + return _mean, _median, _stdev, _variance, _min, _max + +# returns z score with inputs of point, mean and standard deviation of spread +@jit(forceobj=True) +def z_score(point, mean, stdev): + score = (point - mean) / stdev + + return score + +# expects 2d array, normalizes across all axes +@jit(forceobj=True) +def z_normalize(array, *args): + + array = np.array(array) + for arg in args: + array = sklearn.preprocessing.normalize(array, axis = arg) + + return array + +@jit(forceobj=True) +# expects 2d array of [x,y] +def histo_analysis(hist_data): + + hist_data = np.array(hist_data) + derivative = np.array(len(hist_data) - 1, dtype = float) + t = np.diff(hist_data) + derivative = t[1] / t[0] + np.sort(derivative) + + return basic_stats(derivative)[0], basic_stats(derivative)[3] + +def regression(ndevice, inputs, outputs, args, loss = torch.nn.MSELoss(), _iterations = 10000, lr = 0.01, _iterations_ply = 10000, lr_ply = 0.01): # inputs, outputs expects N-D array + + regressions = [] + Regression().set_device(ndevice) + + if 'lin' in args: # formula: ax + b + + model = Regression().SGDTrain(Regression.LinearRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).to(device), torch.tensor([outputs]).to(torch.float).to(device), iterations=_iterations, learning_rate=lr, return_losses=True) + params = model[0].parameters + params[:] = map(lambda x: x.item(), params) + regressions.append((params, model[1][::-1][0])) + + if 'log' in args: # formula: a log (b(x + c)) + d + + model = Regression().SGDTrain(Regression.LogRegKernel(len(inputs)), torch.tensor(inputs).to(torch.float).to(device), torch.tensor(outputs).to(torch.float).to(device), iterations=_iterations, learning_rate=lr, return_losses=True) + params = model[0].parameters + params[:] = map(lambda x: x.item(), params) + regressions.append((params, model[1][::-1][0])) + + if 'exp' in args: # formula: a e ^ (b(x + c)) + d + + model = Regression().SGDTrain(Regression.ExpRegKernel(len(inputs)), 
torch.tensor(inputs).to(torch.float).to(device), torch.tensor(outputs).to(torch.float).to(device), iterations=_iterations, learning_rate=lr, return_losses=True) + params = model[0].parameters + params[:] = map(lambda x: x.item(), params) + regressions.append((params, model[1][::-1][0])) + + if 'ply' in args: # formula: a + bx^1 + cx^2 + dx^3 + ... + + plys = [] + limit = len(outputs[0]) + + for i in range(2, limit): + + model = sklearn.preprocessing.PolynomialFeatures(degree = i) + model = sklearn.pipeline.make_pipeline(model, sklearn.linear_model.LinearRegression()) + model = model.fit(np.rot90(inputs), np.rot90(outputs)) + + params = model.steps[1][1].intercept_.tolist() + params = np.append(params, model.steps[1][1].coef_[0].tolist()[1::]) + params.flatten() + params = params.tolist() + + plys.append(params) + + regressions.append(plys) + + if 'sig' in args: # formula: a sig (b(x + c)) + d | sig() = 1/(1 + e ^ -x) + + model = Regression().SGDTrain(Regression.SigmoidalRegKernelArthur(len(inputs)), torch.tensor(inputs).to(torch.float).to(device), torch.tensor(outputs).to(torch.float).to(device), iterations=_iterations, learning_rate=lr, return_losses=True) + params = model[0].parameters + params[:] = map(lambda x: x.item(), params) + regressions.append((params, model[1][::-1][0])) + + return regressions + +@jit(nopython=True) +def elo(starting_score, opposing_score, observed, N, K): + + expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N)) + + return starting_score + K*(np.sum(observed) - np.sum(expected)) + +@jit(forceobj=True) +def gliko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations): + + player = Gliko2(rating = starting_score, rd = starting_rd, vol = starting_vol) + + player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations) + + return (player.rating, player.rd, player.vol) + +@jit(forceobj=True) +def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. 
[[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+
+    team_ratings = []
+
+    for team in teams_data:
+        team_temp = []
+        for player in team:
+            player = Trueskill.Rating(player[0], player[1])
+            team_temp.append(player)
+        team_ratings.append(team_temp)
+
+    return Trueskill.rate(team_ratings, observations)
+
+class RegressionMetrics():
+
+    def __new__(cls, predictions, targets):
+
+        return cls.r_squared(cls, predictions, targets), cls.mse(cls, predictions, targets), cls.rms(cls, predictions, targets)
+
+    def r_squared(self, predictions, targets): # assumes equal size inputs
+
+        return sklearn.metrics.r2_score(targets, predictions)
+
+    def mse(self, predictions, targets):
+
+        return sklearn.metrics.mean_squared_error(targets, predictions)
+
+    def rms(self, predictions, targets):
+
+        return math.sqrt(sklearn.metrics.mean_squared_error(targets, predictions))
+
+class ClassificationMetrics():
+
+    def __new__(cls, predictions, targets):
+
+        return cls.cm(cls, predictions, targets), cls.cr(cls, predictions, targets)
+
+    def cm(self, predictions, targets):
+
+        return sklearn.metrics.confusion_matrix(targets, predictions)
+
+    def cr(self, predictions, targets):
+
+        return sklearn.metrics.classification_report(targets, predictions)
+
+@jit(nopython=True)
+def mean(data):
+
+    return np.mean(data)
+
+@jit(nopython=True)
+def median(data):
+
+    return np.median(data)
+
+@jit(nopython=True)
+def stdev(data):
+
+    return np.std(data)
+
+@jit(nopython=True)
+def variance(data):
+
+    return np.var(data)
+
+@jit(nopython=True)
+def npmin(data):
+
+    return np.amin(data)
+
+@jit(nopython=True)
+def npmax(data):
+
+    return np.amax(data)
+
+@jit(forceobj=True)
+def kmeans(data, n_clusters=8, init="k-means++", n_init=10, max_iter=300, tol=0.0001, precompute_distances="auto", verbose=0, random_state=None, copy_x=True, n_jobs=None, algorithm="auto"):
+
+    kernel = sklearn.cluster.KMeans(n_clusters = n_clusters, init = init, n_init = n_init, max_iter = max_iter, tol = tol, precompute_distances = precompute_distances, verbose = verbose, random_state = random_state, copy_x = copy_x, n_jobs = n_jobs, algorithm = algorithm)
+    kernel.fit(data)
+    predictions = kernel.predict(data)
+    centers = kernel.cluster_centers_
+
+    return centers, predictions
+
+@jit(forceobj=True)
+def pca(data, n_components = None, copy = True, whiten = False, svd_solver = "auto", tol = 0.0, iterated_power = "auto", random_state = None):
+
+    kernel = sklearn.decomposition.PCA(n_components = n_components, copy = copy, whiten = whiten, svd_solver = svd_solver, tol = tol, iterated_power = iterated_power, random_state = random_state)
+
+    return kernel.fit_transform(data)
+
+@jit(forceobj=True)
+def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "best", max_depth = None): #expects *2d data and 1d labels
+
+    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+    model = sklearn.tree.DecisionTreeClassifier(criterion = criterion, splitter = splitter, max_depth = max_depth)
+    model = model.fit(data_train,labels_train)
+    predictions = model.predict(data_test)
+    metrics = ClassificationMetrics(predictions, labels_test)
+
+    return model, metrics
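+
+    # usage sketch (editor's illustration, not part of the original file; assumes
+    # sklearn and numba are installed): decisiontree() splits the data internally
+    # and returns the fitted model plus a (confusion_matrix, classification_report)
+    # pair, so a call can look like:
+    #    data = [[0, 0], [0, 1], [1, 0], [1, 1]] * 10
+    #    labels = [0, 0, 1, 1] * 10
+    #    model, (cm, report) = decisiontree(data, labels, test_size = 0.3)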
+
+@jit(forceobj=True)
+def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+
+    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+    model = sklearn.neighbors.KNeighborsClassifier(algorithm = algorithm, leaf_size = leaf_size, metric = metric, metric_params = metric_params, n_jobs = n_jobs, n_neighbors = n_neighbors, p = p, weights = weights)
+    model.fit(data_train, labels_train)
+    predictions = model.predict(data_test)
+
+    return model, ClassificationMetrics(predictions, labels_test)
+
+def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+
+    data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
+    model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+    model.fit(data_train, outputs_train)
+    predictions = model.predict(data_test)
+
+    return model, RegressionMetrics(predictions, outputs_test)
+
+class NaiveBayes:
+
+    def guassian(self, data, labels, test_size = 0.3, priors = None, var_smoothing = 1e-09):
+
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.naive_bayes.GaussianNB(priors = priors, var_smoothing = var_smoothing)
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
+
+        return model, ClassificationMetrics(predictions, labels_test)
+
+    def multinomial(self, data, labels, test_size = 0.3, alpha=1.0, fit_prior=True, class_prior=None):
+
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.naive_bayes.MultinomialNB(alpha = alpha, fit_prior = fit_prior, class_prior = class_prior)
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
+
+        return model, ClassificationMetrics(predictions, labels_test)
+
+    def bernoulli(self, data, labels, test_size = 0.3, alpha=1.0, binarize=0.0, fit_prior=True, class_prior=None):
+
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.naive_bayes.BernoulliNB(alpha = alpha, binarize = binarize, fit_prior = fit_prior, class_prior = class_prior)
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
+
+        return model, ClassificationMetrics(predictions, labels_test)
+
+    def complement(self, data, labels, test_size = 0.3, alpha=1.0, fit_prior=True, class_prior=None, norm=False):
+
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.naive_bayes.ComplementNB(alpha = alpha, fit_prior = fit_prior, class_prior = class_prior, norm = norm)
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
+
+        return model, ClassificationMetrics(predictions, labels_test)
+
+class SVM:
+
+    class CustomKernel:
+
+        def __new__(cls, C, kernel, degree, gamma, coef0, shrinking, probability, tol, cache_size, class_weight, verbose, max_iter, decision_function_shape, random_state):
+
+            return sklearn.svm.SVC(C = C, kernel = kernel, degree = degree, gamma = gamma, coef0 = coef0, shrinking = shrinking, probability = probability, tol = tol, cache_size = cache_size, class_weight = class_weight, verbose = verbose, max_iter = max_iter, decision_function_shape =
decision_function_shape, random_state = random_state) + + class StandardKernel: + + def __new__(cls, kernel, C=1.0, degree=3, gamma='auto_deprecated', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', random_state=None): + + return sklearn.svm.SVC(C = C, kernel = kernel, gamma = gamma, coef0 = coef0, shrinking = shrinking, probability = probability, tol = tol, cache_size = cache_size, class_weight = class_weight, verbose = verbose, max_iter = max_iter, decision_function_shape = decision_function_shape, random_state = random_state) + + class PrebuiltKernel: + + class Linear: + + def __new__(cls): + + return sklearn.svm.SVC(kernel = 'linear') + + class Polynomial: + + def __new__(cls, power, r_bias): + + return sklearn.svm.SVC(kernel = 'polynomial', degree = power, coef0 = r_bias) + + class RBF: + + def __new__(cls, gamma): + + return sklearn.svm.SVC(kernel = 'rbf', gamma = gamma) + + class Sigmoid: + + def __new__(cls, r_bias): + + return sklearn.svm.SVC(kernel = 'sigmoid', coef0 = r_bias) + + def fit(self, kernel, train_data, train_outputs): # expects *2d data, 1d labels or outputs + + return kernel.fit(train_data, train_outputs) + + def eval_classification(self, kernel, test_data, test_outputs): + + predictions = kernel.predict(test_data) + + return ClassificationMetrics(predictions, test_outputs) + + def eval_regression(self, kernel, test_data, test_outputs): + + predictions = kernel.predict(test_data) + + return RegressionMetrics(predictions, test_outputs) + +def random_forest_classifier(data, labels, test_size, n_estimators="warn", criterion="gini", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False, class_weight=None): + + data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1) + kernel = sklearn.ensemble.RandomForestClassifier(n_estimators = n_estimators, criterion = criterion, max_depth = max_depth, min_samples_split = min_samples_split, min_samples_leaf = min_samples_leaf, min_weight_fraction_leaf = min_weight_fraction_leaf, max_leaf_nodes = max_leaf_nodes, min_impurity_decrease = min_impurity_decrease, bootstrap = bootstrap, oob_score = oob_score, n_jobs = n_jobs, random_state = random_state, verbose = verbose, warm_start = warm_start, class_weight = class_weight) + kernel.fit(data_train, labels_train) + predictions = kernel.predict(data_test) + + return kernel, ClassificationMetrics(predictions, labels_test) + +def random_forest_regressor(data, outputs, test_size, n_estimators="warn", criterion="mse", max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features="auto", max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=None, random_state=None, verbose=0, warm_start=False): + + data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1) + kernel = sklearn.ensemble.RandomForestRegressor(n_estimators = n_estimators, criterion = criterion, max_depth = max_depth, min_samples_split = min_samples_split, min_weight_fraction_leaf = min_weight_fraction_leaf, max_features = max_features, max_leaf_nodes = 
max_leaf_nodes, min_impurity_decrease = min_impurity_decrease, min_impurity_split = min_impurity_split, bootstrap = bootstrap, oob_score = oob_score, n_jobs = n_jobs, random_state = random_state, verbose = verbose, warm_start = warm_start) + kernel.fit(data_train, outputs_train) + predictions = kernel.predict(data_test) + + return kernel, RegressionMetrics(predictions, outputs_test) + +class Regression: + + # Titan Robotics Team 2022: CUDA-based Regressions Module + # Written by Arthur Lu & Jacob Levine + # Notes: + # this module has been automatically inegrated into analysis.py, and should be callable as a class from the package + # this module is cuda-optimized and vectorized (except for one small part) + # setup: + + __version__ = "1.0.0.003" + + # changelog should be viewed using print(analysis.regression.__changelog__) + __changelog__ = """ + 1.0.0.003: + - bug fixes + 1.0.0.002: + -Added more parameters to log, exponential, polynomial + -Added SigmoidalRegKernelArthur, because Arthur apparently needs + to train the scaling and shifting of sigmoids + + 1.0.0.001: + -initial release, with linear, log, exponential, polynomial, and sigmoid kernels + -already vectorized (except for polynomial generation) and CUDA-optimized + """ + + __author__ = ( + "Jacob Levine ", + "Arthur Lu " + ) + + __all__ = [ + 'factorial', + 'take_all_pwrs', + 'num_poly_terms', + 'set_device', + 'LinearRegKernel', + 'SigmoidalRegKernel', + 'LogRegKernel', + 'PolyRegKernel', + 'ExpRegKernel', + 'SigmoidalRegKernelArthur', + 'SGDTrain', + 'CustomTrain' + ] + + global device + + device = "cuda:0" if torch.torch.cuda.is_available() else "cpu" + + #todo: document completely + + def set_device(self, new_device): + device=new_device + + class LinearRegKernel(): + parameters= [] + weights=None + bias=None + def __init__(self, num_vars): + self.weights=torch.rand(num_vars, requires_grad=True, device=device) + self.bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.bias] + def forward(self,mtx): + long_bias=self.bias.repeat([1,mtx.size()[1]]) + return torch.matmul(self.weights,mtx)+long_bias + + class SigmoidalRegKernel(): + parameters= [] + weights=None + bias=None + sigmoid=torch.nn.Sigmoid() + def __init__(self, num_vars): + self.weights=torch.rand(num_vars, requires_grad=True, device=device) + self.bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.bias] + def forward(self,mtx): + long_bias=self.bias.repeat([1,mtx.size()[1]]) + return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias) + + class SigmoidalRegKernelArthur(): + parameters= [] + weights=None + in_bias=None + scal_mult=None + out_bias=None + sigmoid=torch.nn.Sigmoid() + def __init__(self, num_vars): + self.weights=torch.rand(num_vars, requires_grad=True, device=device) + self.in_bias=torch.rand(1, requires_grad=True, device=device) + self.scal_mult=torch.rand(1, requires_grad=True, device=device) + self.out_bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] + def forward(self,mtx): + long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) + long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) + return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias + + class LogRegKernel(): + parameters= [] + weights=None + in_bias=None + scal_mult=None + out_bias=None + def __init__(self, num_vars): + self.weights=torch.rand(num_vars, requires_grad=True, device=device) + 
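        # added comment (not in the original commit): these four tensors parameterize
+            # scal_mult * log(matmul(weights, x) + in_bias) + out_bias, i.e. the
+            # "a log (b(x + c)) + d" form that the 'log' branch of regression() fits
+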
self.in_bias=torch.rand(1, requires_grad=True, device=device) + self.scal_mult=torch.rand(1, requires_grad=True, device=device) + self.out_bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] + def forward(self,mtx): + long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) + long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) + return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias + + class ExpRegKernel(): + parameters= [] + weights=None + in_bias=None + scal_mult=None + out_bias=None + def __init__(self, num_vars): + self.weights=torch.rand(num_vars, requires_grad=True, device=device) + self.in_bias=torch.rand(1, requires_grad=True, device=device) + self.scal_mult=torch.rand(1, requires_grad=True, device=device) + self.out_bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias] + def forward(self,mtx): + long_in_bias=self.in_bias.repeat([1,mtx.size()[1]]) + long_out_bias=self.out_bias.repeat([1,mtx.size()[1]]) + return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias + + class PolyRegKernel(): + parameters= [] + weights=None + bias=None + power=None + def __init__(self, num_vars, power): + self.power=power + num_terms=self.num_poly_terms(num_vars, power) + self.weights=torch.rand(num_terms, requires_grad=True, device=device) + self.bias=torch.rand(1, requires_grad=True, device=device) + self.parameters=[self.weights,self.bias] + def num_poly_terms(self,num_vars, power): + if power == 0: + return 0 + return int(self.factorial(num_vars+power-1) / self.factorial(power) / self.factorial(num_vars-1)) + self.num_poly_terms(num_vars, power-1) + def factorial(self,n): + if n==0: + return 1 + else: + return n*self.factorial(n-1) + def take_all_pwrs(self, vec, pwr): + #todo: vectorize (kinda) + combins=torch.combinations(vec, r=pwr, with_replacement=True) + out=torch.ones(combins.size()[0]).to(device).to(torch.float) + for i in torch.t(combins).to(device).to(torch.float): + out *= i + if pwr == 1: + return out + else: + return torch.cat((out,self.take_all_pwrs(vec, pwr-1))) + def forward(self,mtx): + #TODO: Vectorize the last part + cols=[] + for i in torch.t(mtx): + cols.append(self.take_all_pwrs(i,self.power)) + new_mtx=torch.t(torch.stack(cols)) + long_bias=self.bias.repeat([1,mtx.size()[1]]) + return torch.matmul(self.weights,new_mtx)+long_bias + + def SGDTrain(self, kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False): + optim=torch.optim.SGD(kernel.parameters, lr=learning_rate) + data_cuda=data.to(device) + ground_cuda=ground.to(device) + if (return_losses): + losses=[] + for i in range(iterations): + with torch.set_grad_enabled(True): + optim.zero_grad() + pred=kernel.forward(data_cuda) + ls=loss(pred,ground_cuda) + losses.append(ls.item()) + ls.backward() + optim.step() + return [kernel,losses] + else: + for i in range(iterations): + with torch.set_grad_enabled(True): + optim.zero_grad() + pred=kernel.forward(data_cuda) + ls=loss(pred,ground_cuda) + ls.backward() + optim.step() + return kernel + + def CustomTrain(self, kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False): + data_cuda=data.to(device) + ground_cuda=ground.to(device) + if (return_losses): + losses=[] + for i in range(iterations): + with torch.set_grad_enabled(True): + optim.zero_grad() + pred=kernel.forward(data) + 
ls=loss(pred,ground) + losses.append(ls.item()) + ls.backward() + optim.step() + return [kernel,losses] + else: + for i in range(iterations): + with torch.set_grad_enabled(True): + optim.zero_grad() + pred=kernel.forward(data_cuda) + ls=loss(pred,ground_cuda) + ls.backward() + optim.step() + return kernel + +class Gliko2: + + _tau = 0.5 + + def getRating(self): + return (self.__rating * 173.7178) + 1500 + + def setRating(self, rating): + self.__rating = (rating - 1500) / 173.7178 + + rating = property(getRating, setRating) + + def getRd(self): + return self.__rd * 173.7178 + + def setRd(self, rd): + self.__rd = rd / 173.7178 + + rd = property(getRd, setRd) + + def __init__(self, rating = 1500, rd = 350, vol = 0.06): + + self.setRating(rating) + self.setRd(rd) + self.vol = vol + + def _preRatingRD(self): + + self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2)) + + def update_player(self, rating_list, RD_list, outcome_list): + + rating_list = [(x - 1500) / 173.7178 for x in rating_list] + RD_list = [x / 173.7178 for x in RD_list] + + v = self._v(rating_list, RD_list) + self.vol = self._newVol(rating_list, RD_list, outcome_list, v) + self._preRatingRD() + + self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v)) + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * \ + (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + self.__rating += math.pow(self.__rd, 2) * tempSum + + + def _newVol(self, rating_list, RD_list, outcome_list, v): + + i = 0 + delta = self._delta(rating_list, RD_list, outcome_list, v) + a = math.log(math.pow(self.vol, 2)) + tau = self._tau + x0 = a + x1 = 0 + + while x0 != x1: + # New iteration, so x(i) becomes x(i-1) + x0 = x1 + d = math.pow(self.__rating, 2) + v + math.exp(x0) + h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \ + / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2) + h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \ + (math.pow(self.__rating, 2) + v) \ + / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \ + * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3) + x1 = x0 - (h1 / h2) + + return math.exp(x1 / 2) + + def _delta(self, rating_list, RD_list, outcome_list, v): + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + return v * tempSum + + def _v(self, rating_list, RD_list): + + tempSum = 0 + for i in range(len(rating_list)): + tempE = self._E(rating_list[i], RD_list[i]) + tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE) + return 1 / tempSum + + def _E(self, p2rating, p2RD): + + return 1 / (1 + math.exp(-1 * self._g(p2RD) * \ + (self.__rating - p2rating))) + + def _g(self, RD): + + return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2)) + + def did_not_compete(self): + + self._preRatingRD() \ No newline at end of file diff --git a/data analysis/analysis-master/build/lib/analysis/regression.py b/data analysis/analysis-master/build/lib/analysis/regression.py new file mode 100644 index 00000000..6cbe7868 --- /dev/null +++ b/data analysis/analysis-master/build/lib/analysis/regression.py @@ -0,0 +1,217 @@ +# Titan Robotics Team 2022: CUDA-based Regressions Module +# Written by Arthur Lu & Jacob Levine +# Notes: +# this should be imported as a python module using 'import regression' +# this should be included in the local directory or environment variable +# this module is cuda-optimized and vectorized (except for one small part) +# setup: + 
diff --git a/data analysis/analysis-master/build/lib/analysis/regression.py b/data analysis/analysis-master/build/lib/analysis/regression.py
new file mode 100644
index 00000000..6cbe7868
--- /dev/null
+++ b/data analysis/analysis-master/build/lib/analysis/regression.py
@@ -0,0 +1,217 @@
+# Titan Robotics Team 2022: CUDA-based Regressions Module
+# Written by Arthur Lu & Jacob Levine
+# Notes:
+#   this should be imported as a python module using 'import regression'
+#   this should be included in the local directory or environment variable
+#   this module is cuda-optimized and vectorized (except for one small part)
+# setup:
+
+__version__ = "1.0.0.002"
+
+# changelog should be viewed using print(regression.__changelog__)
+__changelog__ = """
+    1.0.0.002:
+        -Added more parameters to log, exponential, polynomial
+        -Added SigmoidalRegKernelArthur, because Arthur apparently needs
+         to train the scaling and shifting of sigmoids
+
+    1.0.0.001:
+        -initial release, with linear, log, exponential, polynomial, and sigmoid kernels
+        -already vectorized (except for polynomial generation) and CUDA-optimized
+"""
+
+__author__ = (
+    "Jacob Levine ",
+)
+
+__all__ = [
+    'factorial',
+    'take_all_pwrs',
+    'num_poly_terms',
+    'set_device',
+    'LinearRegKernel',
+    'SigmoidalRegKernel',
+    'LogRegKernel',
+    'PolyRegKernel',
+    'ExpRegKernel',
+    'SigmoidalRegKernelArthur',
+    'SGDTrain',
+    'CustomTrain'
+]
+
+# imports (just one for now):
+
+import torch
+
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+#todo: document completely
+
+def factorial(n):
+    if n==0:
+        return 1
+    else:
+        return n*factorial(n-1)
+def num_poly_terms(num_vars, power):
+    if power == 0:
+        return 0
+    return int(factorial(num_vars+power-1) / factorial(power) / factorial(num_vars-1)) + num_poly_terms(num_vars, power-1)
+
+def take_all_pwrs(vec,pwr):
+    #todo: vectorize (kinda)
+    combins=torch.combinations(vec, r=pwr, with_replacement=True)
+    out=torch.ones(combins.size()[0])
+    for i in torch.t(combins):
+        out *= i
+    if pwr == 1:
+        return out
+    return torch.cat((out,take_all_pwrs(vec, pwr-1)))
+
+def set_device(new_device):
+    global device
+    device=new_device
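+
+# worked example: num_poly_terms counts all monomials of degree 1..power, e.g.
+# for 2 variables up to power 3:
+#   C(2+3-1,3) + C(2+2-1,2) + C(2+1-1,1) = 4 + 3 + 2 = 9
+# so PolyRegKernel(2, 3) below allocates 9 weights, matching the length of
+# take_all_pwrs(vec, 3) for a 2-element vec:
+#
+#   assert num_poly_terms(2, 3) == 9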
+
+class LinearRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return torch.matmul(self.weights,mtx)+long_bias
+
+class SigmoidalRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    sigmoid=torch.nn.Sigmoid()
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return self.sigmoid(torch.matmul(self.weights,mtx)+long_bias)
+
+class SigmoidalRegKernelArthur():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    sigmoid=torch.nn.Sigmoid()
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*self.sigmoid(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class LogRegKernel():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*torch.log(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class ExpRegKernel():
+    parameters= []
+    weights=None
+    in_bias=None
+    scal_mult=None
+    out_bias=None
+    def __init__(self, num_vars):
+        self.weights=torch.rand(num_vars, requires_grad=True, device=device)
+        self.in_bias=torch.rand(1, requires_grad=True, device=device)
+        self.scal_mult=torch.rand(1, requires_grad=True, device=device)
+        self.out_bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.in_bias, self.scal_mult, self.out_bias]
+    def forward(self,mtx):
+        long_in_bias=self.in_bias.repeat([1,mtx.size()[1]])
+        long_out_bias=self.out_bias.repeat([1,mtx.size()[1]])
+        return (self.scal_mult*torch.exp(torch.matmul(self.weights,mtx)+long_in_bias))+long_out_bias
+
+class PolyRegKernel():
+    parameters= []
+    weights=None
+    bias=None
+    power=None
+    def __init__(self, num_vars, power):
+        self.power=power
+        num_terms=num_poly_terms(num_vars, power)
+        self.weights=torch.rand(num_terms, requires_grad=True, device=device)
+        self.bias=torch.rand(1, requires_grad=True, device=device)
+        self.parameters=[self.weights,self.bias]
+    def forward(self,mtx):
+        #TODO: Vectorize the last part
+        cols=[]
+        for i in torch.t(mtx):
+            cols.append(take_all_pwrs(i,self.power))
+        new_mtx=torch.t(torch.stack(cols))
+        long_bias=self.bias.repeat([1,mtx.size()[1]])
+        return torch.matmul(self.weights,new_mtx)+long_bias
+
+def SGDTrain(kernel, data, ground, loss=torch.nn.MSELoss(), iterations=1000, learning_rate=.1, return_losses=False):
+    optim=torch.optim.SGD(kernel.parameters, lr=learning_rate)
+    data_cuda=data.to(device)
+    ground_cuda=ground.to(device)
+    if (return_losses):
+        losses=[]
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                losses.append(ls.item())
+                ls.backward()
+                optim.step()
+        return [kernel,losses]
+    else:
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                ls.backward()
+                optim.step()
+        return kernel
+
+def CustomTrain(kernel, optim, data, ground, loss=torch.nn.MSELoss(), iterations=1000, return_losses=False):
+    data_cuda=data.to(device)
+    ground_cuda=ground.to(device)
+    if (return_losses):
+        losses=[]
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                losses.append(ls.item())
+                ls.backward()
+                optim.step()
+        return [kernel,losses]
+    else:
+        for i in range(iterations):
+            with torch.set_grad_enabled(True):
+                optim.zero_grad()
+                pred=kernel.forward(data_cuda)
+                ls=loss(pred,ground_cuda)
+                ls.backward()
+                optim.step()
+        return kernel
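A minimal end-to-end sketch of this module, assuming the analysis package from this patch is installed and that data is laid out (num_vars, num_samples), as forward() implies. Note that set_device() must run before any kernel is constructed, since __init__ allocates the weight tensors on the module-level device:

    import torch
    from analysis import regression

    regression.set_device("cpu")   # or "cuda:0"

    # toy fit of y = 3*x0 - 2*x1 + 1
    data = torch.rand(2, 200)
    ground = (3 * data[0] - 2 * data[1] + 1).unsqueeze(0)

    kernel = regression.LinearRegKernel(num_vars=2)
    kernel, losses = regression.SGDTrain(kernel, data, ground, iterations=5000,
                                         learning_rate=.01, return_losses=True)
    print(kernel.weights, kernel.bias, losses[-1])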
diff --git a/data analysis/analysis-master/build/lib/analysis/titanlearn.py b/data analysis/analysis-master/build/lib/analysis/titanlearn.py
new file mode 100644
index 00000000..b69d36e3
--- /dev/null
+++ b/data analysis/analysis-master/build/lib/analysis/titanlearn.py
@@ -0,0 +1,122 @@
+# Titan Robotics Team 2022: ML Module
+# Written by Arthur Lu & Jacob Levine
+# Notes:
+#   this should be imported as a python module using 'import titanlearn'
+#   this should be included in the local directory or environment variable
+#   this module is optimized for multithreaded computing
+#   this module learns from its mistakes far faster than 2022's captains
+# setup:
+
+__version__ = "2.0.1.001"
+
+#changelog should be viewed using print(analysis.__changelog__)
+__changelog__ = """changelog:
+    2.0.1.001:
+        - removed matplotlib import
+        - removed graphloss()
+    2.0.1.000:
+        - added net, dataset, dataloader, and stdtrain template definitions
+        - added graphloss function
+    2.0.0.001:
+        - added clear functions
+    2.0.0.000:
+        - complete rewrite planned
+        - deprecated 1.0.0.xxx versions
+        - added simple training loop
+    1.0.0.xxx:
+        -added generation of ANNs, basic SGD training
+"""
+
+__author__ = (
+    "Arthur Lu ,"
+    "Jacob Levine ,"
+)
+
+__all__ = [
+    'clear',
+    'net',
+    'dataset',
+    'dataloader',
+    'train',
+    'stdtrainer',
+]
+
+import torch
+from os import system, name
+import numpy as np
+
+def clear():
+    if name == 'nt':
+        _ = system('cls')
+    else:
+        _ = system('clear')
+
+class net(torch.nn.Module): #template for standard neural net
+    def __init__(self):
+        super(net, self).__init__()
+
+    def forward(self, input):
+        pass
+
+class dataset(torch.utils.data.Dataset): #template for standard dataset
+
+    def __init__(self):
+        super(dataset, self).__init__()
+
+    def __getitem__(self, index):
+        pass
+
+    def __len__(self):
+        pass
+
+def dataloader(dataset, batch_size, num_workers, shuffle = True):
+
+    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+def train(device, net, epochs, trainloader, optimizer, criterion): #expects standard dataloader, which returns (inputs, labels)
+
+    dataset_len = trainloader.dataset.__len__()
+    iter_count = 0
+    running_loss = 0
+    running_loss_list = []
+
+    for epoch in range(epochs): # loop over the dataset multiple times
+
+        for i, data in enumerate(trainloader, 0):
+
+            inputs = data[0].to(device)
+            labels = data[1].to(device)
+
+            optimizer.zero_grad()
+
+            outputs = net(inputs)
+            loss = criterion(outputs, labels.to(torch.float))
+
+            loss.backward()
+            optimizer.step()
+
+            # monitoring steps below
+
+            iter_count += 1
+            running_loss += loss.item()
+            running_loss_list.append(running_loss)
+            clear()
+
+            print("training on: " + str(device))
+            print("iteration: " + str(i) + "/" + str(int(dataset_len / trainloader.batch_size)) + " | " + "epoch: " + str(epoch) + "/" + str(epochs))
+            print("current batch loss: " + str(loss.item()))
+            print("running loss: " + str(running_loss / iter_count))
+
+    print("finished training")
+    return net, running_loss_list
+
+def stdtrainer(net, criterion, optimizer, dataloader, epochs, batch_size):
+
+    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+    net = net.to(device)
+    criterion = criterion.to(device)
+    # torch optimizers have no .to(); they track the net parameters moved in place above
+    trainloader = dataloader
+
+    return train(device, net, epochs, trainloader, optimizer, criterion)
\ No newline at end of file
diff --git a/data analysis/analysis-master/build/lib/analysis/trueskill.py b/data analysis/analysis-master/build/lib/analysis/trueskill.py
new file mode 100644
index 00000000..116357df
--- /dev/null
+++ b/data analysis/analysis-master/build/lib/analysis/trueskill.py
@@ -0,0 +1,907 @@
+from __future__ import absolute_import
+
+from itertools import chain
+import math
+
+from six import iteritems
+from six.moves import map, range, zip
+from six import iterkeys
+
+import copy
+try:
+    from numbers import Number
+except ImportError:
+    Number = (int, long, float, complex)
+
+inf = float('inf')
+
+class Gaussian(object):
+    #: Precision, the inverse of the variance.
+
+ pi = 0 + #: Precision adjusted mean, the precision multiplied by the mean. + tau = 0 + + def __init__(self, mu=None, sigma=None, pi=0, tau=0): + if mu is not None: + if sigma is None: + raise TypeError('sigma argument is needed') + elif sigma == 0: + raise ValueError('sigma**2 should be greater than 0') + pi = sigma ** -2 + tau = pi * mu + self.pi = pi + self.tau = tau + + @property + def mu(self): + return self.pi and self.tau / self.pi + + @property + def sigma(self): + return math.sqrt(1 / self.pi) if self.pi else inf + + def __mul__(self, other): + pi, tau = self.pi + other.pi, self.tau + other.tau + return Gaussian(pi=pi, tau=tau) + + def __truediv__(self, other): + pi, tau = self.pi - other.pi, self.tau - other.tau + return Gaussian(pi=pi, tau=tau) + + __div__ = __truediv__ # for Python 2 + + def __eq__(self, other): + return self.pi == other.pi and self.tau == other.tau + + def __lt__(self, other): + return self.mu < other.mu + + def __le__(self, other): + return self.mu <= other.mu + + def __gt__(self, other): + return self.mu > other.mu + + def __ge__(self, other): + return self.mu >= other.mu + + def __repr__(self): + return 'N(mu={:.3f}, sigma={:.3f})'.format(self.mu, self.sigma) + + def _repr_latex_(self): + latex = r'\mathcal{{ N }}( {:.3f}, {:.3f}^2 )'.format(self.mu, self.sigma) + return '$%s$' % latex + +class Matrix(list): + def __init__(self, src, height=None, width=None): + if callable(src): + f, src = src, {} + size = [height, width] + if not height: + def set_height(height): + size[0] = height + size[0] = set_height + if not width: + def set_width(width): + size[1] = width + size[1] = set_width + try: + for (r, c), val in f(*size): + src[r, c] = val + except TypeError: + raise TypeError('A callable src must return an interable ' + 'which generates a tuple containing ' + 'coordinate and value') + height, width = tuple(size) + if height is None or width is None: + raise TypeError('A callable src must call set_height and ' + 'set_width if the size is non-deterministic') + if isinstance(src, list): + is_number = lambda x: isinstance(x, Number) + unique_col_sizes = set(map(len, src)) + everything_are_number = filter(is_number, sum(src, [])) + if len(unique_col_sizes) != 1 or not everything_are_number: + raise ValueError('src must be a rectangular array of numbers') + two_dimensional_array = src + elif isinstance(src, dict): + if not height or not width: + w = h = 0 + for r, c in iterkeys(src): + if not height: + h = max(h, r + 1) + if not width: + w = max(w, c + 1) + if not height: + height = h + if not width: + width = w + two_dimensional_array = [] + for r in range(height): + row = [] + two_dimensional_array.append(row) + for c in range(width): + row.append(src.get((r, c), 0)) + else: + raise TypeError('src must be a list or dict or callable') + super(Matrix, self).__init__(two_dimensional_array) + + @property + def height(self): + return len(self) + + @property + def width(self): + return len(self[0]) + + def transpose(self): + height, width = self.height, self.width + src = {} + for c in range(width): + for r in range(height): + src[c, r] = self[r][c] + return type(self)(src, height=width, width=height) + + def minor(self, row_n, col_n): + height, width = self.height, self.width + if not (0 <= row_n < height): + raise ValueError('row_n should be between 0 and %d' % height) + elif not (0 <= col_n < width): + raise ValueError('col_n should be between 0 and %d' % width) + two_dimensional_array = [] + for r in range(height): + if r == row_n: + continue + row = [] + 
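# build the minor row by row below, skipping row row_n and column col_n
+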
two_dimensional_array.append(row) + for c in range(width): + if c == col_n: + continue + row.append(self[r][c]) + return type(self)(two_dimensional_array) + + def determinant(self): + height, width = self.height, self.width + if height != width: + raise ValueError('Only square matrix can calculate a determinant') + tmp, rv = copy.deepcopy(self), 1. + for c in range(width - 1, 0, -1): + pivot, r = max((abs(tmp[r][c]), r) for r in range(c + 1)) + pivot = tmp[r][c] + if not pivot: + return 0. + tmp[r], tmp[c] = tmp[c], tmp[r] + if r != c: + rv = -rv + rv *= pivot + fact = -1. / pivot + for r in range(c): + f = fact * tmp[r][c] + for x in range(c): + tmp[r][x] += f * tmp[c][x] + return rv * tmp[0][0] + + def adjugate(self): + height, width = self.height, self.width + if height != width: + raise ValueError('Only square matrix can be adjugated') + if height == 2: + a, b = self[0][0], self[0][1] + c, d = self[1][0], self[1][1] + return type(self)([[d, -b], [-c, a]]) + src = {} + for r in range(height): + for c in range(width): + sign = -1 if (r + c) % 2 else 1 + src[r, c] = self.minor(r, c).determinant() * sign + return type(self)(src, height, width) + + def inverse(self): + if self.height == self.width == 1: + return type(self)([[1. / self[0][0]]]) + return (1. / self.determinant()) * self.adjugate() + + def __add__(self, other): + height, width = self.height, self.width + if (height, width) != (other.height, other.width): + raise ValueError('Must be same size') + src = {} + for r in range(height): + for c in range(width): + src[r, c] = self[r][c] + other[r][c] + return type(self)(src, height, width) + + def __mul__(self, other): + if self.width != other.height: + raise ValueError('Bad size') + height, width = self.height, other.width + src = {} + for r in range(height): + for c in range(width): + src[r, c] = sum(self[r][x] * other[x][c] + for x in range(self.width)) + return type(self)(src, height, width) + + def __rmul__(self, other): + if not isinstance(other, Number): + raise TypeError('The operand should be a number') + height, width = self.height, self.width + src = {} + for r in range(height): + for c in range(width): + src[r, c] = other * self[r][c] + return type(self)(src, height, width) + + def __repr__(self): + return '{}({})'.format(type(self).__name__, super(Matrix, self).__repr__()) + + def _repr_latex_(self): + rows = [' && '.join(['%.3f' % cell for cell in row]) for row in self] + latex = r'\begin{matrix} %s \end{matrix}' % r'\\'.join(rows) + return '$%s$' % latex + +def _gen_erfcinv(erfc, math=math): + def erfcinv(y): + """The inverse function of erfc.""" + if y >= 2: + return -100. + elif y <= 0: + return 100. + zero_point = y < 1 + if not zero_point: + y = 2 - y + t = math.sqrt(-2 * math.log(y / 2.)) + x = -0.70711 * \ + ((2.30753 + t * 0.27061) / (1. + t * (0.99229 + t * 0.04481)) - t) + for i in range(2): + err = erfc(x) - y + x += err / (1.12837916709551257 * math.exp(-(x ** 2)) - x * err) + return x if zero_point else -x + return erfcinv + +def _gen_ppf(erfc, math=math): + erfcinv = _gen_erfcinv(erfc, math) + def ppf(x, mu=0, sigma=1): + return mu - sigma * math.sqrt(2) * erfcinv(2 * x) + return ppf + +def erfc(x): + z = abs(x) + t = 1. / (1. + z / 2.) + r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * ( + 0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * ( + 0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * ( + -0.82215223 + t * 0.17087277 + ))) + ))) + ))) + return 2. 
- r if x < 0 else r + +def cdf(x, mu=0, sigma=1): + return 0.5 * erfc(-(x - mu) / (sigma * math.sqrt(2))) + + +def pdf(x, mu=0, sigma=1): + return (1 / math.sqrt(2 * math.pi) * abs(sigma) * + math.exp(-(((x - mu) / abs(sigma)) ** 2 / 2))) + +ppf = _gen_ppf(erfc) + +def choose_backend(backend): + if backend is None: # fallback + return cdf, pdf, ppf + elif backend == 'mpmath': + try: + import mpmath + except ImportError: + raise ImportError('Install "mpmath" to use this backend') + return mpmath.ncdf, mpmath.npdf, _gen_ppf(mpmath.erfc, math=mpmath) + elif backend == 'scipy': + try: + from scipy.stats import norm + except ImportError: + raise ImportError('Install "scipy" to use this backend') + return norm.cdf, norm.pdf, norm.ppf + raise ValueError('%r backend is not defined' % backend) + +def available_backends(): + backends = [None] + for backend in ['mpmath', 'scipy']: + try: + __import__(backend) + except ImportError: + continue + backends.append(backend) + return backends + +class Node(object): + + pass + +class Variable(Node, Gaussian): + + def __init__(self): + self.messages = {} + super(Variable, self).__init__() + + def set(self, val): + delta = self.delta(val) + self.pi, self.tau = val.pi, val.tau + return delta + + def delta(self, other): + pi_delta = abs(self.pi - other.pi) + if pi_delta == inf: + return 0. + return max(abs(self.tau - other.tau), math.sqrt(pi_delta)) + + def update_message(self, factor, pi=0, tau=0, message=None): + message = message or Gaussian(pi=pi, tau=tau) + old_message, self[factor] = self[factor], message + return self.set(self / old_message * message) + + def update_value(self, factor, pi=0, tau=0, value=None): + value = value or Gaussian(pi=pi, tau=tau) + old_message = self[factor] + self[factor] = value * old_message / self + return self.set(value) + + def __getitem__(self, factor): + return self.messages[factor] + + def __setitem__(self, factor, message): + self.messages[factor] = message + + def __repr__(self): + args = (type(self).__name__, super(Variable, self).__repr__(), + len(self.messages), '' if len(self.messages) == 1 else 's') + return '<%s %s with %d connection%s>' % args + + +class Factor(Node): + + def __init__(self, variables): + self.vars = variables + for var in variables: + var[self] = Gaussian() + + def down(self): + return 0 + + def up(self): + return 0 + + @property + def var(self): + assert len(self.vars) == 1 + return self.vars[0] + + def __repr__(self): + args = (type(self).__name__, len(self.vars), + '' if len(self.vars) == 1 else 's') + return '<%s with %d connection%s>' % args + + +class PriorFactor(Factor): + + def __init__(self, var, val, dynamic=0): + super(PriorFactor, self).__init__([var]) + self.val = val + self.dynamic = dynamic + + def down(self): + sigma = math.sqrt(self.val.sigma ** 2 + self.dynamic ** 2) + value = Gaussian(self.val.mu, sigma) + return self.var.update_value(self, value=value) + + +class LikelihoodFactor(Factor): + + def __init__(self, mean_var, value_var, variance): + super(LikelihoodFactor, self).__init__([mean_var, value_var]) + self.mean = mean_var + self.value = value_var + self.variance = variance + + def calc_a(self, var): + return 1. / (1. + self.variance * var.pi) + + def down(self): + # update value. + msg = self.mean / self.mean[self] + a = self.calc_a(msg) + return self.value.update_message(self, a * msg.pi, a * msg.tau) + + def up(self): + # update mean. 
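+        # mirrors down(): divide out this factor's previous message, then damp
+        # precision and precision-mean by a = 1 / (1 + variance * pi), the exact
+        # Gaussian marginalization through the beta**2 likelihood noise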
+ msg = self.value / self.value[self] + a = self.calc_a(msg) + return self.mean.update_message(self, a * msg.pi, a * msg.tau) + + +class SumFactor(Factor): + + def __init__(self, sum_var, term_vars, coeffs): + super(SumFactor, self).__init__([sum_var] + term_vars) + self.sum = sum_var + self.terms = term_vars + self.coeffs = coeffs + + def down(self): + vals = self.terms + msgs = [var[self] for var in vals] + return self.update(self.sum, vals, msgs, self.coeffs) + + def up(self, index=0): + coeff = self.coeffs[index] + coeffs = [] + for x, c in enumerate(self.coeffs): + try: + if x == index: + coeffs.append(1. / coeff) + else: + coeffs.append(-c / coeff) + except ZeroDivisionError: + coeffs.append(0.) + vals = self.terms[:] + vals[index] = self.sum + msgs = [var[self] for var in vals] + return self.update(self.terms[index], vals, msgs, coeffs) + + def update(self, var, vals, msgs, coeffs): + pi_inv = 0 + mu = 0 + for val, msg, coeff in zip(vals, msgs, coeffs): + div = val / msg + mu += coeff * div.mu + if pi_inv == inf: + continue + try: + # numpy.float64 handles floating-point error by different way. + # For example, it can just warn RuntimeWarning on n/0 problem + # instead of throwing ZeroDivisionError. So div.pi, the + # denominator has to be a built-in float. + pi_inv += coeff ** 2 / float(div.pi) + except ZeroDivisionError: + pi_inv = inf + pi = 1. / pi_inv + tau = pi * mu + return var.update_message(self, pi, tau) + + +class TruncateFactor(Factor): + + def __init__(self, var, v_func, w_func, draw_margin): + super(TruncateFactor, self).__init__([var]) + self.v_func = v_func + self.w_func = w_func + self.draw_margin = draw_margin + + def up(self): + val = self.var + msg = self.var[self] + div = val / msg + sqrt_pi = math.sqrt(div.pi) + args = (div.tau / sqrt_pi, self.draw_margin * sqrt_pi) + v = self.v_func(*args) + w = self.w_func(*args) + denom = (1. - w) + pi, tau = div.pi / denom, (div.tau + sqrt_pi * v) / denom + return val.update_value(self, pi, tau) + +#: Default initial mean of ratings. +MU = 25. +#: Default initial standard deviation of ratings. +SIGMA = MU / 3 +#: Default distance that guarantees about 76% chance of winning. +BETA = SIGMA / 2 +#: Default dynamic factor. +TAU = SIGMA / 100 +#: Default draw probability of the game. +DRAW_PROBABILITY = .10 +#: A basis to check reliability of the result. +DELTA = 0.0001 + + +def calc_draw_probability(draw_margin, size, env=None): + if env is None: + env = global_env() + return 2 * env.cdf(draw_margin / (math.sqrt(size) * env.beta)) - 1 + + +def calc_draw_margin(draw_probability, size, env=None): + if env is None: + env = global_env() + return env.ppf((draw_probability + 1) / 2.) 
* math.sqrt(size) * env.beta + + +def _team_sizes(rating_groups): + team_sizes = [0] + for group in rating_groups: + team_sizes.append(len(group) + team_sizes[-1]) + del team_sizes[0] + return team_sizes + + +def _floating_point_error(env): + if env.backend == 'mpmath': + msg = 'Set "mpmath.mp.dps" to higher' + else: + msg = 'Cannot calculate correctly, set backend to "mpmath"' + return FloatingPointError(msg) + + +class Rating(Gaussian): + def __init__(self, mu=None, sigma=None): + if isinstance(mu, tuple): + mu, sigma = mu + elif isinstance(mu, Gaussian): + mu, sigma = mu.mu, mu.sigma + if mu is None: + mu = global_env().mu + if sigma is None: + sigma = global_env().sigma + super(Rating, self).__init__(mu, sigma) + + def __int__(self): + return int(self.mu) + + def __long__(self): + return long(self.mu) + + def __float__(self): + return float(self.mu) + + def __iter__(self): + return iter((self.mu, self.sigma)) + + def __repr__(self): + c = type(self) + args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma) + return '%s(mu=%.3f, sigma=%.3f)' % args + + +class TrueSkill(object): + def __init__(self, mu=MU, sigma=SIGMA, beta=BETA, tau=TAU, + draw_probability=DRAW_PROBABILITY, backend=None): + self.mu = mu + self.sigma = sigma + self.beta = beta + self.tau = tau + self.draw_probability = draw_probability + self.backend = backend + if isinstance(backend, tuple): + self.cdf, self.pdf, self.ppf = backend + else: + self.cdf, self.pdf, self.ppf = choose_backend(backend) + + def create_rating(self, mu=None, sigma=None): + if mu is None: + mu = self.mu + if sigma is None: + sigma = self.sigma + return Rating(mu, sigma) + + def v_win(self, diff, draw_margin): + x = diff - draw_margin + denom = self.cdf(x) + return (self.pdf(x) / denom) if denom else -x + + def v_draw(self, diff, draw_margin): + abs_diff = abs(diff) + a, b = draw_margin - abs_diff, -draw_margin - abs_diff + denom = self.cdf(a) - self.cdf(b) + numer = self.pdf(b) - self.pdf(a) + return ((numer / denom) if denom else a) * (-1 if diff < 0 else +1) + + def w_win(self, diff, draw_margin): + x = diff - draw_margin + v = self.v_win(diff, draw_margin) + w = v * (v + x) + if 0 < w < 1: + return w + raise _floating_point_error(self) + + def w_draw(self, diff, draw_margin): + abs_diff = abs(diff) + a, b = draw_margin - abs_diff, -draw_margin - abs_diff + denom = self.cdf(a) - self.cdf(b) + if not denom: + raise _floating_point_error(self) + v = self.v_draw(abs_diff, draw_margin) + return (v ** 2) + (a * self.pdf(a) - b * self.pdf(b)) / denom + + def validate_rating_groups(self, rating_groups): + # check group sizes + if len(rating_groups) < 2: + raise ValueError('Need multiple rating groups') + elif not all(rating_groups): + raise ValueError('Each group must contain multiple ratings') + # check group types + group_types = set(map(type, rating_groups)) + if len(group_types) != 1: + raise TypeError('All groups should be same type') + elif group_types.pop() is Rating: + raise TypeError('Rating cannot be a rating group') + # normalize rating_groups + if isinstance(rating_groups[0], dict): + dict_rating_groups = rating_groups + rating_groups = [] + keys = [] + for dict_rating_group in dict_rating_groups: + rating_group, key_group = [], [] + for key, rating in iteritems(dict_rating_group): + rating_group.append(rating) + key_group.append(key) + rating_groups.append(tuple(rating_group)) + keys.append(tuple(key_group)) + else: + rating_groups = list(rating_groups) + keys = None + return rating_groups, keys + + def validate_weights(self, 
weights, rating_groups, keys=None): + if weights is None: + weights = [(1,) * len(g) for g in rating_groups] + elif isinstance(weights, dict): + weights_dict, weights = weights, [] + for x, group in enumerate(rating_groups): + w = [] + weights.append(w) + for y, rating in enumerate(group): + if keys is not None: + y = keys[x][y] + w.append(weights_dict.get((x, y), 1)) + return weights + + def factor_graph_builders(self, rating_groups, ranks, weights): + flatten_ratings = sum(map(tuple, rating_groups), ()) + flatten_weights = sum(map(tuple, weights), ()) + size = len(flatten_ratings) + group_size = len(rating_groups) + # create variables + rating_vars = [Variable() for x in range(size)] + perf_vars = [Variable() for x in range(size)] + team_perf_vars = [Variable() for x in range(group_size)] + team_diff_vars = [Variable() for x in range(group_size - 1)] + team_sizes = _team_sizes(rating_groups) + # layer builders + def build_rating_layer(): + for rating_var, rating in zip(rating_vars, flatten_ratings): + yield PriorFactor(rating_var, rating, self.tau) + def build_perf_layer(): + for rating_var, perf_var in zip(rating_vars, perf_vars): + yield LikelihoodFactor(rating_var, perf_var, self.beta ** 2) + def build_team_perf_layer(): + for team, team_perf_var in enumerate(team_perf_vars): + if team > 0: + start = team_sizes[team - 1] + else: + start = 0 + end = team_sizes[team] + child_perf_vars = perf_vars[start:end] + coeffs = flatten_weights[start:end] + yield SumFactor(team_perf_var, child_perf_vars, coeffs) + def build_team_diff_layer(): + for team, team_diff_var in enumerate(team_diff_vars): + yield SumFactor(team_diff_var, + team_perf_vars[team:team + 2], [+1, -1]) + def build_trunc_layer(): + for x, team_diff_var in enumerate(team_diff_vars): + if callable(self.draw_probability): + # dynamic draw probability + team_perf1, team_perf2 = team_perf_vars[x:x + 2] + args = (Rating(team_perf1), Rating(team_perf2), self) + draw_probability = self.draw_probability(*args) + else: + # static draw probability + draw_probability = self.draw_probability + size = sum(map(len, rating_groups[x:x + 2])) + draw_margin = calc_draw_margin(draw_probability, size, self) + if ranks[x] == ranks[x + 1]: # is a tie? 
+ v_func, w_func = self.v_draw, self.w_draw + else: + v_func, w_func = self.v_win, self.w_win + yield TruncateFactor(team_diff_var, + v_func, w_func, draw_margin) + # build layers + return (build_rating_layer, build_perf_layer, build_team_perf_layer, + build_team_diff_layer, build_trunc_layer) + + def run_schedule(self, build_rating_layer, build_perf_layer, + build_team_perf_layer, build_team_diff_layer, + build_trunc_layer, min_delta=DELTA): + if min_delta <= 0: + raise ValueError('min_delta must be greater than 0') + layers = [] + def build(builders): + layers_built = [list(build()) for build in builders] + layers.extend(layers_built) + return layers_built + # gray arrows + layers_built = build([build_rating_layer, + build_perf_layer, + build_team_perf_layer]) + rating_layer, perf_layer, team_perf_layer = layers_built + for f in chain(*layers_built): + f.down() + # arrow #1, #2, #3 + team_diff_layer, trunc_layer = build([build_team_diff_layer, + build_trunc_layer]) + team_diff_len = len(team_diff_layer) + for x in range(10): + if team_diff_len == 1: + # only two teams + team_diff_layer[0].down() + delta = trunc_layer[0].up() + else: + # multiple teams + delta = 0 + for x in range(team_diff_len - 1): + team_diff_layer[x].down() + delta = max(delta, trunc_layer[x].up()) + team_diff_layer[x].up(1) # up to right variable + for x in range(team_diff_len - 1, 0, -1): + team_diff_layer[x].down() + delta = max(delta, trunc_layer[x].up()) + team_diff_layer[x].up(0) # up to left variable + # repeat until to small update + if delta <= min_delta: + break + # up both ends + team_diff_layer[0].up(0) + team_diff_layer[team_diff_len - 1].up(1) + # up the remainder of the black arrows + for f in team_perf_layer: + for x in range(len(f.vars) - 1): + f.up(x) + for f in perf_layer: + f.up() + return layers + + def rate(self, rating_groups, ranks=None, weights=None, min_delta=DELTA): + rating_groups, keys = self.validate_rating_groups(rating_groups) + weights = self.validate_weights(weights, rating_groups, keys) + group_size = len(rating_groups) + if ranks is None: + ranks = range(group_size) + elif len(ranks) != group_size: + raise ValueError('Wrong ranks') + # sort rating groups by rank + by_rank = lambda x: x[1][1] + sorting = sorted(enumerate(zip(rating_groups, ranks, weights)), + key=by_rank) + sorted_rating_groups, sorted_ranks, sorted_weights = [], [], [] + for x, (g, r, w) in sorting: + sorted_rating_groups.append(g) + sorted_ranks.append(r) + # make weights to be greater than 0 + sorted_weights.append(max(min_delta, w_) for w_ in w) + # build factor graph + args = (sorted_rating_groups, sorted_ranks, sorted_weights) + builders = self.factor_graph_builders(*args) + args = builders + (min_delta,) + layers = self.run_schedule(*args) + # make result + rating_layer, team_sizes = layers[0], _team_sizes(sorted_rating_groups) + transformed_groups = [] + for start, end in zip([0] + team_sizes[:-1], team_sizes): + group = [] + for f in rating_layer[start:end]: + group.append(Rating(float(f.var.mu), float(f.var.sigma))) + transformed_groups.append(tuple(group)) + by_hint = lambda x: x[0] + unsorting = sorted(zip((x for x, __ in sorting), transformed_groups), + key=by_hint) + if keys is None: + return [g for x, g in unsorting] + # restore the structure with input dictionary keys + return [dict(zip(keys[x], g)) for x, g in unsorting] + + def quality(self, rating_groups, weights=None): + rating_groups, keys = self.validate_rating_groups(rating_groups) + weights = self.validate_weights(weights, rating_groups, keys) 
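+        # everything below evaluates the closed-form draw probability of the
+        # match-up, exp(e_arg) * sqrt(s_arg), directly from the joint Gaussian
+        # over all flattened skills; no factor graph is needed for quality()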
+ flatten_ratings = sum(map(tuple, rating_groups), ()) + flatten_weights = sum(map(tuple, weights), ()) + length = len(flatten_ratings) + # a vector of all of the skill means + mean_matrix = Matrix([[r.mu] for r in flatten_ratings]) + # a matrix whose diagonal values are the variances (sigma ** 2) of each + # of the players. + def variance_matrix(height, width): + variances = (r.sigma ** 2 for r in flatten_ratings) + for x, variance in enumerate(variances): + yield (x, x), variance + variance_matrix = Matrix(variance_matrix, length, length) + # the player-team assignment and comparison matrix + def rotated_a_matrix(set_height, set_width): + t = 0 + for r, (cur, _next) in enumerate(zip(rating_groups[:-1], + rating_groups[1:])): + for x in range(t, t + len(cur)): + yield (r, x), flatten_weights[x] + t += 1 + x += 1 + for x in range(x, x + len(_next)): + yield (r, x), -flatten_weights[x] + set_height(r + 1) + set_width(x + 1) + rotated_a_matrix = Matrix(rotated_a_matrix) + a_matrix = rotated_a_matrix.transpose() + # match quality further derivation + _ata = (self.beta ** 2) * rotated_a_matrix * a_matrix + _atsa = rotated_a_matrix * variance_matrix * a_matrix + start = mean_matrix.transpose() * a_matrix + middle = _ata + _atsa + end = rotated_a_matrix * mean_matrix + # make result + e_arg = (-0.5 * start * middle.inverse() * end).determinant() + s_arg = _ata.determinant() / middle.determinant() + return math.exp(e_arg) * math.sqrt(s_arg) + + def expose(self, rating): + k = self.mu / self.sigma + return rating.mu - k * rating.sigma + + def make_as_global(self): + return setup(env=self) + + def __repr__(self): + c = type(self) + if callable(self.draw_probability): + f = self.draw_probability + draw_probability = '.'.join([f.__module__, f.__name__]) + else: + draw_probability = '%.1f%%' % (self.draw_probability * 100) + if self.backend is None: + backend = '' + elif isinstance(self.backend, tuple): + backend = ', backend=...' 
+        else:
+            backend = ', backend=%r' % self.backend
+        args = ('.'.join([c.__module__, c.__name__]), self.mu, self.sigma,
+                self.beta, self.tau, draw_probability, backend)
+        return ('%s(mu=%.3f, sigma=%.3f, beta=%.3f, tau=%.3f, '
+                'draw_probability=%s%s)' % args)
+
+
+def rate_1vs1(rating1, rating2, drawn=False, min_delta=DELTA, env=None):
+    if env is None:
+        env = global_env()
+    ranks = [0, 0 if drawn else 1]
+    teams = env.rate([(rating1,), (rating2,)], ranks, min_delta=min_delta)
+    return teams[0][0], teams[1][0]
+
+
+def quality_1vs1(rating1, rating2, env=None):
+    if env is None:
+        env = global_env()
+    return env.quality([(rating1,), (rating2,)])
+
+
+def global_env():
+    try:
+        global_env.__trueskill__
+    except AttributeError:
+        # setup the default environment
+        setup()
+    return global_env.__trueskill__
+
+
+def setup(mu=MU, sigma=SIGMA, beta=BETA, tau=TAU,
+          draw_probability=DRAW_PROBABILITY, backend=None, env=None):
+    if env is None:
+        env = TrueSkill(mu, sigma, beta, tau, draw_probability, backend)
+    global_env.__trueskill__ = env
+    return env
+
+
+def rate(rating_groups, ranks=None, weights=None, min_delta=DELTA):
+    return global_env().rate(rating_groups, ranks, weights, min_delta)
+
+
+def quality(rating_groups, weights=None):
+    return global_env().quality(rating_groups, weights)
+
+
+def expose(rating):
+    return global_env().expose(rating)
\ No newline at end of file
diff --git a/data analysis/analysis-master/build/lib/analysis/visualization.py b/data analysis/analysis-master/build/lib/analysis/visualization.py
new file mode 100644
index 00000000..72358662
--- /dev/null
+++ b/data analysis/analysis-master/build/lib/analysis/visualization.py
@@ -0,0 +1,34 @@
+# Titan Robotics Team 2022: Visualization Module
+# Written by Arthur Lu & Jacob Levine
+# Notes:
+#   this should be imported as a python module using 'import visualization'
+#   this should be included in the local directory or environment variable
+#   fancy
+# setup:
+
+__version__ = "1.0.0.000"
+
+#changelog should be viewed using print(analysis.__changelog__)
+__changelog__ = """changelog:
+    1.0.0.000:
+        - created visualization.py
+        - added graphloss()
+        - added imports
+"""
+
+__author__ = (
+    "Arthur Lu ,"
+    "Jacob Levine ,"
+)
+
+__all__ = [
+    'graphloss',
+]
+
+import matplotlib.pyplot as plt
+
+def graphloss(losses):
+
+    x = range(0, len(losses))
+    plt.plot(x, losses)
+    plt.show()
\ No newline at end of file
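graphloss() pairs naturally with the return_losses option of regression.SGDTrain(); a minimal sketch, assuming the analysis package from this patch is installed:

    import torch
    from analysis import regression, visualization

    kernel = regression.LinearRegKernel(num_vars=2)
    kernel, losses = regression.SGDTrain(kernel, torch.rand(2, 50),
                                         torch.rand(1, 50), return_losses=True)
    visualization.graphloss(losses)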
diff --git a/data analysis/analysis-master/dist/analysis-1.0.0.0-py3-none-any.whl b/data analysis/analysis-master/dist/analysis-1.0.0.0-py3-none-any.whl
new file mode 100644
index 0000000000000000000000000000000000000000..3f86af4324c2f00b6aa24a16cd106c017df900ca
GIT binary patch
[base85 payload of analysis-1.0.0.0-py3-none-any.whl (literal 21422) omitted]
diff --git a/data analysis/analysis-master/dist/analysis-1.0.0.0.tar.gz b/data analysis/analysis-master/dist/analysis-1.0.0.0.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..24bba0db94b380a2fc629bab1f644acc00f77c2d
GIT binary patch
[base85 payload of analysis-1.0.0.0.tar.gz (literal 19298) omitted]
zU8cF33Hh1ad$YII8f)9%*GAGnu%sf)>TE?MaRL!0TW+LMFkyPL(+1pV>sELj_D5&q z?cLkWL+}|6!g5=(Y>S68E~7TvQ(QZ9#Uv={P{++(`mJ~S+~mX%DX81v33Ut;#GY~Q z?vl=V#S}C#j{dik0H!B{Y0nUcx#{H&T~G(}hBjIm$Q#X~We#!6A>)qY+<`!iHRc%` z-fa3WDn^>>)Zebmv2FJpR>jgOC)-u|qM6emF(}4Y5pIWYjdm-P+!S)-S%cSF5DM8g z1_fg$YlgJ#Zkf%+N7v!&ils;877^uY<7cxDGuJ;-TKhtev&1GVL~8`P_k~untVD|` zmy8==L<$I(4glR~Rni8u2Cn5Yq3RhlmYZP+urS9H^^F?T7*ZFG&fbmObypLN0tRpp z9SoEb=~7Yz=^8O4Riu=XP+&~Tp`*iBq+>8pV07z{9Q6Vc4hE8=B?Pw7NDD(k2D_ef zKft|zJ)hxu&U1bp6#F_{Nd73KkvkDxpJWw-iv(pOEX7!K5=Qp+`1I?=$M2pFZI+~d zUeSs?-TXmw5%$wooWYO7Z1!*t`LF8x`<>H$CE;j9!t3}KR&IiA)7G-*&y@Reu3%hT zTrKs2TOPlRpFcV zar@1fI8|o_8$8_HPZZYTbBKtpEvkZfMc~gl$qhy{d@v@v$}!JjU;2eP&>U; zce{yUH^EkEq8@efGMUp?6OHF5y(FT@?>NYljmbL1Fga%BppweAgP+p7ycgnWUW%4+ zBN`Kxalt7=dWSi~(Z)X{fxSX6on@feVsrvqm%h=ut}m`*=J}DP^M4F0E76Sr-;ccY zNVL8r{i_W1N8LVd2!ji2aG6GB%`EWxbiQTMkU>3)Cp!9fU>@Ok7oboi+`5C?o~ZH6 zP`K7{RonFwK`0%T`i$J9Y3x?X4v|H7%+T~8c|fTUZGYQym5xA>lcRxz;8-Wt?v(uE zc#L&^wnb?VVAf7i;8IyEe>lp*UI{4uSt0!$S#@aM^h_Go+s_s@)uZ!WRVKl*+s@_G zSWSu}2I6(cpjX%XOT)JP3XbLdiuT&RcX`&`6qJd8SQWoWbC1*JvdO!u=CW$p%%p4r zuRZ*2i-v*K6Dl5OO<o|qPlfAqOl^(cU2PQ5!oy6w*|H%M>P@>lMkvBZFCj;VU@Wqe3 ztl-|;tMEYv-*!l>_BwZ=j@iPnpY_j^hb2Q=nxRzYo~;`Q$4^X&`JQ-%sy-&sR4luQ z0z*U5jHia2dzDqum+v~#S5NOWBKemxOdPlrzi`EH-`AJrYr$F2gc)srpfmUB39{kV zsC>j_Gakba01UtSaa8O`|F#rBZomCG5p&M}dqDkg9eN)nxvo@hmWL%}9W>1>MF4#b z$7rX$eL59JTG31s7F)VkKHPckw<;un7-cqIs&x_c4UoKB{iV5M(XY&f(azs7JAHpi z)rRNM_f)l9XJL<*QJu_ z@hdO6J=ifVbWR4SgRk;&nQmf+9L&1+4E*msv?s(tgp{f(HBQsHE`oqmfJcLry?Bz; z0DF$quKV2@z=Z*IyKWLR*Q|Y6ooRvtdA$Q;RWH4wm0q7;cN}c(9ofEo%YmmKg9XaE zG9tM|epqMvs}!Og$5xsp^c}Ms6u`5Ig|=fC#Ty_gOkP>VuCJUGPumL0_|2;p{Ow{% z$%p&OdG(J2>th=>kE+g1USSh{e(lzib&O)Jw5_}Z=~jP;FflaY-yDM9$m&f65~NLo zuucq0`1S|%3U=4{4%;PZs#t;J&Nq*^k!Nj7)p6=Wrq@g=-%XW6`59e2zO`wy7S4

kn*%gZ_WAs$ud9j%^$^dHNn^uE`%cWSAJ2Rhb-wV~8{m>Y!@E)Q0aK)Le zMo;YD40&8sLMbu^<+dZhVmp@Ok zNg(rP($>FHMW1(QVhs&8w{`2EhOK`9^!yWYkFJFIm0v4iEn5XNs5w#_9ul1#D2WC^ zAFzGWG@5-NFp$Hd6O zA^~cc6;4{g4s|3DF_JkWA_K>b0O<>N`?vA~w-~5t4(;xO?-s3k$rH(?M9d<|b*q&h5oV{G#a{=%=%b6almYUAs^#7Gb z$eeH-#lJBNz>;>ka8i`UID{#7u8Yd7kL&bza4Mzh{s|0(hf+au)<^yQY!^EPg2#v0 zlP#fqe(;8nV8J1Ca%gXRmqhzLOqfE@(r;}5E$Xv;C^1#;X|=@nn7uangTElmF+Nl% zQgE$($nYsoGSi+`HY1?Da{+&5u}-shZ9_N7jYvA+p3Z899WYLL49=egY@Gi%123xa z1(OV?;R#=c>)E>uwM!l_LBuO2)TFClEA8tZrr6bJG-j2r$}qJ&MfJW`398uuppdDn zqPHd#hoKJupk~#0Uj=W1%P&iq-ILJr%U+DUKVgw((QZkGtP4KC&0C#cdwcaUP1Ek0 zT{HsqlKn62R;S^9bXJNVxzq(Ext8hCz^Atu?5-`}2;bZ3zbt+m3iI6De9c9UcS|;u zF;?qbo7mO+a3i`=?K+8D837=!(!@m>7Y3Qy)@1I7mJ3nY$02nTFW}!2DL!a6w|yy2 zt39$EOGz8m6SGHnE<9PjBBm_nZK6VoY!O>Zi(S3dKOic2FXtC9*oAh>9(~cLvIurH z@`{Vh##@db#EwX1icoJWHC!l-dX|2 z8)seyluDexrLvBnNyL^4Q6co_*cbOqdTg@UBP~9Kx_BDSGf;(_fX_uhC3En-|RbFfFf~b#a@|XoN^oQ zLUhW%wZa=eR+-KFbyUOo_u3fyjg_JPu~s1yW48>CnYA>cxi zJw#P~cP%W9y7%>LacBK`+ZL+gz~()*y(Ch^Pje`e zSR1?g(YTXKR3R%WfIa$4x^&_TLrw7C8%O_w^?Kap*qEw=F|L@U)OL&crgPf71kXt*!L>w*QYMBFQfdX-hU>F7L z+Il>EMCz=yqaGto;S`~Q-8E|R!SdPb?^{rmm_bQVek(pbHeh7*6xJ*;1I6rUk%@)~ zQGRl&aMKTkbRx}(0?52m?tlIJ>Ar_