diff --git a/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO b/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO index 6db62a2e..a9c6413a 100644 --- a/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO +++ b/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 2.1 Name: analysis -Version: 1.0.0.9 +Version: 1.0.0.10 Summary: analysis package developed by Titan Scouting for The Red Alliance Home-page: https://github.com/titanscout2022/tr2022-strategy Author: The Titan Scouting Team diff --git a/analysis-master/analysis-amd64/analysis.egg-info/SOURCES.txt b/analysis-master/analysis-amd64/analysis.egg-info/SOURCES.txt index b7f40198..25a54640 100644 --- a/analysis-master/analysis-amd64/analysis.egg-info/SOURCES.txt +++ b/analysis-master/analysis-amd64/analysis.egg-info/SOURCES.txt @@ -1,6 +1,7 @@ setup.py analysis/__init__.py analysis/analysis.py +analysis/glicko2.py analysis/regression.py analysis/titanlearn.py analysis/trueskill.py diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-37.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-37.pyc index fc74077c..58e435d6 100644 Binary files a/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-37.pyc and b/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-37.pyc differ diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-37.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-37.pyc index 050d4a4e..2fad4e2d 100644 Binary files a/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-37.pyc and b/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-37.pyc differ diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-37.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-37.pyc new file mode 100644 index 00000000..f2650e9d Binary files /dev/null and b/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-37.pyc differ diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-37.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-37.pyc index 7fd72542..4d96410a 100644 Binary files a/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-37.pyc and b/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-37.pyc differ diff --git a/analysis-master/analysis-amd64/analysis/analysis.py b/analysis-master/analysis-amd64/analysis/analysis.py index 08871456..3651680f 100644 --- a/analysis-master/analysis-amd64/analysis/analysis.py +++ b/analysis-master/analysis-amd64/analysis/analysis.py @@ -7,10 +7,12 @@ # current benchmark of optimization: 1.33 times faster # setup: -__version__ = "1.1.13.007" +__version__ = "1.1.13.008" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 1.1.13.008: + - moved Glicko2 to a seperate package 1.1.13.007: - fixed bug with trueskill 1.1.13.006: @@ -271,7 +273,6 @@ __all__ = [ 'SVM', 'random_forest_classifier', 'random_forest_regressor', - 'Glicko2', # all statistics functions left out due to integration in other functions ] @@ -280,6 +281,7 @@ __all__ = [ # imports (now in alphabetical order! 
v 1.0.3.006): import csv +from analysis import glicko2 as Glicko2 import numba from numba import jit import numpy as np @@ -452,7 +454,7 @@ def elo(starting_score, opposing_score, observed, N, K): def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations): - player = Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol) + player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol) player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations) @@ -690,103 +692,4 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite kernel.fit(data_train, outputs_train) predictions = kernel.predict(data_test) - return kernel, RegressionMetrics(predictions, outputs_test) - -class Glicko2: - - _tau = 0.5 - - def getRating(self): - return (self.__rating * 173.7178) + 1500 - - def setRating(self, rating): - self.__rating = (rating - 1500) / 173.7178 - - rating = property(getRating, setRating) - - def getRd(self): - return self.__rd * 173.7178 - - def setRd(self, rd): - self.__rd = rd / 173.7178 - - rd = property(getRd, setRd) - - def __init__(self, rating = 1500, rd = 350, vol = 0.06): - - self.setRating(rating) - self.setRd(rd) - self.vol = vol - - def _preRatingRD(self): - - self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2)) - - def update_player(self, rating_list, RD_list, outcome_list): - - rating_list = [(x - 1500) / 173.7178 for x in rating_list] - RD_list = [x / 173.7178 for x in RD_list] - - v = self._v(rating_list, RD_list) - self.vol = self._newVol(rating_list, RD_list, outcome_list, v) - self._preRatingRD() - - self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v)) - - tempSum = 0 - for i in range(len(rating_list)): - tempSum += self._g(RD_list[i]) * \ - (outcome_list[i] - self._E(rating_list[i], RD_list[i])) - self.__rating += math.pow(self.__rd, 2) * tempSum - - - def _newVol(self, rating_list, RD_list, outcome_list, v): - - i = 0 - delta = self._delta(rating_list, RD_list, outcome_list, v) - a = math.log(math.pow(self.vol, 2)) - tau = self._tau - x0 = a - x1 = 0 - - while x0 != x1: - # New iteration, so x(i) becomes x(i-1) - x0 = x1 - d = math.pow(self.__rating, 2) + v + math.exp(x0) - h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \ - / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2) - h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \ - (math.pow(self.__rating, 2) + v) \ - / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \ - * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3) - x1 = x0 - (h1 / h2) - - return math.exp(x1 / 2) - - def _delta(self, rating_list, RD_list, outcome_list, v): - - tempSum = 0 - for i in range(len(rating_list)): - tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i])) - return v * tempSum - - def _v(self, rating_list, RD_list): - - tempSum = 0 - for i in range(len(rating_list)): - tempE = self._E(rating_list[i], RD_list[i]) - tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE) - return 1 / tempSum - - def _E(self, p2rating, p2RD): - - return 1 / (1 + math.exp(-1 * self._g(p2RD) * \ - (self.__rating - p2rating))) - - def _g(self, RD): - - return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2)) - - def did_not_compete(self): - - self._preRatingRD() + return kernel, RegressionMetrics(predictions, outputs_test) \ No newline at end of file diff --git a/analysis-master/analysis-amd64/analysis/glicko2.py 
b/analysis-master/analysis-amd64/analysis/glicko2.py new file mode 100644 index 00000000..66c0df94 --- /dev/null +++ b/analysis-master/analysis-amd64/analysis/glicko2.py @@ -0,0 +1,99 @@ +import math + +class Glicko2: + _tau = 0.5 + + def getRating(self): + return (self.__rating * 173.7178) + 1500 + + def setRating(self, rating): + self.__rating = (rating - 1500) / 173.7178 + + rating = property(getRating, setRating) + + def getRd(self): + return self.__rd * 173.7178 + + def setRd(self, rd): + self.__rd = rd / 173.7178 + + rd = property(getRd, setRd) + + def __init__(self, rating = 1500, rd = 350, vol = 0.06): + + self.setRating(rating) + self.setRd(rd) + self.vol = vol + + def _preRatingRD(self): + + self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2)) + + def update_player(self, rating_list, RD_list, outcome_list): + + rating_list = [(x - 1500) / 173.7178 for x in rating_list] + RD_list = [x / 173.7178 for x in RD_list] + + v = self._v(rating_list, RD_list) + self.vol = self._newVol(rating_list, RD_list, outcome_list, v) + self._preRatingRD() + + self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v)) + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * \ + (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + self.__rating += math.pow(self.__rd, 2) * tempSum + + + def _newVol(self, rating_list, RD_list, outcome_list, v): + + i = 0 + delta = self._delta(rating_list, RD_list, outcome_list, v) + a = math.log(math.pow(self.vol, 2)) + tau = self._tau + x0 = a + x1 = 0 + + while x0 != x1: + # New iteration, so x(i) becomes x(i-1) + x0 = x1 + d = math.pow(self.__rating, 2) + v + math.exp(x0) + h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \ + / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2) + h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \ + (math.pow(self.__rating, 2) + v) \ + / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \ + * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3) + x1 = x0 - (h1 / h2) + + return math.exp(x1 / 2) + + def _delta(self, rating_list, RD_list, outcome_list, v): + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + return v * tempSum + + def _v(self, rating_list, RD_list): + + tempSum = 0 + for i in range(len(rating_list)): + tempE = self._E(rating_list[i], RD_list[i]) + tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE) + return 1 / tempSum + + def _E(self, p2rating, p2RD): + + return 1 / (1 + math.exp(-1 * self._g(p2RD) * \ + (self.__rating - p2rating))) + + def _g(self, RD): + + return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2)) + + def did_not_compete(self): + + self._preRatingRD() \ No newline at end of file diff --git a/analysis-master/analysis-amd64/build/lib/analysis/analysis.py b/analysis-master/analysis-amd64/build/lib/analysis/analysis.py index 08871456..3651680f 100644 --- a/analysis-master/analysis-amd64/build/lib/analysis/analysis.py +++ b/analysis-master/analysis-amd64/build/lib/analysis/analysis.py @@ -7,10 +7,12 @@ # current benchmark of optimization: 1.33 times faster # setup: -__version__ = "1.1.13.007" +__version__ = "1.1.13.008" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 1.1.13.008: + - moved Glicko2 to a seperate package 1.1.13.007: - fixed bug with trueskill 1.1.13.006: @@ -271,7 +273,6 @@ __all__ = [ 'SVM', 'random_forest_classifier', 'random_forest_regressor', - 
'Glicko2', # all statistics functions left out due to integration in other functions ] @@ -280,6 +281,7 @@ __all__ = [ # imports (now in alphabetical order! v 1.0.3.006): import csv +from analysis import glicko2 as Glicko2 import numba from numba import jit import numpy as np @@ -452,7 +454,7 @@ def elo(starting_score, opposing_score, observed, N, K): def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations): - player = Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol) + player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol) player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations) @@ -690,103 +692,4 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite kernel.fit(data_train, outputs_train) predictions = kernel.predict(data_test) - return kernel, RegressionMetrics(predictions, outputs_test) - -class Glicko2: - - _tau = 0.5 - - def getRating(self): - return (self.__rating * 173.7178) + 1500 - - def setRating(self, rating): - self.__rating = (rating - 1500) / 173.7178 - - rating = property(getRating, setRating) - - def getRd(self): - return self.__rd * 173.7178 - - def setRd(self, rd): - self.__rd = rd / 173.7178 - - rd = property(getRd, setRd) - - def __init__(self, rating = 1500, rd = 350, vol = 0.06): - - self.setRating(rating) - self.setRd(rd) - self.vol = vol - - def _preRatingRD(self): - - self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2)) - - def update_player(self, rating_list, RD_list, outcome_list): - - rating_list = [(x - 1500) / 173.7178 for x in rating_list] - RD_list = [x / 173.7178 for x in RD_list] - - v = self._v(rating_list, RD_list) - self.vol = self._newVol(rating_list, RD_list, outcome_list, v) - self._preRatingRD() - - self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v)) - - tempSum = 0 - for i in range(len(rating_list)): - tempSum += self._g(RD_list[i]) * \ - (outcome_list[i] - self._E(rating_list[i], RD_list[i])) - self.__rating += math.pow(self.__rd, 2) * tempSum - - - def _newVol(self, rating_list, RD_list, outcome_list, v): - - i = 0 - delta = self._delta(rating_list, RD_list, outcome_list, v) - a = math.log(math.pow(self.vol, 2)) - tau = self._tau - x0 = a - x1 = 0 - - while x0 != x1: - # New iteration, so x(i) becomes x(i-1) - x0 = x1 - d = math.pow(self.__rating, 2) + v + math.exp(x0) - h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \ - / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2) - h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \ - (math.pow(self.__rating, 2) + v) \ - / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \ - * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3) - x1 = x0 - (h1 / h2) - - return math.exp(x1 / 2) - - def _delta(self, rating_list, RD_list, outcome_list, v): - - tempSum = 0 - for i in range(len(rating_list)): - tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i])) - return v * tempSum - - def _v(self, rating_list, RD_list): - - tempSum = 0 - for i in range(len(rating_list)): - tempE = self._E(rating_list[i], RD_list[i]) - tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE) - return 1 / tempSum - - def _E(self, p2rating, p2RD): - - return 1 / (1 + math.exp(-1 * self._g(p2RD) * \ - (self.__rating - p2rating))) - - def _g(self, RD): - - return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2)) - - def did_not_compete(self): - - self._preRatingRD() + return 
kernel, RegressionMetrics(predictions, outputs_test) \ No newline at end of file diff --git a/analysis-master/analysis-amd64/build/lib/analysis/glicko2.py b/analysis-master/analysis-amd64/build/lib/analysis/glicko2.py new file mode 100644 index 00000000..66c0df94 --- /dev/null +++ b/analysis-master/analysis-amd64/build/lib/analysis/glicko2.py @@ -0,0 +1,99 @@ +import math + +class Glicko2: + _tau = 0.5 + + def getRating(self): + return (self.__rating * 173.7178) + 1500 + + def setRating(self, rating): + self.__rating = (rating - 1500) / 173.7178 + + rating = property(getRating, setRating) + + def getRd(self): + return self.__rd * 173.7178 + + def setRd(self, rd): + self.__rd = rd / 173.7178 + + rd = property(getRd, setRd) + + def __init__(self, rating = 1500, rd = 350, vol = 0.06): + + self.setRating(rating) + self.setRd(rd) + self.vol = vol + + def _preRatingRD(self): + + self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2)) + + def update_player(self, rating_list, RD_list, outcome_list): + + rating_list = [(x - 1500) / 173.7178 for x in rating_list] + RD_list = [x / 173.7178 for x in RD_list] + + v = self._v(rating_list, RD_list) + self.vol = self._newVol(rating_list, RD_list, outcome_list, v) + self._preRatingRD() + + self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v)) + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * \ + (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + self.__rating += math.pow(self.__rd, 2) * tempSum + + + def _newVol(self, rating_list, RD_list, outcome_list, v): + + i = 0 + delta = self._delta(rating_list, RD_list, outcome_list, v) + a = math.log(math.pow(self.vol, 2)) + tau = self._tau + x0 = a + x1 = 0 + + while x0 != x1: + # New iteration, so x(i) becomes x(i-1) + x0 = x1 + d = math.pow(self.__rating, 2) + v + math.exp(x0) + h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \ + / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2) + h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \ + (math.pow(self.__rating, 2) + v) \ + / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \ + * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3) + x1 = x0 - (h1 / h2) + + return math.exp(x1 / 2) + + def _delta(self, rating_list, RD_list, outcome_list, v): + + tempSum = 0 + for i in range(len(rating_list)): + tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i])) + return v * tempSum + + def _v(self, rating_list, RD_list): + + tempSum = 0 + for i in range(len(rating_list)): + tempE = self._E(rating_list[i], RD_list[i]) + tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE) + return 1 / tempSum + + def _E(self, p2rating, p2RD): + + return 1 / (1 + math.exp(-1 * self._g(p2RD) * \ + (self.__rating - p2rating))) + + def _g(self, RD): + + return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2)) + + def did_not_compete(self): + + self._preRatingRD() \ No newline at end of file diff --git a/analysis-master/analysis-amd64/dist/analysis-1.0.0.10-py3-none-any.whl b/analysis-master/analysis-amd64/dist/analysis-1.0.0.10-py3-none-any.whl new file mode 100644 index 00000000..a9401521 Binary files /dev/null and b/analysis-master/analysis-amd64/dist/analysis-1.0.0.10-py3-none-any.whl differ diff --git a/analysis-master/analysis-amd64/dist/analysis-1.0.0.10.tar.gz b/analysis-master/analysis-amd64/dist/analysis-1.0.0.10.tar.gz new file mode 100644 index 00000000..a970f1b5 Binary files /dev/null and 
b/analysis-master/analysis-amd64/dist/analysis-1.0.0.10.tar.gz differ diff --git a/analysis-master/analysis-amd64/setup.py b/analysis-master/analysis-amd64/setup.py index eb497dfc..f6ef54c4 100644 --- a/analysis-master/analysis-amd64/setup.py +++ b/analysis-master/analysis-amd64/setup.py @@ -8,7 +8,7 @@ with open("requirements.txt", 'r') as file: setuptools.setup( name="analysis", - version="1.0.0.009", + version="1.0.0.010", author="The Titan Scouting Team", author_email="titanscout2022@gmail.com", description="analysis package developed by Titan Scouting for The Red Alliance", diff --git a/data analysis/requirements.txt b/data analysis/requirements.txt new file mode 100644 index 00000000..a87051bc --- /dev/null +++ b/data analysis/requirements.txt @@ -0,0 +1,4 @@ +requests +pymongo +pandas +dnspython \ No newline at end of file diff --git a/data analysis/superscript.py b/data analysis/superscript.py index d57eab26..869c97e6 100644 --- a/data analysis/superscript.py +++ b/data analysis/superscript.py @@ -3,12 +3,15 @@ # Notes: # setup: -__version__ = "0.0.5.000" +__version__ = "0.0.5.001" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: + 0.0.5.001: + - text fixes + - removed matplotlib requirement 0.0.5.000: - improved user interface + - improved user interface 0.0.4.002: - removed unessasary code 0.0.4.001: @@ -84,7 +87,6 @@ __all__ = [ from analysis import analysis as an import data as d import numpy as np -import matplotlib.pyplot as plt from os import system, name from pathlib import Path import time
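
The substantive change in this patch is that the `Glicko2` rating class moves out of `analysis/analysis.py` into its own module, `analysis/glicko2.py`, and the `glicko2()` wrapper in `analysis.py` now builds the player through the new import (`from analysis import glicko2 as Glicko2`, then `Glicko2.Glicko2(...)`). Below is a minimal sketch of exercising the relocated class directly, assuming the `analysis` package built from this diff is installed and importable; the opponent ratings, deviations, and outcomes are illustrative values, not data from the repository.

```python
# Hedged usage sketch of the relocated Glicko2 module (analysis/glicko2.py in this diff).
# The import path and class API (constructor defaults, update_player, rating/rd/vol)
# come from the patch itself; the numeric values below are made up for illustration.
from analysis import glicko2 as Glicko2

# A player at the defaults from the class: rating 1500, RD 350, volatility 0.06.
player = Glicko2.Glicko2(rating=1500, rd=350, vol=0.06)

# Opposing ratings, their rating deviations, and observed outcomes
# (1 = win, 0 = loss, 0.5 = draw), one entry per game in the rating period.
player.update_player([1400, 1550, 1700], [30, 100, 300], [1, 0, 0])

# rating and rd are exposed as properties; vol is a plain attribute.
print(player.rating, player.rd, player.vol)
```

The `glicko2()` helper in `analysis.py` follows the same pattern after this change: it constructs `Glicko2.Glicko2` with the starting score, RD, and volatility, then calls `update_player` with the opposing scores, opposing RDs, and observations.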