Mirror of https://github.com/titanscouting/tra-analysis.git, synced 2025-09-07 07:27:20 +00:00

Compare commits (26 commits)
Commit SHA1s:
7a58cd08e2
337fae68ee
5e71d05626
01df42aa49
33eea153c1
114eee5d57
06f008746a
4f9c4e0dbb
5697e8b79e
e054e66743
c914bd3754
6c08885a53
375befd0c4
893d1fb1d0
6a426ae4cd
50c064ffa4
1b0a9967c8
2605f7c29f
6f5a3edd88
457146b0e4
f7fd8ffcf9
77bc792426
39146cc555
2daa09c040
68d27a6302
7fc18b7c35
.devcontainer/Dockerfile (new file, 2 lines)
@@ -0,0 +1,2 @@
+FROM python
+WORKDIR ~/
.devcontainer/devcontainer.json (new file, 26 lines)
@@ -0,0 +1,26 @@
+{
+	"name": "TRA Analysis Development Environment",
+	"build": {
+		"dockerfile": "Dockerfile",
+	},
+	"settings": {
+		"terminal.integrated.shell.linux": "/bin/bash",
+		"python.pythonPath": "/usr/local/bin/python",
+		"python.linting.enabled": true,
+		"python.linting.pylintEnabled": true,
+		"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
+		"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
+		"python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
+		"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
+		"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
+		"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
+		"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
+		"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
+		"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
+	},
+	"extensions": [
+		"mhutchie.git-graph",
+	],
+	"postCreateCommand": "pip install -r analysis-master/analysis-amd64/requirements.txt"
+}
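Note: the trailing commas after "Dockerfile", and after the "mhutchie.git-graph" entry would be rejected by a strict JSON parser; VS Code reads devcontainer.json as JSON-with-comments, which tolerates them.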
.gitignore (vendored, 3 lines added)
@@ -19,3 +19,6 @@ data analysis/keys.txt
 data analysis/check_for_new_matches.ipynb
 data analysis/test.ipynb
 data analysis/visualize_pit.ipynb
+data analysis/config/keys.config
+analysis-master/analysis/__pycache__/
+data analysis/__pycache__/
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: analysis
-Version: 1.0.0.8
+Version: 1.0.0.11
 Summary: analysis package developed by Titan Scouting for The Red Alliance
 Home-page: https://github.com/titanscout2022/tr2022-strategy
 Author: The Titan Scouting Team
@@ -1,6 +1,7 @@
 setup.py
 analysis/__init__.py
 analysis/analysis.py
+analysis/glicko2.py
 analysis/regression.py
 analysis/titanlearn.py
 analysis/trueskill.py
8 binary files changed (not shown).
@@ -7,10 +7,16 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.006"
+__version__ = "1.1.13.009"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	1.1.13.009:
+		- moved elo, glicko2, trueskill functions under class Metrics
+	1.1.13.008:
+		- moved Glicko2 to a seperate package
+	1.1.13.007:
+		- fixed bug with trueskill
 	1.1.13.006:
 		- cleaned up imports
 	1.1.13.005:

@@ -269,7 +275,6 @@ __all__ = [
 	'SVM',
 	'random_forest_classifier',
 	'random_forest_regressor',
-	'Glicko2',
 	# all statistics functions left out due to integration in other functions
 ]

@@ -278,6 +283,7 @@ __all__ = [
 # imports (now in alphabetical order! v 1.0.3.006):
 
 import csv
+from analysis import glicko2 as Glicko2
 import numba
 from numba import jit
 import numpy as np
@@ -442,32 +448,34 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
 	return regressions
 
-def elo(starting_score, opposing_score, observed, N, K):
+class Metrics:
+
+	def elo(starting_score, opposing_score, observed, N, K):
 
 		expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
 
 		return starting_score + K*(np.sum(observed) - np.sum(expected))
 
-def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
+	def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
 
-		player = Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
+		player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
 
 		player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
 
 		return (player.rating, player.rd, player.vol)
 
-def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+	def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
 
 		team_ratings = []
 
 		for team in teams_data:
-			team_temp = []
+			team_temp = ()
 			for player in team:
 				player = Trueskill.Rating(player[0], player[1])
-				team_temp.append(player)
+				team_temp = team_temp + (player,)
 			team_ratings.append(team_temp)
 
-		return Trueskill.rate(teams_data, observations)
+		return Trueskill.rate(team_ratings, ranks=observations)
 
 class RegressionMetrics():
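The three rating metrics keep their signatures but now live on the Metrics namespace class, so every call site gains a Metrics. prefix (the superscript.py hunks further down show exactly this change). A minimal usage sketch, assuming the package imports as `analysis` and using made-up numbers:

```python
from analysis import analysis as an

# Elo: expected score is 1 / (1 + 10**((opponent - player) / N));
# the rating then moves by K * (sum(observed) - sum(expected)).
new_score = an.Metrics.elo(1500, 1400, [1], 400, 24)  # one win against a 1400-rated opponent

# Glicko-2: returns the updated (rating, rd, vol) triple.
score, rd, vol = an.Metrics.glicko2(1500, 350, 0.06, [1400, 1550], [30, 100], [1, 0])

# TrueSkill: each team is a list of (mu, sigma) tuples. The fixed version wraps
# them in Trueskill.Rating objects, groups each team as a tuple, and passes the
# placements through the ranks keyword (lower rank = better finish), which is
# what Trueskill.rate expects; the old code passed the raw tuples straight in.
rated = an.Metrics.trueskill([[(25, 8.33)], [(25, 8.33)]], [0, 1])
```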
@@ -559,8 +567,9 @@ def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "
 
 	return model, metrics
 
-@jit(forceobj=True)
-def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+class KNN:
+
+	def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
 
 		data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
 		model = sklearn.neighbors.KNeighborsClassifier()

@@ -569,7 +578,7 @@ def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30
 
 		return model, ClassificationMetrics(predictions, labels_test)
 
-def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+	def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
 
 		data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
 		model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
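The KNN wrappers get the same treatment as the rating metrics: same signatures, new KNN namespace class, with the @jit decorator dropped from the classifier. A short sketch of the classifier path with synthetic data:

```python
import numpy as np
from analysis import analysis as an

data = np.random.rand(100, 4)           # 2D feature matrix, assumed already scaled
labels = np.random.randint(0, 2, 100)   # 1D class labels

# splits the data internally (test_size fraction held out with a fixed seed)
# and returns the fitted sklearn model plus a ClassificationMetrics object
model, metrics = an.KNN.knn_classifier(data, labels, test_size = 0.3)
```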
@@ -689,102 +698,3 @@ def random_forest_regressor(data, outputs, test_size, n_estimators="warn", crite
 
 	predictions = kernel.predict(data_test)
 
 	return kernel, RegressionMetrics(predictions, outputs_test)
-
-class Glicko2:
-
-	_tau = 0.5
-
-	def getRating(self):
-		return (self.__rating * 173.7178) + 1500
-
-	def setRating(self, rating):
-		self.__rating = (rating - 1500) / 173.7178
-
-	rating = property(getRating, setRating)
-
-	def getRd(self):
-		return self.__rd * 173.7178
-
-	def setRd(self, rd):
-		self.__rd = rd / 173.7178
-
-	rd = property(getRd, setRd)
-
-	def __init__(self, rating = 1500, rd = 350, vol = 0.06):
-
-		self.setRating(rating)
-		self.setRd(rd)
-		self.vol = vol
-
-	def _preRatingRD(self):
-
-		self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2))
-
-	def update_player(self, rating_list, RD_list, outcome_list):
-
-		rating_list = [(x - 1500) / 173.7178 for x in rating_list]
-		RD_list = [x / 173.7178 for x in RD_list]
-
-		v = self._v(rating_list, RD_list)
-		self.vol = self._newVol(rating_list, RD_list, outcome_list, v)
-		self._preRatingRD()
-
-		self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v))
-
-		tempSum = 0
-		for i in range(len(rating_list)):
-			tempSum += self._g(RD_list[i]) * \
-					   (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
-		self.__rating += math.pow(self.__rd, 2) * tempSum
-
-
-	def _newVol(self, rating_list, RD_list, outcome_list, v):
-
-		i = 0
-		delta = self._delta(rating_list, RD_list, outcome_list, v)
-		a = math.log(math.pow(self.vol, 2))
-		tau = self._tau
-		x0 = a
-		x1 = 0
-
-		while x0 != x1:
-			# New iteration, so x(i) becomes x(i-1)
-			x0 = x1
-			d = math.pow(self.__rating, 2) + v + math.exp(x0)
-			h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \
-				 / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2)
-			h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \
-				 (math.pow(self.__rating, 2) + v) \
-				 / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \
-				 * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3)
-			x1 = x0 - (h1 / h2)
-
-		return math.exp(x1 / 2)
-
-	def _delta(self, rating_list, RD_list, outcome_list, v):
-
-		tempSum = 0
-		for i in range(len(rating_list)):
-			tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
-		return v * tempSum
-
-	def _v(self, rating_list, RD_list):
-
-		tempSum = 0
-		for i in range(len(rating_list)):
-			tempE = self._E(rating_list[i], RD_list[i])
-			tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE)
-		return 1 / tempSum
-
-	def _E(self, p2rating, p2RD):
-
-		return 1 / (1 + math.exp(-1 * self._g(p2RD) * \
-								 (self.__rating - p2rating)))
-
-	def _g(self, RD):
-
-		return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2))
-
-	def did_not_compete(self):
-
-		self._preRatingRD()
analysis-master/analysis-amd64/analysis/glicko2.py (new file, 99 lines)
@@ -0,0 +1,99 @@
+import math
+
+class Glicko2:
+	_tau = 0.5
+
+	def getRating(self):
+		return (self.__rating * 173.7178) + 1500
+
+	def setRating(self, rating):
+		self.__rating = (rating - 1500) / 173.7178
+
+	rating = property(getRating, setRating)
+
+	def getRd(self):
+		return self.__rd * 173.7178
+
+	def setRd(self, rd):
+		self.__rd = rd / 173.7178
+
+	rd = property(getRd, setRd)
+
+	def __init__(self, rating = 1500, rd = 350, vol = 0.06):
+
+		self.setRating(rating)
+		self.setRd(rd)
+		self.vol = vol
+
+	def _preRatingRD(self):
+
+		self.__rd = math.sqrt(math.pow(self.__rd, 2) + math.pow(self.vol, 2))
+
+	def update_player(self, rating_list, RD_list, outcome_list):
+
+		rating_list = [(x - 1500) / 173.7178 for x in rating_list]
+		RD_list = [x / 173.7178 for x in RD_list]
+
+		v = self._v(rating_list, RD_list)
+		self.vol = self._newVol(rating_list, RD_list, outcome_list, v)
+		self._preRatingRD()
+
+		self.__rd = 1 / math.sqrt((1 / math.pow(self.__rd, 2)) + (1 / v))
+
+		tempSum = 0
+		for i in range(len(rating_list)):
+			tempSum += self._g(RD_list[i]) * \
+					   (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
+		self.__rating += math.pow(self.__rd, 2) * tempSum
+
+
+	def _newVol(self, rating_list, RD_list, outcome_list, v):
+
+		i = 0
+		delta = self._delta(rating_list, RD_list, outcome_list, v)
+		a = math.log(math.pow(self.vol, 2))
+		tau = self._tau
+		x0 = a
+		x1 = 0
+
+		while x0 != x1:
+			# New iteration, so x(i) becomes x(i-1)
+			x0 = x1
+			d = math.pow(self.__rating, 2) + v + math.exp(x0)
+			h1 = -(x0 - a) / math.pow(tau, 2) - 0.5 * math.exp(x0) \
+				 / d + 0.5 * math.exp(x0) * math.pow(delta / d, 2)
+			h2 = -1 / math.pow(tau, 2) - 0.5 * math.exp(x0) * \
+				 (math.pow(self.__rating, 2) + v) \
+				 / math.pow(d, 2) + 0.5 * math.pow(delta, 2) * math.exp(x0) \
+				 * (math.pow(self.__rating, 2) + v - math.exp(x0)) / math.pow(d, 3)
+			x1 = x0 - (h1 / h2)
+
+		return math.exp(x1 / 2)
+
+	def _delta(self, rating_list, RD_list, outcome_list, v):
+
+		tempSum = 0
+		for i in range(len(rating_list)):
+			tempSum += self._g(RD_list[i]) * (outcome_list[i] - self._E(rating_list[i], RD_list[i]))
+		return v * tempSum
+
+	def _v(self, rating_list, RD_list):
+
+		tempSum = 0
+		for i in range(len(rating_list)):
+			tempE = self._E(rating_list[i], RD_list[i])
+			tempSum += math.pow(self._g(RD_list[i]), 2) * tempE * (1 - tempE)
+		return 1 / tempSum
+
+	def _E(self, p2rating, p2RD):
+
+		return 1 / (1 + math.exp(-1 * self._g(p2RD) * \
+								 (self.__rating - p2rating)))
+
+	def _g(self, RD):
+
+		return 1 / math.sqrt(1 + 3 * math.pow(RD, 2) / math.pow(math.pi, 2))
+
+	def did_not_compete(self):
+
+		self._preRatingRD()
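The extracted module is a standard Glicko-2 implementation: public ratings on the familiar 1500/350 scale are converted to the internal scale via (rating - 1500) / 173.7178, _newVol solves for the new volatility with a Newton iteration (x1 = x0 - h1 / h2, where h1 and h2 are the first and second derivatives of Glickman's log-volatility objective), and update_player applies one full rating period. A minimal sketch of direct use, with illustrative opponents:

```python
from analysis import glicko2

player = glicko2.Glicko2(rating = 1500, rd = 200, vol = 0.06)

# one rating period against three opponents: their ratings, their RDs, and the
# outcomes from the player's perspective (1 = win, 0 = loss)
player.update_player([1400, 1550, 1700], [30, 100, 300], [1, 0, 0])

print(player.rating, player.rd, player.vol)  # updated values on the public scale

# a player who sat out the period only has their rating deviation inflated
idle = glicko2.Glicko2()
idle.did_not_compete()
```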
analysis-master/analysis-amd64/build.sh (new executable file, 1 line)
@@ -0,0 +1 @@
+python setup.py sdist bdist_wheel || python3 setup.py sdist bdist_wheel
(The same analysis.py hunks shown above are applied verbatim to a second copy of the file.)
analysis-master/analysis-amd64/build/lib/analysis/glicko2.py (new file, 99 lines; contents identical to analysis/glicko2.py above)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.10-py3-none-any.whl (vendored, new file; binary not shown)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.10.tar.gz (vendored, new file; binary not shown)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.11-py3-none-any.whl (vendored, new file; binary not shown)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.11.tar.gz (vendored, new file; binary not shown)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.9-py3-none-any.whl (vendored, new file; binary not shown)
BIN	analysis-master/analysis-amd64/dist/analysis-1.0.0.9.tar.gz (vendored, new file; binary not shown)
analysis-master/analysis-amd64/docker/Dockerfile (new file, 5 lines)
@@ -0,0 +1,5 @@
+FROM python
+WORKDIR ~/
+COPY ./ ./
+RUN pip install -r requirements.txt
+CMD ["bash"]
analysis-master/analysis-amd64/docker/start-docker.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
+cd ..
+docker build -t tra-analysis-amd64-dev -f docker/Dockerfile .
+docker run -it tra-analysis-amd64-dev
analysis-master/analysis-amd64/requirements.txt (new file, 6 lines)
@@ -0,0 +1,6 @@
+numba
+numpy
+scipy
+scikit-learn
+six
+matplotlib
@@ -1,8 +1,14 @@
 import setuptools
 
+requirements = []
+
+with open("requirements.txt", 'r') as file:
+	for line in file:
+		requirements.append(line)
+
 setuptools.setup(
-	name="analysis", # Replace with your own username
-	version="1.0.0.008",
+	name="analysis",
+	version="1.0.0.011",
 	author="The Titan Scouting Team",
 	author_email="titanscout2022@gmail.com",
 	description="analysis package developed by Titan Scouting for The Red Alliance",

@@ -10,14 +16,7 @@ setuptools.setup(
 	long_description_content_type="text/markdown",
 	url="https://github.com/titanscout2022/tr2022-strategy",
 	packages=setuptools.find_packages(),
-	install_requires=[
-		"numba",
-		"numpy",
-		"scipy",
-		"scikit-learn",
-		"six",
-		"matplotlib"
-	],
+	install_requires=requirements,
 	license = "GNU General Public License v3.0",
 	classifiers=[
 		"Programming Language :: Python :: 3",
analysis-master/analysis-arm64/docker/start-docker.sh (new executable file, 3 lines)
@@ -0,0 +1,3 @@
+cd ..
+docker build -t tra-analysis-amd64-dev -f docker/Dockerfile .
+docker run -it tra-analysis-amd64-dev
5 binary files changed (not shown).

@@ -1 +0,0 @@
-python3 setup.py sdist bdist_wheel

2 binary files changed (not shown).
data analysis/config/competition.config (new file, 1 line)
@@ -0,0 +1 @@
+2020ilch
@@ -1,4 +1,3 @@
-2020ilch
 balls-blocked,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
 balls-collected,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
 balls-lower-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
data analysis/requirements.txt (new file, 4 lines)
@@ -0,0 +1,4 @@
+requests
+pymongo
+pandas
+dnspython
@@ -3,10 +3,17 @@
 # Notes:
 # setup:
 
-__version__ = "0.0.4.002"
+__version__ = "0.0.5.002"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	0.0.5.002:
+		- made changes due to refactoring of analysis
+	0.0.5.001:
+		- text fixes
+		- removed matplotlib requirement
+	0.0.5.000:
+		- improved user interface
 	0.0.4.002:
 		- removed unessasary code
 	0.0.4.001:

@@ -82,7 +89,8 @@ __all__ = [
 from analysis import analysis as an
 import data as d
 import numpy as np
-import matplotlib.pyplot as plt
 from os import system, name
+from pathlib import Path
 import time
 import warnings

@@ -91,16 +99,16 @@ def main():
 	while(True):
 
 		current_time = time.time()
-		print("time: " + str(current_time))
+		print("[OK] time: " + str(current_time))
 
-		print(" loading config")
-		competition, config = load_config("config.csv")
-		print(" config loaded")
+		start = time.time()
+		config = load_config(Path("config/stats.config"))
+		competition = an.load_csv(Path("config/competition.config"))[0][0]
+		print("[OK] configs loaded")
 
-		print(" loading database keys")
-		apikey = an.load_csv("keys.txt")[0][0]
-		tbakey = an.load_csv("keys.txt")[1][0]
-		print(" loaded keys")
+		apikey = an.load_csv(Path("config/keys.config"))[0][0]
+		tbakey = an.load_csv(Path("config/keys.config"))[1][0]
+		print("[OK] loaded keys")
 
 		previous_time = d.get_analysis_flags(apikey, "latest_update")

@@ -113,38 +121,55 @@ def main():
 
 		previous_time = previous_time["latest_update"]
 
-		print(" analysis backtimed to: " + str(previous_time))
+		print("[OK] analysis backtimed to: " + str(previous_time))
 
-		print(" loading data")
+		print("[OK] loading data")
+		start = time.time()
 		data = d.get_match_data_formatted(apikey, competition)
 		pit_data = d.pit = d.get_pit_data_formatted(apikey, competition)
-		print(" loaded data")
+		print("[OK] loaded data in " + str(time.time() - start) + " seconds")
 
-		print(" running tests")
+		print("[OK] running tests")
+		start = time.time()
 		results = simpleloop(data, config)
-		print(" finished tests")
+		print("[OK] finished tests in " + str(time.time() - start) + " seconds")
 
-		print(" running metrics")
+		print("[OK] running metrics")
+		start = time.time()
 		metricsloop(tbakey, apikey, competition, previous_time)
-		print(" finished metrics")
+		print("[OK] finished metrics in " + str(time.time() - start) + " seconds")
 
-		print(" running pit analysis")
+		print("[OK] running pit analysis")
+		start = time.time()
 		pit = pitloop(pit_data, config)
-		print(" finished pit analysis")
+		print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
 
 		d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
 
-		print(" pushing to database")
+		print("[OK] pushing to database")
+		start = time.time()
 		push_to_database(apikey, competition, results, pit)
-		print(" pushed to database")
+		print("[OK] pushed to database in " + str(time.time() - start) + " seconds")
 
+		clear()
+
+def clear():
+
+	# for windows
+	if name == 'nt':
+		_ = system('cls')
+
+	# for mac and linux(here, os.name is 'posix')
+	else:
+		_ = system('clear')
+
 def load_config(file):
 	config_vector = {}
 	file = an.load_csv(file)
-	for line in file[1:]:
+	for line in file:
 		config_vector[line[0]] = line[1:]
 
-	return (file[0][0], config_vector)
+	return config_vector
 
 def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]

@@ -263,11 +288,11 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
 
 	observations = {"red": 0.5, "blu": 0.5}
 
-	red_elo_delta = an.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-	blu_elo_delta = an.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
+	red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+	blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
 
-	new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-	new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+	new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+	new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
 
 	red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
 	blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
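Two behavioral changes in this file deserve a note. First, the competition code moved out of the stats config and into config/competition.config, so load_config no longer peels the first row off the CSV: every row now maps a metric name to its list of tests, and the function returns just the dict instead of a (competition, dict) tuple. Second, the elo and glicko2 updates route through the new an.Metrics namespace. A sketch of what the reworked load_config produces for stats.config rows like those shown earlier:

```python
# illustrative stand-in for the rows an.load_csv(Path("config/stats.config")) returns
rows = [
	["balls-blocked", "basic_stats", "historical_analysis", "regression_linear"],
	["balls-collected", "basic_stats", "historical_analysis", "regression_linear"],
]

config_vector = {}
for line in rows:               # formerly file[1:], which skipped the competition code
	config_vector[line[0]] = line[1:]

print(config_vector)
# {'balls-blocked': ['basic_stats', 'historical_analysis', 'regression_linear'],
#  'balls-collected': ['basic_stats', 'historical_analysis', 'regression_linear']}
```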