analysis pkg v 1.0.0.11

analysis.py v 1.1.13.009
superscript.py v 0.0.5.002
ltcptgeneral 2020-04-12 02:51:40 +00:00
parent 337fae68ee
commit 7a58cd08e2
11 changed files with 79 additions and 67 deletions

View File

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: analysis
-Version: 1.0.0.10
+Version: 1.0.0.11
 Summary: analysis package developed by Titan Scouting for The Red Alliance
 Home-page: https://github.com/titanscout2022/tr2022-strategy
 Author: The Titan Scouting Team

View File

@@ -7,10 +7,12 @@
 #    current benchmark of optimization: 1.33 times faster
 # setup:
-__version__ = "1.1.13.008"
+__version__ = "1.1.13.009"
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.009:
+        - moved elo, glicko2, trueskill functions under class Metrics
     1.1.13.008:
         - moved Glicko2 to a seperate package
     1.1.13.007:
@@ -446,32 +448,34 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
     return regressions

-def elo(starting_score, opposing_score, observed, N, K):
-    expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
-    return starting_score + K*(np.sum(observed) - np.sum(expected))
-
-def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
-    player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
-    player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
-    return (player.rating, player.rd, player.vol)
-
-def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
-    team_ratings = []
-    for team in teams_data:
-        team_temp = ()
-        for player in team:
-            player = Trueskill.Rating(player[0], player[1])
-            team_temp = team_temp + (player,)
-        team_ratings.append(team_temp)
-    return Trueskill.rate(team_ratings, ranks=observations)
+class Metrics:
+
+    def elo(starting_score, opposing_score, observed, N, K):
+        expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
+        return starting_score + K*(np.sum(observed) - np.sum(expected))
+
+    def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
+        player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
+        player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
+        return (player.rating, player.rd, player.vol)
+
+    def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+        team_ratings = []
+        for team in teams_data:
+            team_temp = ()
+            for player in team:
+                player = Trueskill.Rating(player[0], player[1])
+                team_temp = team_temp + (player,)
+            team_ratings.append(team_temp)
+        return Trueskill.rate(team_ratings, ranks=observations)

 class RegressionMetrics():
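
Note: the relocated elo helper implements the standard Elo update, expected score E = 1/(1 + 10^((R_opp - R)/N)) and new rating R' = R + K*(sum(S) - sum(E)). Since the moved functions take no self parameter, they are called on the class itself, as the superscript.py changes below show. A minimal call sketch, with invented scores and assumed N/K constants:

    from analysis import analysis as an

    elo_N = 400  # assumed scale constant, as in chess Elo
    elo_K = 24   # assumed K-factor; tune per application
    # a 1500-rated team against opponents rated 1450 and 1600, winning the first
    new_score = an.Metrics.elo(1500, [1450, 1600], [1, 0], elo_N, elo_K)
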
@@ -563,24 +567,25 @@ def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "
     return model, metrics

-@jit(forceobj=True)
-def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
-    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsClassifier()
-    model.fit(data_train, labels_train)
-    predictions = model.predict(data_test)
-    return model, ClassificationMetrics(predictions, labels_test)
-
-def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
-    data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
-    model.fit(data_train, outputs_train)
-    predictions = model.predict(data_test)
-    return model, RegressionMetrics(predictions, outputs_test)
+class KNN:
+
+    def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsClassifier()
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
+        return model, ClassificationMetrics(predictions, labels_test)
+
+    def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+        data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+        model.fit(data_train, outputs_train)
+        predictions = model.predict(data_test)
+        return model, RegressionMetrics(predictions, outputs_test)

 class NaiveBayes:
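
Note: besides the move into class KNN, this hunk drops the @jit(forceobj=True) decorator from knn_classifier, and knn_classifier still constructs KNeighborsClassifier() with library defaults, so its keyword arguments are currently unused. A hedged usage sketch, with sklearn's iris data standing in for real scouting data:

    import sklearn.datasets
    from analysis import analysis as an

    data, labels = sklearn.datasets.load_iris(return_X_y=True)
    # returns the fitted model and a ClassificationMetrics over the held-out split
    model, metrics = an.KNN.knn_classifier(data, labels, test_size=0.3)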

Binary file not shown.

View File

@@ -8,7 +8,7 @@ with open("requirements.txt", 'r') as file:
 setuptools.setup(
     name="analysis",
-    version="1.0.0.010",
+    version="1.0.0.011",
     author="The Titan Scouting Team",
     author_email="titanscout2022@gmail.com",
     description="analysis package developed by Titan Scouting for The Red Alliance",

View File

@@ -3,10 +3,12 @@
 # Notes:
 # setup:
-__version__ = "0.0.5.001"
+__version__ = "0.0.5.002"
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    0.0.5.002:
+        - made changes due to refactoring of analysis
     0.0.5.001:
         - text fixes
        - removed matplotlib requirement
@@ -286,11 +288,11 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
     observations = {"red": 0.5, "blu": 0.5}

-    red_elo_delta = an.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-    blu_elo_delta = an.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
+    red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+    blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]

-    new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-    new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+    new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+    new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])

     red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
     blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
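
Note: any trueskill call sites in superscript.py move the same way, to an.Metrics.trueskill. A hedged sketch with invented mu/sigma values; Trueskill here is the trueskill package as imported by analysis.py:

    from analysis import analysis as an

    red = [(25.0, 25.0/3), (25.0, 25.0/3)]  # default trueskill mu, sigma per player
    blu = [(25.0, 25.0/3), (25.0, 25.0/3)]
    # rank 0 beats rank 1, so this records a red-alliance win
    new_red, new_blu = an.Metrics.trueskill([red, blu], [0, 1])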