diff --git a/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO b/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO
index a9c6413a..410189e2 100644
--- a/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO
+++ b/analysis-master/analysis-amd64/analysis.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: analysis
-Version: 1.0.0.10
+Version: 1.0.0.11
 Summary: analysis package developed by Titan Scouting for The Red Alliance
 Home-page: https://github.com/titanscout2022/tr2022-strategy
 Author: The Titan Scouting Team
diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-38.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 00000000..2d9cbe89
Binary files /dev/null and b/analysis-master/analysis-amd64/analysis/__pycache__/__init__.cpython-38.pyc differ
diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-38.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-38.pyc
new file mode 100644
index 00000000..dad67484
Binary files /dev/null and b/analysis-master/analysis-amd64/analysis/__pycache__/analysis.cpython-38.pyc differ
diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-38.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-38.pyc
new file mode 100644
index 00000000..690c5e2e
Binary files /dev/null and b/analysis-master/analysis-amd64/analysis/__pycache__/glicko2.cpython-38.pyc differ
diff --git a/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-38.pyc b/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-38.pyc
new file mode 100644
index 00000000..8927caf9
Binary files /dev/null and b/analysis-master/analysis-amd64/analysis/__pycache__/trueskill.cpython-38.pyc differ
diff --git a/analysis-master/analysis-amd64/analysis/analysis.py b/analysis-master/analysis-amd64/analysis/analysis.py
index 3651680f..944dd0c7 100644
--- a/analysis-master/analysis-amd64/analysis/analysis.py
+++ b/analysis-master/analysis-amd64/analysis/analysis.py
@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.008"
+__version__ = "1.1.13.009"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.009:
+        - moved elo, glicko2, trueskill functions under class Metrics
     1.1.13.008:
         - moved Glicko2 to a separate package
     1.1.13.007:
@@ -446,32 +448,34 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
     return regressions
 
-def elo(starting_score, opposing_score, observed, N, K):
+class Metrics:
 
-    expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
+    def elo(starting_score, opposing_score, observed, N, K):
 
-    return starting_score + K*(np.sum(observed) - np.sum(expected))
+        expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
 
-def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
+        return starting_score + K*(np.sum(observed) - np.sum(expected))
 
-    player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
+    def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
 
-    player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
+        player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
 
-    return (player.rating, player.rd, player.vol)
+        player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
 
-def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+        return (player.rating, player.rd, player.vol)
 
-    team_ratings = []
+    def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
 
-    for team in teams_data:
-        team_temp = ()
-        for player in team:
-            player = Trueskill.Rating(player[0], player[1])
-            team_temp = team_temp + (player,)
-        team_ratings.append(team_temp)
+        team_ratings = []
 
-    return Trueskill.rate(team_ratings, ranks=observations)
+        for team in teams_data:
+            team_temp = ()
+            for player in team:
+                player = Trueskill.Rating(player[0], player[1])
+                team_temp = team_temp + (player,)
+            team_ratings.append(team_temp)
+
+        return Trueskill.rate(team_ratings, ranks=observations)
 
 class RegressionMetrics():
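Reviewer note on the Metrics hunk above: the relocated functions keep their free-function signatures (no self parameter and no @staticmethod decorator), so they must be invoked on the class itself rather than on a Metrics instance. A minimal calling sketch, assuming the import alias an used by superscript.py; the rating values and the constants N = 400, K = 24 are illustrative, not taken from this diff:

    from analysis import analysis as an

    # One drawn game (observed = 0.5) against a single 1500-rated opponent:
    #   expected = 1 / (1 + 10**((1500 - 1500) / 400)) = 0.5
    new_score = an.Metrics.elo(1500, [1500], [0.5], 400, 24)
    print(new_score)  # 1500.0 -- a draw against an equal opponent changes nothing

This works because Python 3 treats an undecorated function accessed on the class as a plain function; calling it on an instance would misbind starting_score to the instance.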
@@ -563,24 +567,25 @@ def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "
 
     return model, metrics
 
-@jit(forceobj=True)
-def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+class KNN:
 
-    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsClassifier()
-    model.fit(data_train, labels_train)
-    predictions = model.predict(data_test)
+    def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
 
-    return model, ClassificationMetrics(predictions, labels_test)
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsClassifier()
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
 
-def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+        return model, ClassificationMetrics(predictions, labels_test)
 
-    data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
-    model.fit(data_train, outputs_train)
-    predictions = model.predict(data_test)
+    def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
 
-    return model, RegressionMetrics(predictions, outputs_test)
+        data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+        model.fit(data_train, outputs_train)
+        predictions = model.predict(data_test)
+
+        return model, RegressionMetrics(predictions, outputs_test)
 
 class NaiveBayes:
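Reviewer note on the KNN hunk: the move into class KNN also drops the @jit(forceobj=True) decorator from knn_classifier. A minimal usage sketch; the toy dataset below is hypothetical, not from this repo:

    from analysis import analysis as an

    # XOR-style toy features and labels, repeated so the 30% test split is non-trivial.
    data = [[0, 0], [0, 1], [1, 0], [1, 1]] * 10
    labels = [0, 1, 1, 0] * 10

    # Returns the fitted sklearn KNeighborsClassifier plus a ClassificationMetrics
    # wrapper computed on the held-out split.
    model, metrics = an.KNN.knn_classifier(data, labels, test_size=0.3)

Worth a follow-up: knn_classifier still constructs KNeighborsClassifier() with defaults, so its algorithm, leaf_size, n_neighbors, and related arguments are accepted but ignored, whereas knn_regressor forwards all of its hyperparameters.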
diff --git a/analysis-master/analysis-amd64/build/lib/analysis/analysis.py b/analysis-master/analysis-amd64/build/lib/analysis/analysis.py
index 3651680f..944dd0c7 100644
--- a/analysis-master/analysis-amd64/build/lib/analysis/analysis.py
+++ b/analysis-master/analysis-amd64/build/lib/analysis/analysis.py
@@ -7,10 +7,12 @@
 # current benchmark of optimization: 1.33 times faster
 # setup:
 
-__version__ = "1.1.13.008"
+__version__ = "1.1.13.009"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    1.1.13.009:
+        - moved elo, glicko2, trueskill functions under class Metrics
    1.1.13.008:
        - moved Glicko2 to a separate package
    1.1.13.007:
@@ -446,32 +448,34 @@ def regression(inputs, outputs, args): # inputs, outputs expects N-D array
 
     return regressions
 
-def elo(starting_score, opposing_score, observed, N, K):
+class Metrics:
 
-    expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
+    def elo(starting_score, opposing_score, observed, N, K):
 
-    return starting_score + K*(np.sum(observed) - np.sum(expected))
+        expected = 1/(1+10**((np.array(opposing_score) - starting_score)/N))
 
-def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
+        return starting_score + K*(np.sum(observed) - np.sum(expected))
 
-    player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
+    def glicko2(starting_score, starting_rd, starting_vol, opposing_score, opposing_rd, observations):
 
-    player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
+        player = Glicko2.Glicko2(rating = starting_score, rd = starting_rd, vol = starting_vol)
 
-    return (player.rating, player.rd, player.vol)
+        player.update_player([x for x in opposing_score], [x for x in opposing_rd], observations)
 
-def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
+        return (player.rating, player.rd, player.vol)
 
-    team_ratings = []
+    def trueskill(teams_data, observations): # teams_data is array of array of tuples ie. [[(mu, sigma), (mu, sigma), (mu, sigma)], [(mu, sigma), (mu, sigma), (mu, sigma)]]
 
-    for team in teams_data:
-        team_temp = ()
-        for player in team:
-            player = Trueskill.Rating(player[0], player[1])
-            team_temp = team_temp + (player,)
-        team_ratings.append(team_temp)
+        team_ratings = []
 
-    return Trueskill.rate(team_ratings, ranks=observations)
+        for team in teams_data:
+            team_temp = ()
+            for player in team:
+                player = Trueskill.Rating(player[0], player[1])
+                team_temp = team_temp + (player,)
+            team_ratings.append(team_temp)
+
+        return Trueskill.rate(team_ratings, ranks=observations)
 
 class RegressionMetrics():
 
@@ -563,24 +567,25 @@ def decisiontree(data, labels, test_size = 0.3, criterion = "gini", splitter = "
 
     return model, metrics
 
-@jit(forceobj=True)
-def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
+class KNN:
 
-    data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsClassifier()
-    model.fit(data_train, labels_train)
-    predictions = model.predict(data_test)
+    def knn_classifier(data, labels, test_size = 0.3, algorithm='auto', leaf_size=30, metric='minkowski', metric_params=None, n_jobs=None, n_neighbors=5, p=2, weights='uniform'): #expects *2d data and 1d labels post-scaling
 
-    return model, ClassificationMetrics(predictions, labels_test)
+        data_train, data_test, labels_train, labels_test = sklearn.model_selection.train_test_split(data, labels, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsClassifier()
+        model.fit(data_train, labels_train)
+        predictions = model.predict(data_test)
 
-def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
+        return model, ClassificationMetrics(predictions, labels_test)
 
-    data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
-    model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
-    model.fit(data_train, outputs_train)
-    predictions = model.predict(data_test)
+    def knn_regressor(data, outputs, test_size, n_neighbors = 5, weights = "uniform", algorithm = "auto", leaf_size = 30, p = 2, metric = "minkowski", metric_params = None, n_jobs = None):
 
-    return model, RegressionMetrics(predictions, outputs_test)
+        data_train, data_test, outputs_train, outputs_test = sklearn.model_selection.train_test_split(data, outputs, test_size=test_size, random_state=1)
+        model = sklearn.neighbors.KNeighborsRegressor(n_neighbors = n_neighbors, weights = weights, algorithm = algorithm, leaf_size = leaf_size, p = p, metric = metric, metric_params = metric_params, n_jobs = n_jobs)
+        model.fit(data_train, outputs_train)
+        predictions = model.predict(data_test)
+
+        return model, RegressionMetrics(predictions, outputs_test)
 
 class NaiveBayes:
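The file above is the same refactor mirrored into the build/lib copy of analysis.py. For completeness, a sketch of the trueskill wrapper under its new namespace; the (25, 25/3) mu/sigma pairs are the conventional TrueSkill starting values and are placeholders, not values from this repo:

    from analysis import analysis as an

    # Two teams of two players each, all at the default rating.
    teams_data = [[(25.0, 25.0 / 3), (25.0, 25.0 / 3)],
                  [(25.0, 25.0 / 3), (25.0, 25.0 / 3)]]

    # ranks are lower-is-better, so [0, 1] records a win for the first team.
    rated = an.Metrics.trueskill(teams_data, [0, 1])
    # rated mirrors teams_data: one tuple of updated Rating objects per team.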
diff --git a/analysis-master/analysis-amd64/dist/analysis-1.0.0.11-py3-none-any.whl b/analysis-master/analysis-amd64/dist/analysis-1.0.0.11-py3-none-any.whl
new file mode 100644
index 00000000..6ad83256
Binary files /dev/null and b/analysis-master/analysis-amd64/dist/analysis-1.0.0.11-py3-none-any.whl differ
diff --git a/analysis-master/analysis-amd64/dist/analysis-1.0.0.11.tar.gz b/analysis-master/analysis-amd64/dist/analysis-1.0.0.11.tar.gz
new file mode 100644
index 00000000..2e9aa30e
Binary files /dev/null and b/analysis-master/analysis-amd64/dist/analysis-1.0.0.11.tar.gz differ
diff --git a/analysis-master/analysis-amd64/setup.py b/analysis-master/analysis-amd64/setup.py
index f6ef54c4..89a9bdf6 100644
--- a/analysis-master/analysis-amd64/setup.py
+++ b/analysis-master/analysis-amd64/setup.py
@@ -8,7 +8,7 @@ with open("requirements.txt", 'r') as file:
 
 setuptools.setup(
     name="analysis",
-    version="1.0.0.010",
+    version="1.0.0.011",
     author="The Titan Scouting Team",
     author_email="titanscout2022@gmail.com",
     description="analysis package developed by Titan Scouting for The Red Alliance",
diff --git a/data analysis/superscript.py b/data analysis/superscript.py
index 869c97e6..ac1ec4a5 100644
--- a/data analysis/superscript.py
+++ b/data analysis/superscript.py
@@ -3,10 +3,12 @@
 # Notes:
 # setup:
 
-__version__ = "0.0.5.001"
+__version__ = "0.0.5.002"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+    0.0.5.002:
+        - made changes due to refactoring of analysis
    0.0.5.001:
        - text fixes
        - removed matplotlib requirement
@@ -286,11 +288,11 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
 
     observations = {"red": 0.5, "blu": 0.5}
 
-    red_elo_delta = an.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-    blu_elo_delta = an.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
+    red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+    blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
 
-    new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-    new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+    new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+    new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
 
     red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
    blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
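The superscript.py changes are mechanical renames (an.elo becomes an.Metrics.elo, an.glicko2 becomes an.Metrics.glicko2); the delta arithmetic around them is untouched. A sketch of the glicko2 call shape exactly as metricsloop passes it, seeded with the conventional Glicko-2 starting values (1500, 350, 0.06) — assumptions here, since the real seeds are defined outside this diff:

    from analysis import analysis as an

    red_gl2 = {"score": 1500, "rd": 350, "vol": 0.06}
    blu_gl2 = {"score": 1500, "rd": 350, "vol": 0.06}
    observations = {"red": 0.5, "blu": 0.5}

    # Single-opponent lists and a two-element observation list, mirroring
    # the call sites in the hunk above.
    new_score, new_rd, new_vol = an.Metrics.glicko2(
        red_gl2["score"], red_gl2["rd"], red_gl2["vol"],
        [blu_gl2["score"]], [blu_gl2["rd"]],
        [observations["red"], observations["blu"]])

    red_gl2_delta = {"score": new_score - red_gl2["score"],
                     "rd": new_rd - red_gl2["rd"],
                     "vol": new_vol - red_gl2["vol"]}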