Revert "experimental trueskill support"

This reverts commit 3fe2922e97.
This commit is contained in:
Dev Singh 2022-03-28 14:21:23 -05:00
parent 3fe2922e97
commit 25e4babd71
4 changed files with 26 additions and 108 deletions

View File

@@ -3,32 +3,27 @@ import pull
import pandas as pd
import json
def pull_new_tba_matches(apikey, competition, cutoff):
    """Fetch completed qualification matches for an event from The Blue Alliance.

    Queries the TBA v3 "simple" match endpoint for the given competition key
    and returns a list of dicts, one per finished qual match:
    {"match": int, "blue": [team numbers], "red": [team numbers], "winner": str}.

    Note: `cutoff` is accepted for interface compatibility but is not used here.
    (Diff-residue fix: the HTTP request and the append were duplicated, which
    issued the request twice and produced duplicate match entries.)
    """
    api_key = apikey
    x = requests.get(
        "https://www.thebluealliance.com/api/v3/event/" + competition + "/matches/simple",
        headers={"X-TBA-Auth-Key": api_key})
    # Renamed from `json` to avoid shadowing the module-level json import.
    payload = x.json()
    out = []
    for i in payload:
        # actual_time is set once a match has been played; only keep quals.
        if i["actual_time"] is not None and i["comp_level"] == "qm":
            out.append({
                "match": i['match_number'],
                # team_keys look like "frc1234"; strip the "frc" prefix.
                "blue": list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])),
                "red": list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])),
                "winner": i["winning_alliance"],
            })
    return out
def pull_new_tba_matches_manual(apikey, competition, cutoff):
    """Load match results for a competition from a local "<competition>-wins.json" file.

    `apikey` and `cutoff` are unused; they are kept so this function is a drop-in
    replacement for pull_new_tba_matches.
    (Diff-residue fix: json.load(f) was duplicated — the second call read from an
    exhausted stream and raised JSONDecodeError.)
    """
    filename = competition + "-wins.json"
    with open(filename, 'r') as f:
        data = json.load(f)
    return data
def get_team_match_data(client, competition, team_num):
    """Return one team's scouted match data at a competition as a DataFrame.

    Reads documents from the data_scouting.matchdata collection; each document
    is expected to carry 'match' and 'data' fields (as the loop shows), keyed
    into the frame by match number.
    (Diff-residue fix: the for-loop header was duplicated, nesting the loop.)
    """
    db = client.data_scouting
    mdata = db.matchdata
    out = {}
    for i in mdata.find({"competition": competition, "team_scouted": str(team_num)}):
        out[i['match']] = i['data']
    return pd.DataFrame(out)
@@ -36,27 +31,23 @@ def get_team_pit_data(client, competition, team_num):
db = client.data_scouting
mdata = db.pitdata
out = {}
return mdata.find_one({"competition": competition, "team_scouted": str(team_num)})["data"]
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def get_team_metrics_data(client, competition, team_num):
    """Return the stored metrics document for one team at a competition.

    Looks up data_processing.team_metrics; returns None when no document
    matches (find_one semantics).
    (Diff-residue fix: the return statement was duplicated; one copy removed.)
    """
    db = client.data_processing
    mdata = db.team_metrics
    return mdata.find_one({"competition": competition, "team": team_num})
def get_match_data_formatted(client, competition):
    """Return {team_number: unkeyified match data} for every team at an event.

    Teams whose data is missing or malformed are skipped (best-effort by
    design — the original swallowed all errors per team).
    (Diff-residue fix: the try-body assignment was duplicated; also narrowed
    the bare `except:` so KeyboardInterrupt/SystemExit are not swallowed.)
    """
    teams_at_comp = pull.get_teams_at_competition(competition)
    out = {}
    for team in teams_at_comp:
        try:
            out[int(team)] = unkeyify_2l(
                get_team_match_data(client, competition, team).transpose().to_dict())
        except Exception:
            # Best-effort: skip teams with no/invalid scouting data.
            pass
    return out
def get_metrics_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
@@ -67,10 +58,8 @@ def get_metrics_data_formatted(client, competition):
pass
return out
def get_pit_data_formatted(client, competition):
x = requests.get(
"https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
@@ -82,14 +71,12 @@ def get_pit_data_formatted(client, competition):
pass
return out
def get_pit_variable_data(client, competition):
    """Return a cursor over all documents in data_processing.team_pit.

    NOTE(review): `competition` is accepted but not used in the query — the
    cursor spans every competition; confirm callers expect that.
    (Fix: removed the unused local `out`.)
    """
    db = client.data_processing
    mdata = db.team_pit
    return mdata.find()
def get_pit_variable_formatted(client, competition):
temp = get_pit_variable_data(client, competition)
out = {}
@@ -97,39 +84,30 @@ def get_pit_variable_formatted(client, competition):
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(client, competition, team_num, data, dbname="data_processing", colname="team_tests"):
    """Upsert one team's test results into <dbname>.<colname>.

    The document _id is "<competition><team>am"; replace_one(..., True) upserts
    so a fresh document is created when none matches.
    (Diff-residue fix: the def line and the replace_one call were duplicated,
    which would have written the document twice.)
    """
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one(
        {"competition": competition, "team": team_num},
        {"_id": competition + str(team_num) + "am",
         "competition": competition, "team": team_num, "data": data},
        True)
def push_team_metrics_data(client, competition, team_num, data, dbname="data_processing", colname="team_metrics"):
    """Upsert one team's rating metrics into <dbname>.<colname>.

    Same document shape as push_team_tests_data but the payload field is
    "metrics" rather than "data".
    (Diff-residue fix: duplicated def line and duplicated replace_one removed.)
    """
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one(
        {"competition": competition, "team": team_num},
        {"_id": competition + str(team_num) + "am",
         "competition": competition, "team": team_num, "metrics": data},
        True)
def push_team_pit_data(client, competition, variable, data, dbname="data_processing", colname="team_pit"):
    """Upsert one pit-scouting variable's data into <dbname>.<colname>.

    Keyed by (competition, variable); unlike the team upserts, no explicit
    _id is supplied.
    (Diff-residue fix: duplicated def line and duplicated replace_one removed.)
    """
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one(
        {"competition": competition, "variable": variable},
        {"competition": competition, "variable": variable, "data": data},
        True)
def get_analysis_flags(client, flag):
    """Return the flags document that contains the given flag key, or None.

    Flags live in data_processing.flags; the query matches any document where
    the key exists, regardless of value.
    (Diff-residue fix: duplicated return removed.)
    """
    db = client.data_processing
    mdata = db.flags
    return mdata.find_one({flag: {"$exists": True}})
def set_analysis_flags(client, flag, data):
    """Upsert the flags document containing `flag`, replacing it with `data`.

    Returns the replace_one result so callers can inspect the write outcome.
    (Diff-residue fix: duplicated return removed.)
    """
    db = client.data_processing
    mdata = db.flags
    return mdata.replace_one({flag: {"$exists": True}}, data, True)
def unkeyify_2l(layered_dict):
out = {}
@@ -137,12 +115,11 @@ def unkeyify_2l(layered_dict):
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j, layered_dict[i][j]])
add.sort(key=lambda x: x[0])
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_previous_time(client):
previous_time = get_analysis_flags(client, "latest_update")
@@ -158,28 +135,23 @@ def get_previous_time(client):
return previous_time
def set_current_time(client, current_time):
    """Record `current_time` as the "latest_update" analysis flag.

    (Diff-residue fix: the set_analysis_flags call was duplicated, writing the
    flag twice per invocation.)
    """
    set_analysis_flags(client, "latest_update", {"latest_update": current_time})
def get_database_config(client):
    """Return the stored remote config blob, or None when no config flag exists.

    (Idiom fix: `!= None` replaced with `is not None`.)
    """
    remote_config = get_analysis_flags(client, "config")
    return remote_config["config"] if remote_config is not None else None
def set_database_config(client, config):
    """Persist the remote configuration blob under the "config" analysis flag."""
    payload = {"config": config}
    set_analysis_flags(client, "config", payload)
def load_match(client, competition):
    """Thin alias: fetch formatted match data for every team at `competition`."""
    formatted = get_match_data_formatted(client, competition)
    return formatted
def load_metric(client, competition, match, group_name, metrics):
group = {}
@@ -191,8 +163,7 @@ def load_metric(client, competition, match, group_name, metrics):
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"],
"rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
@@ -209,33 +180,28 @@ def load_metric(client, competition, match, group_name, metrics):
return group
def load_pit(client, competition):
    """Thin alias: fetch formatted pit-scouting data for `competition`."""
    formatted = get_pit_data_formatted(client, competition)
    return formatted
def push_match(client, competition, results):
    """Upload each team's test results ({team: data}) via push_team_tests_data."""
    for team, team_results in results.items():
        push_team_tests_data(client, competition, team, team_results)
def push_metric(client, competition, metric):
    """Upload each team's rating metrics ({team: metrics}) via push_team_metrics_data."""
    for team, team_metrics in metric.items():
        push_team_metrics_data(client, competition, team, team_metrics)
def push_pit(client, competition, pit):
    """Upload each pit variable's data ({variable: data}) via push_team_pit_data."""
    for variable, variable_data in pit.items():
        push_team_pit_data(client, competition, variable, variable_data)
def check_new_database_matches(client, competition):
    """Report whether new matches are available.

    Placeholder: always answers True; `client` and `competition` are ignored.
    """
    has_new_matches = True
    return has_new_matches

View File

@@ -1,10 +1,8 @@
import abc
import data as d
import signal
import trueskill
import numpy as np
from tra_analysis import Analysis as an
from ts_predict import win_probability
class Module(metaclass = abc.ABCMeta):
@@ -182,9 +180,9 @@ class Metric (Module):
red = {}
blu = {}
for match in matches:
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
@@ -200,9 +198,6 @@ class Metric (Module):
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
ts_red_team = {}
ts_blu_team = {}
for team in red:
elo_red_total += red[team]["elo"]["score"]
@@ -211,10 +206,6 @@ class Metric (Module):
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
tmu = red[team]["ts"]["mu"] or 25
tsigma = red[team]["ts"]["sigma"] or 8.333
ts_red_team[team] = trueskill.Rating(mu=tmu, sigma=tsigma)
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
@@ -223,43 +214,25 @@ class Metric (Module):
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
tmu = blu[team]["ts"]["mu"] or 25
tsigma = blu[team]["ts"]["sigma"] or 8.333
ts_blu_team[team] = trueskill.Rating(mu=tmu, sigma=tsigma)
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
print('here')
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
ts_obs = [1,0]
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
ts_obs = [0,1]
else:
observations = {"red": 0.5, "blu": 0.5}
ts_obs = [0,0]
ts_red = list(ts_red_team.values())
ts_blu = list(ts_blu_team.values())
new_red, new_blu = trueskill.rate([ts_red, ts_blu], ranks=ts_obs)
new_red_ts = {}
new_blu_ts = {}
for key, value in zip(ts_red_team.keys(), new_red):
new_red_ts[key] = value
for key, value in zip(ts_blu_team.keys(), new_blu):
new_blu_ts[key] = value
print("red" if win_probability(new_red, new_blu) > 0.5 else "blue", match["winner"])
# now there are new trueskll ratings for each team based on the win/loss from DB
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
@@ -277,9 +250,6 @@ class Metric (Module):
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
red[team]["ts"]["mu"] = new_red_ts[team].mu
red[team]["ts"]["sigma"] = new_red_ts[team].sigma
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
@@ -288,10 +258,6 @@ class Metric (Module):
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
blu[team]["ts"]["mu"] = new_blu_ts[team].mu
blu[team]["ts"]["sigma"] = new_blu_ts[team].sigma
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)

View File

@@ -12,5 +12,4 @@ requests
scikit-learn
scipy
six
tra-analysis
trueskill
tra-analysis

View File

@@ -1,13 +0,0 @@
from trueskill import Rating
import trueskill
from trueskill import TrueSkill
import math
# Skill-distance scale for win probability. 8.333 is the default TrueSkill
# sigma (25/3), so 8.333/2 ≈ 4.167 matches the library's default beta
# (sigma/2) — presumably intentional; confirm against trueskill docs.
BETA = 8.333/2
# Standard-normal CDF taken from a default-parameter TrueSkill environment.
cdf = TrueSkill().cdf
def win_probability(a, b):
    """Probability that alliance `a` beats alliance `b`.

    `a` and `b` are sequences of trueskill Rating objects. Computes the
    standard TrueSkill match-quality formula: the normal CDF of the mean-skill
    difference scaled by the combined uncertainty (per-player BETA variance
    plus each rating's sigma squared).
    """
    delta_mu = sum(rating.mu for rating in a) - sum(rating.mu for rating in b)
    sum_sigma_sq = sum(rating.sigma ** 2 for rating in a)
    sum_sigma_sq += sum(rating.sigma ** 2 for rating in b)
    player_count = len(a) + len(b)
    denominator = math.sqrt(player_count * (BETA * BETA) + sum_sigma_sq)
    return cdf(delta_mu / denominator)