From 5aae889fd73dd8909c2cb2798cb3321ddaf28cb0 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Wed, 20 May 2020 08:52:38 -0500
Subject: [PATCH] Merge service-dev changes with master (#24)

* added config.json
removed old config files

Signed-off-by: Arthur

* superscript.py v 0.0.6.000

Signed-off-by: Arthur

* changed data.py

Signed-off-by: Arthur

* changes to config.json

Signed-off-by: Arthur

* removed cells from visualize_pit.py

Signed-off-by: Arthur

* more changes to visualize_pit.py

Signed-off-by: Arthur

* added analysis-master/metrics/__pycache__ to git ignore
moved pit configs in config.json to the bottom
superscript.py v 0.0.6.001

Signed-off-by: Arthur

* removed old database key

Signed-off-by: Arthur

* adjusted config files

Signed-off-by: Arthur

* Delete config-pop.json

* fixed .gitignore

Signed-off-by: Arthur

* analysis.py 1.2.1.003
added team kv pair to config.json

Signed-off-by: Arthur

* superscript.py v 0.0.6.002

Signed-off-by: Arthur

* finished app.py API
made minute changes to parentheses use in various packages

Signed-off-by: Arthur Lu

* bug fixes in app.py

Signed-off-by: Arthur Lu

* bug fixes in app.py

Signed-off-by: Arthur Lu

* made changes to .gitignore

Signed-off-by: Arthur Lu

* made changes to .gitignore

Signed-off-by: Arthur Lu

* deleted a __pycache__ folder from metrics

Signed-off-by: Arthur Lu

* more changes to .gitignore

Signed-off-by: Arthur Lu

* additions to app.py

Signed-off-by: Arthur Lu

* renamed app.py to api.py

Signed-off-by: Arthur Lu

* removed extraneous files

Signed-off-by: Arthur Lu

* renamed api.py to tra.py
removed rest api calls from tra.py

* renamed api.py to tra.py
removed rest api calls from tra.py

Signed-off-by: Arthur Lu

* removed flask import from tra.py

Signed-off-by: Arthur Lu

* changes to devcontainer.json

Signed-off-by: Arthur Lu

* fixed unit tests to be correct
removed some regression tests because of potential function overflow
removed trueskill unit test because of slight deviation chance

Signed-off-by: Arthur Lu
---
 __pycache__/data.cpython-37.pyc |  Bin 4239 -> 0 bytes
 config.json                     |   45 ++++
 config/competition.config       |    1 -
 config/database.config          |    0
 config/keys.config              |    2 -
 config/stats.config             |   14 --
 data.py                         |   53 +++--
 get_team_rankings.py            |   59 ------
 superscript.py                  |  349 +++++++++++++++++---------------
 tra.py                          |   91 +++++++++
 visualize_pit.py                |   59 ------
 11 files changed, 364 insertions(+), 309 deletions(-)
 delete mode 100644 __pycache__/data.cpython-37.pyc
 create mode 100644 config.json
 delete mode 100644 config/competition.config
 delete mode 100644 config/database.config
 delete mode 100644 config/keys.config
 delete mode 100644 config/stats.config
 delete mode 100644 get_team_rankings.py
 create mode 100644 tra.py
 delete mode 100644 visualize_pit.py

diff --git a/__pycache__/data.cpython-37.pyc b/__pycache__/data.cpython-37.pyc
deleted file mode 100644
index 9c1a4a46da8095e2ca717e6f31080ba00a12f951..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 4239: base85-encoded contents of the deleted data.cpython-37.pyc omitted]
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..7893057
--- /dev/null
+++ b/config.json
@@ -0,0 +1,45 @@
+{
+    "team": "",
+    "competition": "",
+    "key":{
+        "database":"",
+        "tba":""
+    },
+    "statistics":{
+        "match":{
+            "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-started":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
+            "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
+
+        },
+        "metric":{
+            "elo":{
+                "score":1500,
+                "N":400,
+                "K":24
+            },
+            "gl2":{
+                "score":1500,
+                "rd":250,
+                "vol":0.06
+            },
+            "ts":{
+                "mu":25,
+                "sigma":8.33
+            }
+        },
+        "pit":{
+            "wheel-mechanism":true,
+            "low-balls":true,
+            "high-balls":true,
+            "wheel-success":true,
+            "strategic-focus":true,
+            "climb-mechanism":true,
+            "attitude":true
+        }
+    }
+}
\ No newline at end of file
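The consolidated config.json above replaces the flat files under config/. A minimal sketch of reading it the way tra.py does later in this patch; the values committed here are blank placeholders:

    import json

    with open("config.json") as f:
        config = json.load(f)

    apikey = config["key"]["database"]           # MongoDB connection string
    tbakey = config["key"]["tba"]                # The Blue Alliance API key
    match_tests = config["statistics"]["match"]  # variable -> list of test names
    metric_defaults = config["statistics"]["metric"]
    pit_variables = config["statistics"]["pit"]
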
diff --git a/config/competition.config b/config/competition.config
deleted file mode 100644
index 511e258..0000000
--- a/config/competition.config
+++ /dev/null
@@ -1 +0,0 @@
-2020ilch
\ No newline at end of file
diff --git a/config/database.config b/config/database.config
deleted file mode 100644
index e69de29..0000000
diff --git a/config/keys.config b/config/keys.config
deleted file mode 100644
index 77a53a6..0000000
--- a/config/keys.config
+++ /dev/null
@@ -1,2 +0,0 @@
-mongodb+srv://api-user-new:titanscout2022@2022-scouting-4vfuu.mongodb.net/test?authSource=admin&replicaSet=2022-scouting-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true
-UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5
\ No newline at end of file
diff --git a/config/stats.config b/config/stats.config
deleted file mode 100644
index 5b0501a..0000000
--- a/config/stats.config
+++ /dev/null
@@ -1,14 +0,0 @@
-balls-blocked,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-collected,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-lower-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-lower-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-started,basic_stats,historical_analyss,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-upper-teleop,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-balls-upper-auto,basic_stats,historical_analysis,regression_linear,regression_logarithmic,regression_exponential,regression_polynomial,regression_sigmoidal
-wheel-mechanism
-low-balls
-high-balls
-wheel-success
-strategic-focus
-climb-mechanism
-attitude
\ No newline at end of file
diff --git a/data.py b/data.py
index 9b075e5..641aba7 100644
--- a/data.py
+++ b/data.py
@@ -8,7 +8,7 @@ def pull_new_tba_matches(apikey, competition, cutoff):
 	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth_Key":api_key})
 	out = []
 	for i in x.json():
-		if (i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm"):
+		if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
 			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
 	return out
 
@@ -34,17 +34,6 @@ def get_team_metrics_data(apikey, competition, team_num):
 	mdata = db.team_metrics
 	return mdata.find_one({"competition" : competition, "team": team_num})
 
-def unkeyify_2l(layered_dict):
-	out = {}
-	for i in layered_dict.keys():
-		add = []
-		sortkey = []
-		for j in layered_dict[i].keys():
-			add.append([j,layered_dict[i][j]])
-		add.sort(key = lambda x: x[0])
-		out[i] = list(map(lambda x: x[1], add))
-	return out
-
 def get_match_data_formatted(apikey, competition):
 	client = pymongo.MongoClient(apikey)
 	db = client.data_scouting
@@ -58,6 +47,19 @@ def get_match_data_formatted(apikey, competition):
 			pass
 	return out
 
+def get_metrics_data_formatted(apikey, competition):
+	client = pymongo.MongoClient(apikey)
+	db = client.data_scouting
+	mdata = db.teamlist
+	x=mdata.find_one({"competition":competition})
+	out = {}
+	for i in x:
+		try:
+			out[int(i)] = get_team_metrics_data(apikey, competition, int(i))
+		except:
+			pass
+	return out
+
 def get_pit_data_formatted(apikey, competition):
 	client = pymongo.MongoClient(apikey)
 	db = client.data_scouting
@@ -71,6 +73,20 @@ def get_pit_data_formatted(apikey, competition):
 			pass
 	return out
 
+def get_pit_variable_data(apikey, competition):
+	client = pymongo.MongoClient(apikey)
+	db = client.data_processing
+	mdata = db.team_pit
+	out = {}
+	return mdata.find()
+
+def get_pit_variable_formatted(apikey, competition):
+	temp = get_pit_variable_data(apikey, competition)
+	out = {}
+	for i in temp:
+		out[i["variable"]] = i["data"]
+	return out
+
 def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
 	client = pymongo.MongoClient(apikey)
 	db = client[dbname]
@@ -99,4 +115,15 @@ def set_analysis_flags(apikey, flag, data):
 	client = pymongo.MongoClient(apikey)
 	db = client.data_processing
 	mdata = db.flags
-	return mdata.replace_one({flag:{"$exists":True}}, data, True)
\ No newline at end of file
+	return mdata.replace_one({flag:{"$exists":True}}, data, True)
+
+def unkeyify_2l(layered_dict):
+	out = {}
+	for i in layered_dict.keys():
+		add = []
+		sortkey = []
+		for j in layered_dict[i].keys():
+			add.append([j,layered_dict[i][j]])
+		add.sort(key = lambda x: x[0])
+		out[i] = list(map(lambda x: x[1], add))
+	return out
\ No newline at end of file
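data.py now pairs each collection with a *_formatted getter. A usage sketch; the connection string and competition key are placeholders, and the shape comments follow matchloop's stated expectation of [Team][Variable][Match]:

    import data as d

    apikey = "mongodb+srv://<user>:<password>@<cluster>/test"  # placeholder
    competition = "2020ilch"

    matches = d.get_match_data_formatted(apikey, competition)   # {team: {variable: [per-match values]}}
    metrics = d.get_metrics_data_formatted(apikey, competition) # {team: team_metrics document}
    pit = d.get_pit_data_formatted(apikey, competition)         # {team: {variable: value}}
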
diff --git a/get_team_rankings.py b/get_team_rankings.py
deleted file mode 100644
index 3ab0326..0000000
--- a/get_team_rankings.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import data as d
-from analysis import analysis as an
-import pymongo
-import operator
-
-def load_config(file):
-	config_vector = {}
-	file = an.load_csv(file)
-	for line in file[1:]:
-		config_vector[line[0]] = line[1:]
-
-	return (file[0][0], config_vector)
-
-def get_metrics_processed_formatted(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.teamlist
-	x=mdata.find_one({"competition":competition})
-	out = {}
-	for i in x:
-		try:
-			out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
-		except:
-			pass
-	return out
-
-def main():
-
-	apikey = an.load_csv("keys.txt")[0][0]
-	tbakey = an.load_csv("keys.txt")[1][0]
-
-	competition, config = load_config("config.csv")
-
-	metrics = get_metrics_processed_formatted(apikey, competition)
-
-	elo = {}
-	gl2 = {}
-
-	for team in metrics:
-
-		elo[team] = metrics[team]["metrics"]["elo"]["score"]
-		gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
-
-	elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
-	gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
-
-	for team in elo:
-
-		print("teams sorted by elo:")
-		print("" + str(team) + " | " + str(elo[team]))
-
-	print("*"*25)
-
-	for team in gl2:
-
-		print("teams sorted by glicko2:")
-		print("" + str(team) + " | " + str(gl2[team]))
-
-main()
\ No newline at end of file
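The printing loop deleted above survives as get_team_metrics() in superscript.py below, which returns JSON-friendly rank lists instead of writing to stdout. A sketch of the returned structure; the team numbers and scores here are made up for illustration, sorted ascending by score:

    rankings = {
        "elo-ranks": [{"team": "125", "elo": "1488.0"}, {"team": "254", "elo": "1536.5"}],
        "glicko2-ranks": [{"team": "125", "gl2": "1482.2"}, {"team": "254", "gl2": "1530.1"}],
    }
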
diff --git a/superscript.py b/superscript.py
index 05562c1..94e2d84 100644
--- a/superscript.py
+++ b/superscript.py
@@ -3,10 +3,27 @@
 # Notes:
 # setup:
 
-__version__ = "0.0.5.002"
+__version__ = "0.0.6.002"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	0.0.6.002:
+		- integrated get_team_rankings.py as get_team_metrics() function
+		- integrated visualize_pit.py as graph_pit_histogram() function
+	0.0.6.001:
+		- bug fixes with analysis.Metric() calls
+		- modified metric functions to use config.json defined default values
+	0.0.6.000:
+		- removed main function
+		- changed load_config function
+		- added save_config function
+		- added load_match function
+		- renamed simpleloop to matchloop
+		- moved simplestats function inside matchloop
+		- renamed load_metrics to load_metric
+		- renamed metricsloop to metricloop
+		- split push to database functions among push_match, push_metric, push_pit
+		- moved
 	0.0.5.002:
 		- made changes due to refactoring of analysis
 	0.0.5.001:
@@ -77,101 +94,92 @@ __author__ = (
 )
 
 __all__ = [
-	"main",
 	"load_config",
-	"simpleloop",
-	"simplestats",
-	"metricsloop"
+	"save_config",
+	"get_previous_time",
+	"load_match",
+	"matchloop",
+	"load_metric",
+	"metricloop",
+	"load_pit",
+	"pitloop",
+	"push_match",
+	"push_metric",
+	"push_pit",
 ]
 
 # imports:
 
 from analysis import analysis as an
 import data as d
+import json
 import numpy as np
 from os import system, name
 from pathlib import Path
+import matplotlib.pyplot as plt
 import time
 import warnings
 
-def main():
-	warnings.filterwarnings("ignore")
-	while(True):
-
-		current_time = time.time()
-		print("[OK] time: " + str(current_time))
-
-		start = time.time()
-		config = load_config(Path("config/stats.config"))
-		competition = an.load_csv(Path("config/competition.config"))[0][0]
-		print("[OK] configs loaded")
-
-		apikey = an.load_csv(Path("config/keys.config"))[0][0]
-		tbakey = an.load_csv(Path("config/keys.config"))[1][0]
-		print("[OK] loaded keys")
-
-		previous_time = d.get_analysis_flags(apikey, "latest_update")
-
-		if(previous_time == None):
-
-			d.set_analysis_flags(apikey, "latest_update", 0)
-			previous_time = 0
-
-		else:
-
-			previous_time = previous_time["latest_update"]
-
-		print("[OK] analysis backtimed to: " + str(previous_time))
-
-		print("[OK] loading data")
-		start = time.time()
-		data = d.get_match_data_formatted(apikey, competition)
-		pit_data = d.pit = d.get_pit_data_formatted(apikey, competition)
-		print("[OK] loaded data in " + str(time.time() - start) + " seconds")
-
-		print("[OK] running tests")
-		start = time.time()
-		results = simpleloop(data, config)
-		print("[OK] finished tests in " + str(time.time() - start) + " seconds")
-
-		print("[OK] running metrics")
-		start = time.time()
-		metricsloop(tbakey, apikey, competition, previous_time)
-		print("[OK] finished metrics in " + str(time.time() - start) + " seconds")
-
-		print("[OK] running pit analysis")
-		start = time.time()
-		pit = pitloop(pit_data, config)
-		print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
-
-		d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
-
-		print("[OK] pushing to database")
-		start = time.time()
-		push_to_database(apikey, competition, results, pit)
-		print("[OK] pushed to database in " + str(time.time() - start) + " seconds")
-
-		clear()
-
-def clear():
-
-	# for windows
-	if name == 'nt':
-		_ = system('cls')
-
-	# for mac and linux(here, os.name is 'posix')
-	else:
-		_ = system('clear')
-
 def load_config(file):
+
 	config_vector = {}
-	file = an.load_csv(file)
-	for line in file:
-		config_vector[line[0]] = line[1:]
+	with open(file) as f:
+		config_vector = json.load(f)
 
 	return config_vector
 
-def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]
+def save_config(file, config_vector):
+
+	with open(file, "w") as f:
+		json.dump(config_vector, f)
+
+def get_previous_time(apikey):
+
+	previous_time = d.get_analysis_flags(apikey, "latest_update")
+
+	if previous_time == None:
+
+		d.set_analysis_flags(apikey, "latest_update", 0)
+		previous_time = 0
+
+	else:
+
+		previous_time = previous_time["latest_update"]
+
+	return previous_time
+
+def load_match(apikey, competition):
+
+	return d.get_match_data_formatted(apikey, competition)
+
+def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
+
+	def simplestats(data, test):
+
+		data = np.array(data)
+		data = data[np.isfinite(data)]
+		ranges = list(range(len(data)))
+
+		if test == "basic_stats":
+			return an.basic_stats(data)
+
+		if test == "historical_analysis":
+			return an.histo_analysis([ranges, data])
+
+		if test == "regression_linear":
+			return an.regression(ranges, data, ['lin'])
+
+		if test == "regression_logarithmic":
+			return an.regression(ranges, data, ['log'])
+
+		if test == "regression_exponential":
+			return an.regression(ranges, data, ['exp'])
+
+		if test == "regression_polynomial":
+			return an.regression(ranges, data, ['ply'])
+
+		if test == "regression_sigmoidal":
+			return an.regression(ranges, data, ['sig'])
 
 	return_vector = {}
 	for team in data:
@@ -179,7 +187,7 @@ def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]
 		for variable in data[team]:
 			test_vector = {}
 			variable_data = data[team][variable]
-			if(variable in tests):
+			if variable in tests:
 				for test in tests[variable]:
 					test_vector[test] = simplestats(variable_data, test)
 			else:
@@ -187,49 +195,40 @@ def simpleloop(data, tests): # expects 3D array with [Team][Variable][Match]
 			variable_vector[variable] = test_vector
 		return_vector[team] = variable_vector
 
-	return return_vector
+	push_match(apikey, competition, return_vector)
 
-def simplestats(data, test):
+def load_metric(apikey, competition, match, group_name, metrics):
 
-	data = np.array(data)
-	data = data[np.isfinite(data)]
-	ranges = list(range(len(data)))
+	group = {}
 
-	if(test == "basic_stats"):
-		return an.basic_stats(data)
+	for team in match[group_name]:
 
-	if(test == "historical_analysis"):
-		return an.histo_analysis([ranges, data])
+		db_data = d.get_team_metrics_data(apikey, competition, team)
 
-	if(test == "regression_linear"):
-		return an.regression(ranges, data, ['lin'])
+		if db_data == None:
 
-	if(test == "regression_logarithmic"):
-		return an.regression(ranges, data, ['log'])
+			elo = {"score": metrics["elo"]["score"]}
+			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
+			ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
 
-	if(test == "regression_exponential"):
-		return an.regression(ranges, data, ['exp'])
+			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
 
-	if(test == "regression_polynomial"):
-		return an.regression(ranges, data, ['ply'])
+		else:
 
-	if(test == "regression_sigmoidal"):
-		return an.regression(ranges, data, ['sig'])
+			db_metrics = db_data["metrics"]
 
-def push_to_database(apikey, competition, results, pit):
+			elo = db_metrics["elo"]
+			gl2 = db_metrics["gl2"]
+			ts = db_metrics["ts"]
 
-	for team in results:
+			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
 
-		d.push_team_tests_data(apikey, competition, team, results[team])
+	return group
 
-	for variable in pit:
+def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
 
-		d.push_team_pit_data(apikey, competition, variable, pit[variable])
-
-def metricsloop(tbakey, apikey, competition, timestamp): # listener based metrics update
-
-	elo_N = 400
-	elo_K = 24
+	elo_N = metrics["elo"]["N"]
+	elo_K = metrics["elo"]["K"]
 
 	matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
 
@@ -238,8 +237,8 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
 
 	for match in matches:
 
-		red = load_metrics(apikey, competition, match, "red")
-		blu = load_metrics(apikey, competition, match, "blue")
+		red = load_metric(apikey, competition, match, "red", metrics)
+		blu = load_metric(apikey, competition, match, "blue", metrics)
 
 		elo_red_total = 0
 		elo_blu_total = 0
@@ -276,11 +275,11 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
 		blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
 
-		if(match["winner"] == "red"):
+		if match["winner"] == "red":
 
 			observations = {"red": 1, "blu": 0}
 
-		elif(match["winner"] == "blue"):
+		elif match["winner"] == "blue":
 
 			observations = {"red": 0, "blu": 1}
 
 		else:
 
 			observations = {"red": 0.5, "blu": 0.5}
 
-		red_elo_delta = an.Metrics.elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-		blu_elo_delta = an.Metrics.elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
+		red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+		blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
 
-		new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metrics.glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-		new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metrics.glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+		new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+		new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
 
 		red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
 		blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
@@ -317,62 +316,90 @@ def metricsloop(tbakey, apikey, competition, timestamp): # listener based metric
 		temp_vector.update(red)
 		temp_vector.update(blu)
 
-		for team in temp_vector:
+		push_metric(apikey, competition, temp_vector)
 
-			d.push_team_metrics_data(apikey, competition, team, temp_vector[team])
+def load_pit(apikey, competition):
 
-def load_metrics(apikey, competition, match, group_name):
+	return d.get_pit_data_formatted(apikey, competition)
 
-	group = {}
-
-	for team in match[group_name]:
-
-		db_data = d.get_team_metrics_data(apikey, competition, team)
-
-		if d.get_team_metrics_data(apikey, competition, team) == None:
-
-			elo = {"score": 1500}
-			gl2 = {"score": 1500, "rd": 250, "vol": 0.06}
-			ts = {"mu": 25, "sigma": 25/3}
-
-			#d.push_team_metrics_data(apikey, competition, team, {"elo":elo, "gl2":gl2,"trueskill":ts})
-
-			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
-
-		else:
-
-			metrics = db_data["metrics"]
-
-			elo = metrics["elo"]
-			gl2 = metrics["gl2"]
-			ts = metrics["ts"]
-
-			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
-
-	return group
-
-def pitloop(pit, tests):
+def pitloop(apikey, competition, pit, tests):
 
 	return_vector = {}
 	for team in pit:
 		for variable in pit[team]:
-			if(variable in tests):
-				if(not variable in return_vector):
+			if variable in tests:
+				if variable not in return_vector:
 					return_vector[variable] = []
 				return_vector[variable].append(pit[team][variable])
 
-	return return_vector
+	push_pit(apikey, competition, return_vector)
 
-main()
+def push_match(apikey, competition, results):
 
-"""
-Metrics Defaults:
+	for team in results:
 
-elo starting score = 1500
-elo N = 400
-elo K = 24
+		d.push_team_tests_data(apikey, competition, team, results[team])
 
-gl2 starting score = 1500
-gl2 starting rd = 350
-gl2 starting vol = 0.06
-"""
\ No newline at end of file
+def push_metric(apikey, competition, metric):
+
+	for team in metric:
+
+		d.push_team_metrics_data(apikey, competition, team, metric[team])
+
+def push_pit(apikey, competition, pit):
+
+	for variable in pit:
+
+		d.push_team_pit_data(apikey, competition, variable, pit[variable])
+
+def get_team_metrics(apikey, tbakey, competition):
+
+	metrics = d.get_metrics_data_formatted(apikey, competition)
+
+	elo = {}
+	gl2 = {}
+
+	for team in metrics:
+
+		elo[team] = metrics[team]["metrics"]["elo"]["score"]
+		gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
+
+	elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
+	gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
+
+	elo_ranked = []
+
+	for team in elo:
+
+		elo_ranked.append({"team": str(team), "elo": str(elo[team])})
+
+	gl2_ranked = []
+
+	for team in gl2:
+
+		gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
+
+	return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
+
+def graph_pit_histogram(apikey, competition, figsize=(80,15)):
+
+	pit = d.get_pit_variable_formatted(apikey, competition)
+
+	fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=figsize)
+
+	i = 0
+
+	for variable in pit:
+
+		ax[i].hist(pit[variable])
+		ax[i].invert_xaxis()
+
+		ax[i].set_xlabel('')
+		ax[i].set_ylabel('Frequency')
+		ax[i].set_title(variable)
+
+		plt.yticks(np.arange(len(pit[variable])))
+
+		i += 1
+
+	plt.show()
\ No newline at end of file
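superscript.py now exposes a load -> loop -> push pipeline rather than a single main(), and each *loop pushes its own results. A sketch of one sequential pass, assuming a filled-in config.json; tra.py below runs the same three stages on threads:

    import superscript as su

    config = su.load_config("config.json")
    apikey = config["key"]["database"]
    tbakey = config["key"]["tba"]
    competition = config["competition"]

    data = su.load_match(apikey, competition)
    su.matchloop(apikey, competition, data, config["statistics"]["match"])

    timestamp = su.get_previous_time(apikey)
    su.metricloop(tbakey, apikey, competition, timestamp, config["statistics"]["metric"])

    pit = su.load_pit(apikey, competition)
    su.pitloop(apikey, competition, pit, config["statistics"]["pit"])
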
diff --git a/tra.py b/tra.py
new file mode 100644
index 0000000..de16723
--- /dev/null
+++ b/tra.py
@@ -0,0 +1,91 @@
+import json
+import superscript as su
+import threading
+
+__author__ = (
+	"Arthur Lu ",
+)
+
+match = False
+metric = False
+pit = False
+
+match_enable = True
+metric_enable = True
+pit_enable = True
+
+config = {}
+
+def main():
+
+	global match
+	global metric
+	global pit
+
+	global match_enable
+	global metric_enable
+	global pit_enable
+
+	global config
+	config = su.load_config("config.json")
+
+	while(True):
+
+		if match_enable == True and match == False:
+
+			def target():
+
+				global match
+
+				apikey = config["key"]["database"]
+				competition = config["competition"]
+				tests = config["statistics"]["match"]
+
+				data = su.load_match(apikey, competition)
+				su.matchloop(apikey, competition, data, tests)
+
+				match = False
+				return
+
+			match = True
+			task = threading.Thread(name = "match", target=target)
+			task.start()
+
+		if metric_enable == True and metric == False:
+
+			def target():
+
+				global metric
+
+				apikey = config["key"]["database"]
+				tbakey = config["key"]["tba"]
+				competition = config["competition"]
+				metrics = config["statistics"]["metric"]
+
+				timestamp = su.get_previous_time(apikey)
+
+				su.metricloop(tbakey, apikey, competition, timestamp, metrics)
+
+				metric = False
+				return
+
+			metric = True
+			task = threading.Thread(name = "metric", target=target)
+			task.start()
+
+		if pit_enable == True and pit == False:
+
+			def target():
+
+				global pit
+
+				apikey = config["key"]["database"]
+				competition = config["competition"]
+				tests = config["statistics"]["pit"]
+
+				data = su.load_pit(apikey, competition)
+				su.pitloop(apikey, competition, data, tests)
+
+				pit = False
+				return
+
+			pit = True
+			task = threading.Thread(name = "pit", target=target)
+			task.start()
+
+task = threading.Thread(name = "main", target=main)
+task.start()
\ No newline at end of file
diff --git a/visualize_pit.py b/visualize_pit.py
deleted file mode 100644
index 9fddd25..0000000
--- a/visualize_pit.py
+++ /dev/null
@@ -1,59 +0,0 @@
-# To add a new cell, type '# %%'
-# To add a new markdown cell, type '# %% [markdown]'
-# %%
-import matplotlib.pyplot as plt
-import data as d
-import pymongo
-
-
-# %%
-def get_pit_variable_data(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_processing
-	mdata = db.team_pit
-	out = {}
-	return mdata.find()
-
-
-# %%
-def get_pit_variable_formatted(apikey, competition):
-	temp = get_pit_variable_data(apikey, competition)
-	out = {}
-	for i in temp:
-		out[i["variable"]] = i["data"]
-	return out
-
-
-# %%
-pit = get_pit_variable_formatted("mongodb+srv://api-user-new:titanscout2022@2022-scouting-4vfuu.mongodb.net/test?authSource=admin&replicaSet=2022-scouting-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true", "2020ilch")
-
-
-# %%
-import matplotlib.pyplot as plt
-import numpy as np
-
-
-# %%
-fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=(80,15))
-
-i = 0
-
-for variable in pit:
-
-	ax[i].hist(pit[variable])
-	ax[i].invert_xaxis()
-
-	ax[i].set_xlabel('')
-	ax[i].set_ylabel('Frequency')
-	ax[i].set_title(variable)
-
-	plt.yticks(np.arange(len(pit[variable])))
-
-	i+=1
-
-plt.show()
-
-
-# %%
-
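For reference, the defaults that used to live in the deleted "Metrics Defaults" comment now come from config.json (Elo score 1500 with N = 400, K = 24). A standalone sketch of the update that metricloop delegates to an.Metric().elo(), assuming it implements the standard logistic Elo formula:

    def elo(score, opposing_score, observed, N=400, K=24):
        # expected outcome on a logistic curve with spread N
        expected = 1 / (1 + 10 ** ((opposing_score - score) / N))
        return score + K * (observed - expected)

    # an alliance at 1500 beating an equal-rated opponent gains K * 0.5 = 12 points
    print(elo(1500, 1500, 1))  # 1512.0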