From 524a0a211dbf33b3d3f4e61f13b6a6b77d35f355 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Wed, 9 Feb 2022 05:36:19 +0000
Subject: [PATCH] removed gui (last commit tagged "gui"), removed print
 statement in pit module

Former-commit-id: 4978aee142eaf9431913b44eabfc0dfb79c7b600
---
 src/{cli => }/config.py              |   0
 src/{cli => }/data.py                |   0
 src/{cli => }/exceptions.py          |   0
 src/gui/data.py                      | 129 ------
 src/gui/design.kv                    | 151 -------
 src/gui/main.py                      |  58 ---
 src/gui/superscript.py               | 627 ---------
 src/{cli => }/interface.py           |   0
 src/{cli => }/module.py              |   1 -
 src/{cli => }/pull.py                |   0
 src/{cli => }/superscript.py         |   0
 src/{cli => }/superscript.spec       |   0
 src/{cli => }/validation-schema.json |   0
 13 files changed, 966 deletions(-)
 rename src/{cli => }/config.py (100%)
 rename src/{cli => }/data.py (100%)
 rename src/{cli => }/exceptions.py (100%)
 delete mode 100644 src/gui/data.py
 delete mode 100644 src/gui/design.kv
 delete mode 100644 src/gui/main.py
 delete mode 100644 src/gui/superscript.py
 rename src/{cli => }/interface.py (100%)
 rename src/{cli => }/module.py (99%)
 rename src/{cli => }/pull.py (100%)
 rename src/{cli => }/superscript.py (100%)
 rename src/{cli => }/superscript.spec (100%)
 rename src/{cli => }/validation-schema.json (100%)

diff --git a/src/cli/config.py b/src/config.py
similarity index 100%
rename from src/cli/config.py
rename to src/config.py
diff --git a/src/cli/data.py b/src/data.py
similarity index 100%
rename from src/cli/data.py
rename to src/data.py
diff --git a/src/cli/exceptions.py b/src/exceptions.py
similarity index 100%
rename from src/cli/exceptions.py
rename to src/exceptions.py
diff --git a/src/gui/data.py b/src/gui/data.py
deleted file mode 100644
index 641aba7..0000000
--- a/src/gui/data.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import requests
-import pymongo
-import pandas as pd
-import time
-
-def pull_new_tba_matches(apikey, competition, cutoff):
-    api_key= apikey
-    x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth_Key":api_key})
-    out = []
-    for i in x.json():
-        if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
-            out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
-    return out
-
-def get_team_match_data(apikey, competition, team_num):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_scouting
-    mdata = db.matchdata
-    out = {}
-    for i in mdata.find({"competition" : competition, "team_scouted": team_num}):
-        out[i['match']] = i['data']
-    return pd.DataFrame(out)
-
-def get_team_pit_data(apikey, competition, team_num):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_scouting
-    mdata = db.pitdata
-    out = {}
-    return mdata.find_one({"competition" : competition, "team_scouted": team_num})["data"]
-
-def get_team_metrics_data(apikey, competition, team_num):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_processing
-    mdata = db.team_metrics
-    return mdata.find_one({"competition" : competition, "team": team_num})
-
-def get_match_data_formatted(apikey, competition):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_scouting
-    mdata = db.teamlist
-    x=mdata.find_one({"competition":competition})
-    out = {}
-    for i in x:
-        try:
-            out[int(i)] = unkeyify_2l(get_team_match_data(apikey, competition, int(i)).transpose().to_dict())
-        except:
-            pass
-    return out
-
-def get_metrics_data_formatted(apikey, competition):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_scouting
-    mdata = db.teamlist
-    x=mdata.find_one({"competition":competition})
-    out = {}
-    for i in x:
-        try:
-            out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
-        except:
-            pass
-    return out
-
-def get_pit_data_formatted(apikey, competition):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_scouting
-    mdata = db.teamlist
-    x=mdata.find_one({"competition":competition})
-    out = {}
-    for i in x:
-        try:
-            out[int(i)] = get_team_pit_data(apikey, competition, int(i))
-        except:
-            pass
-    return out
-
-def get_pit_variable_data(apikey, competition):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_processing
-    mdata = db.team_pit
-    out = {}
-    return mdata.find()
-
-def get_pit_variable_formatted(apikey, competition):
-    temp = get_pit_variable_data(apikey, competition)
-    out = {}
-    for i in temp:
-        out[i["variable"]] = i["data"]
-    return out
-
-def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
-    client = pymongo.MongoClient(apikey)
-    db = client[dbname]
-    mdata = db[colname]
-    mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
-
-def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
-    client = pymongo.MongoClient(apikey)
-    db = client[dbname]
-    mdata = db[colname]
-    mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
-
-def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
-    client = pymongo.MongoClient(apikey)
-    db = client[dbname]
-    mdata = db[colname]
-    mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
-
-def get_analysis_flags(apikey, flag):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_processing
-    mdata = db.flags
-    return mdata.find_one({flag:{"$exists":True}})
-
-def set_analysis_flags(apikey, flag, data):
-    client = pymongo.MongoClient(apikey)
-    db = client.data_processing
-    mdata = db.flags
-    return mdata.replace_one({flag:{"$exists":True}}, data, True)
-
-def unkeyify_2l(layered_dict):
-    out = {}
-    for i in layered_dict.keys():
-        add = []
-        sortkey = []
-        for j in layered_dict[i].keys():
-            add.append([j,layered_dict[i][j]])
-        add.sort(key = lambda x: x[0])
-        out[i] = list(map(lambda x: x[1], add))
-    return out
\ No newline at end of file
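For reference, unkeyify_2l above is what turns the {variable: {match: value}} mapping produced by get_match_data_formatted (via DataFrame.transpose().to_dict()) into per-variable lists ordered by match key. A minimal, self-contained sketch of the same reshaping, using hypothetical sample data:

    # variable -> {match number -> value}, matches deliberately out of order
    layered = {"balls-scored": {2: 5, 1: 3, 3: 7}}

    def unkeyify_2l(layered_dict):
        out = {}
        for variable, by_match in layered_dict.items():
            pairs = sorted(by_match.items(), key=lambda kv: kv[0])  # sort by match key
            out[variable] = [value for _, value in pairs]
        return out

    print(unkeyify_2l(layered))  # {'balls-scored': [3, 5, 7]}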
diff --git a/src/gui/design.kv b/src/gui/design.kv
deleted file mode 100644
index 177a926..0000000
--- a/src/gui/design.kv
+++ /dev/null
@@ -1,151 +0,0 @@
-:
-    orientation: "vertical"
-
-    NavigationLayout:
-        ScreenManager:
-            id: screen_manager
-            HomeScreen:
-                name: "Home"
-                BoxLayout:
-                    orientation: "vertical"
-                    MDToolbar:
-                        title: screen_manager.current
-                        elevation: 10
-                        left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
-
-                    GridLayout:
-                        cols: 1
-                        padding: 15, 15
-                        spacing: 20, 20
-                        MDTextFieldRect:
-                            hint_text: "Console Log"
-                            # size_hint: .8, None
-                            # align: 'center'
-                        # Widget:
-            SettingsScreen:
-                name: "Settings"
-                BoxLayout:
-                    orientation: 'vertical'
-                    MDToolbar:
-                        title: screen_manager.current
-                        elevation: 10
-                        left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
-                    Widget:
-            InfoScreen:
-                name: "Info"
-                BoxLayout:
-                    orientation: 'vertical'
-                    MDToolbar:
-                        title: screen_manager.current
-                        elevation: 10
-                        left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
-                    # GridLayout:
-                    #     cols: 2
-                    #     padding: 15, 15
-                    #     spacing: 20, 20
-                    BoxLayout:
-                        orientation: "horizontal"
-                        MDLabel:
-                            text: "DB Key:"
-                            halign: 'center'
-                        MDTextField:
-                            hint_text: "placeholder"
-                            pos_hint: {"center_y": .5}
-
-                    BoxLayout:
-                        orientation: "horizontal"
-                        MDLabel:
-                            text: "TBA Key:"
-                            halign: 'center'
-                        MDTextField:
-                            hint_text: "placeholder"
-                            pos_hint: {"center_y": .5}
-                    BoxLayout:
-                        orientation: "horizontal"
-                        MDLabel:
-                            text: "CPU Use:"
-                            halign: 'center'
-                        MDLabel:
-                            text: "placeholder"
-                            halign: 'center'
-                    BoxLayout:
-                        orientation: "horizontal"
-                        MDLabel:
-                            text: "Network:"
-                            halign: 'center'
-                        MDLabel:
-                            text: "placeholder"
-                            halign: 'center'
-                    Widget:
-                    BoxLayout:
-                        orientation: "horizontal"
-                        MDLabel:
-                            text: "Progress"
-                            halign: 'center'
-                        MDProgressBar:
-                            id: progress
-                            value: 50
-            StatsScreen:
-                name: "Stats"
-                MDCheckbox:
-                    size_hint: None, None
-                    size: "48dp", "48dp"
-                    pos_hint: {'center_x': .5, 'center_y': .5}
-                    on_active: Screen.test()
-
-#Navigation Drawer ------------------------
-        MDNavigationDrawer:
-            id: nav_drawer
-            BoxLayout:
-                orientation: "vertical"
-                padding: "8dp"
-                spacing: "8dp"
-                MDLabel:
-                    text: "Titan Scouting"
-                    font_style: "Button"
-                    size_hint_y: None
-                    height: self.texture_size[1]
-
-                MDLabel:
-                    text: "Data Analysis"
-                    font_style: "Caption"
-                    size_hint_y: None
-                    height: self.texture_size[1]
-                ScrollView:
-                    MDList:
-                        OneLineAvatarListItem:
-                            text: "Home"
-                            on_press:
-                                # nav_drawer.set_state("close")
-                                # screen_manager.transition.direction = "left"
-                                screen_manager.current = "Home"
-                            IconLeftWidget:
-                                icon: "home"
-
-                        OneLineAvatarListItem:
-                            text: "Settings"
-                            on_press:
-                                # nav_drawer.set_state("close")
-                                # screen_manager.transition.direction = "right"
-                                # screen_manager.fade
-                                screen_manager.current = "Settings"
-                            IconLeftWidget:
-                                icon: "cog"
-                        OneLineAvatarListItem:
-                            text: "Info"
-                            on_press:
-                                # nav_drawer.set_state("close")
-                                # screen_manager.transition.direction = "right"
-                                # screen_manager.fade
-                                screen_manager.current = "Info"
-                            IconLeftWidget:
-                                icon: "cog"
-                        OneLineAvatarListItem:
-                            text: "Stats"
-                            on_press:
-                                # nav_drawer.set_state("close")
-                                # screen_manager.transition.direction = "right"
-                                # screen_manager.fade
-                                screen_manager.current = "Stats"
-                            IconLeftWidget:
-                                icon: "cog"
\ No newline at end of file
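The kv rules above hang four named screens off a ScreenManager and switch between them by assigning screen_manager.current from the navigation drawer items. A minimal pure-Kivy sketch of that screen-switching mechanism, without the KivyMD widgets (the screen names are illustrative):

    from kivy.app import App
    from kivy.uix.button import Button
    from kivy.uix.screenmanager import Screen, ScreenManager

    class DemoApp(App):
        def build(self):
            sm = ScreenManager()
            home = Screen(name="Home")
            # Assigning ScreenManager.current by name performs the switch,
            # which is exactly what the drawer items above do on_press.
            home.add_widget(Button(text="Go to Settings",
                                   on_press=lambda *_: setattr(sm, "current", "Settings")))
            sm.add_widget(home)
            sm.add_widget(Screen(name="Settings"))
            return sm

    if __name__ == "__main__":
        DemoApp().run()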
diff --git a/src/gui/main.py b/src/gui/main.py
deleted file mode 100644
index a57421e..0000000
--- a/src/gui/main.py
+++ /dev/null
@@ -1,58 +0,0 @@
-from kivy.lang import Builder
-
-from kivymd.uix.screen import Screen
-from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
-from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
-from kivy.uix.scrollview import ScrollView
-
-
-from kivy.uix.boxlayout import BoxLayout
-from kivy.uix.screenmanager import ScreenManager, Screen
-from kivy.uix.dropdown import DropDown
-from kivy.uix.button import Button
-from kivy.base import runTouchApp
-from kivymd.uix.menu import MDDropdownMenu, MDMenuItem
-
-from kivymd.app import MDApp
-# import superscript as ss
-
-# from tra_analysis import analysis as an
-import data as d
-from collections import defaultdict
-import json
-import math
-import numpy as np
-import os
-from os import system, name
-from pathlib import Path
-from multiprocessing import Pool
-import matplotlib.pyplot as plt
-from concurrent.futures import ThreadPoolExecutor
-import time
-import warnings
-
-# global exec_threads
-
-
-# Screens
-class HomeScreen(Screen):
-    pass
-class SettingsScreen(Screen):
-    pass
-class InfoScreen(Screen):
-    pass
-
-class StatsScreen(Screen):
-    pass
-
-
-class MyApp(MDApp):
-    def build(self):
-        self.theme_cls.primary_palette = "Red"
-        return Builder.load_file("design.kv")
-    def test():
-        print("test")
-
-
-if __name__ == "__main__":
-    MyApp().run()
\ No newline at end of file
diff --git a/src/gui/superscript.py b/src/gui/superscript.py
deleted file mode 100644
index e912951..0000000
--- a/src/gui/superscript.py
+++ /dev/null
@@ -1,627 +0,0 @@
-# Titan Robotics Team 2022: Superscript Script
-# Written by Arthur Lu, Jacob Levine, and Dev Singh
-# Notes:
-# setup:
-
-__version__ = "0.8.6"
-
-# changelog should be viewed using print(analysis.__changelog__)
-__changelog__ = """changelog:
-    0.8.6:
-        - added proper main function
-    0.8.5:
-        - added more gradeful KeyboardInterrupt exiting
-        - redirected stderr to errorlog.txt
-    0.8.4:
-        - added better error message for missing config.json
-        - added automatic config.json creation
-        - added splash text with version and system info
-    0.8.3:
-        - updated matchloop with new regression format (requires tra_analysis 3.x)
-    0.8.2:
-        - readded while true to main function
-        - added more thread config options
-    0.8.1:
-        - optimized matchloop further by bypassing GIL
-    0.8.0:
-        - added multithreading to matchloop
-        - tweaked user log
-    0.7.0:
-        - finished implementing main function
-    0.6.2:
-        - integrated get_team_rankings.py as get_team_metrics() function
-        - integrated visualize_pit.py as graph_pit_histogram() function
-    0.6.1:
-        - bug fixes with analysis.Metric() calls
-        - modified metric functions to use config.json defined default values
-    0.6.0:
-        - removed main function
-        - changed load_config function
-        - added save_config function
-        - added load_match function
-        - renamed simpleloop to matchloop
-        - moved simplestats function inside matchloop
-        - renamed load_metrics to load_metric
-        - renamed metricsloop to metricloop
-        - split push to database functions amon push_match, push_metric, push_pit
-        - moved
-    0.5.2:
-        - made changes due to refactoring of analysis
-    0.5.1:
-        - text fixes
-        - removed matplotlib requirement
-    0.5.0:
-        - improved user interface
-    0.4.2:
-        - removed unessasary code
-    0.4.1:
-        - fixed bug where X range for regression was determined before sanitization
-        - better sanitized data
-    0.4.0:
-        - fixed spelling issue in __changelog__
-        - addressed nan bug in regression
-        - fixed errors on line 335 with metrics calling incorrect key "glicko2"
-        - fixed errors in metrics computing
-    0.3.0:
-        - added analysis to pit data
-    0.2.1:
-        - minor stability patches
-        - implemented db syncing for timestamps
-        - fixed bugs
-    0.2.0:
-        - finalized testing and small fixes
-    0.1.4:
-        - finished metrics implement, trueskill is bugged
-    0.1.3:
-        - working
-    0.1.2:
-        - started implement of metrics
-    0.1.1:
-        - cleaned up imports
-    0.1.0:
-        - tested working, can push to database
-    0.0.9:
-        - tested working
-        - prints out stats for the time being, will push to database later
-    0.0.8:
-        - added data import
-        - removed tba import
-        - finished main method
-    0.0.7:
-        - added load_config
-        - optimized simpleloop for readibility
-        - added __all__ entries
-        - added simplestats engine
-        - pending testing
-    0.0.6:
-        - fixes
-    0.0.5:
-        - imported pickle
-        - created custom database object
-    0.0.4:
-        - fixed simpleloop to actually return a vector
-    0.0.3:
-        - added metricsloop which is unfinished
-    0.0.2:
-        - added simpleloop which is untested until data is provided
-    0.0.1:
-        - created script
-        - added analysis, numba, numpy imports
-"""
-
-__author__ = (
-    "Arthur Lu ",
-    "Jacob Levine ",
-)
-
-__all__ = [
-    "load_config",
-    "save_config",
-    "get_previous_time",
-    "load_match",
-    "matchloop",
-    "load_metric",
-    "metricloop",
-    "load_pit",
-    "pitloop",
-    "push_match",
-    "push_metric",
-    "push_pit",
-]
-
-# imports:
-
-from tra_analysis import analysis as an
-import data as d
-from collections import defaultdict
-import json
-import math
-import numpy as np
-import os
-from os import system, name
-from pathlib import Path
-from multiprocessing import Pool
-import platform
-import sys
-import time
-import warnings
-
-global exec_threads
-
-def main():
-
-    global exec_threads
-
-    sys.stderr = open("errorlog.txt", "w")
-
-    warnings.filterwarnings("ignore")
-
-    splash()
-
-    while (True):
-
-        try:
-
-            current_time = time.time()
-            print("[OK] time: " + str(current_time))
-
-            config = load_config("config.json")
-            competition = config["competition"]
-            match_tests = config["statistics"]["match"]
-            pit_tests = config["statistics"]["pit"]
-            metrics_tests = config["statistics"]["metric"]
-            print("[OK] configs loaded")
-
-            print("[OK] starting threads")
-            cfg_max_threads = config["max-threads"]
-            sys_max_threads = os.cpu_count()
-            if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
-                alloc_processes = sys_max_threads + cfg_max_threads
-            elif cfg_max_threads > 0 and cfg_max_threads < 1:
-                alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
-            elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
-                alloc_processes = cfg_max_threads
-            elif cfg_max_threads == 0:
-                alloc_processes = sys_max_threads
-            else:
-                print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
-                exit()
-            exec_threads = Pool(processes = alloc_processes)
-            print("[OK] " + str(alloc_processes) + " threads started")
-
-            apikey = config["key"]["database"]
-            tbakey = config["key"]["tba"]
-            print("[OK] loaded keys")
-
-            previous_time = get_previous_time(apikey)
-            print("[OK] analysis backtimed to: " + str(previous_time))
-
-            print("[OK] loading data")
-            start = time.time()
-            match_data = load_match(apikey, competition)
-            pit_data = load_pit(apikey, competition)
-            print("[OK] loaded data in " + str(time.time() - start) + " seconds")
-
-            print("[OK] running match stats")
-            start = time.time()
-            matchloop(apikey, competition, match_data, match_tests)
-            print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
-
-            print("[OK] running team metrics")
-            start = time.time()
-            metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
-            print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
-
-            print("[OK] running pit analysis")
-            start = time.time()
-            pitloop(apikey, competition, pit_data, pit_tests)
-            print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
-
-            set_current_time(apikey, current_time)
-            print("[OK] finished all tests, looping")
-
-            print_hrule()
-
-        except KeyboardInterrupt:
-            print("\n[OK] caught KeyboardInterrupt, killing processes")
-            exec_threads.terminate()
-            print("[OK] processes killed, exiting")
-            exit()
-
-        else:
-            pass
-
-        #clear()
-
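The max-threads block in main() above folds four conventions into one config value: a negative value leaves that many cores unused, a fraction between 0 and 1 takes that share of the cores, 0 takes every core, and an integer up to the core count is used as-is. A sketch of the same rules restated as a standalone function (the function name and explicit core-count argument are illustrative):

    import math

    def alloc_processes(cfg_max_threads, sys_max_threads):
        if -sys_max_threads < cfg_max_threads < 0:
            return sys_max_threads + cfg_max_threads              # leave |cfg| cores free
        elif 0 < cfg_max_threads < 1:
            return math.floor(cfg_max_threads * sys_max_threads)  # fraction of the cores
        elif 1 < cfg_max_threads <= sys_max_threads:
            return cfg_max_threads                                # explicit process count
        elif cfg_max_threads == 0:
            return sys_max_threads                                # every core
        # note: a value of exactly 1 falls through here, as it does in main() above
        raise ValueError("must be between -%d and %d" % (sys_max_threads, sys_max_threads))

    # On an 8-core machine: alloc_processes(0.5, 8) == 4, alloc_processes(-2, 8) == 6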
-def clear():
-
-    # for windows
-    if name == 'nt':
-        _ = system('cls')
-
-    # for mac and linux(here, os.name is 'posix')
-    else:
-        _ = system('clear')
-
-def print_hrule():
-
-    print("#"+38*"-"+"#")
-
-def print_box(s):
-
-    temp = "|"
-    temp += s
-    temp += (40-len(s)-2)*" "
-    temp += "|"
-    print(temp)
-
-def splash():
-
-    print_hrule()
-    print_box(" superscript version: " + __version__)
-    print_box(" os: " + platform.system())
-    print_box(" python: " + platform.python_version())
-    print_hrule()
-
-def load_config(file):
-
-    config_vector = {}
-
-    try:
-        f = open(file)
-    except:
-        print("[ERROR] could not locate config.json, generating blank config.json and exiting")
-        f = open(file, "w")
-        f.write(sample_json)
-        exit()
-
-    config_vector = json.load(f)
-
-    return config_vector
-
-def save_config(file, config_vector):
-
-    with open(file) as f:
-        json.dump(config_vector, f)
-
-def get_previous_time(apikey):
-
-    previous_time = d.get_analysis_flags(apikey, "latest_update")
-
-    if previous_time == None:
-
-        d.set_analysis_flags(apikey, "latest_update", 0)
-        previous_time = 0
-
-    else:
-
-        previous_time = previous_time["latest_update"]
-
-    return previous_time
-
-def set_current_time(apikey, current_time):
-
-    d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
-
-def load_match(apikey, competition):
-
-    return d.get_match_data_formatted(apikey, competition)
-
-def simplestats(data_test):
-
-    data = np.array(data_test[0])
-    data = data[np.isfinite(data)]
-    ranges = list(range(len(data)))
-
-    test = data_test[1]
-
-    if test == "basic_stats":
-        return an.basic_stats(data)
-
-    if test == "historical_analysis":
-        return an.histo_analysis([ranges, data])
-
-    if test == "regression_linear":
-        return an.regression(ranges, data, ['lin'])
-
-    if test == "regression_logarithmic":
-        return an.regression(ranges, data, ['log'])
-
-    if test == "regression_exponential":
-        return an.regression(ranges, data, ['exp'])
-
-    if test == "regression_polynomial":
-        return an.regression(ranges, data, ['ply'])
-
-    if test == "regression_sigmoidal":
-        return an.regression(ranges, data, ['sig'])
-
-def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
-
-    global exec_threads
-
-    short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
-
-    class AutoVivification(dict):
-        def __getitem__(self, item):
-            try:
-                return dict.__getitem__(self, item)
-            except KeyError:
-                value = self[item] = type(self)()
-                return value
-
-    return_vector = {}
-
-    team_filtered = []
-    variable_filtered = []
-    variable_data = []
-    test_filtered = []
-    result_filtered = []
-    return_vector = AutoVivification()
-
-    for team in data:
-
-        for variable in data[team]:
-
-            if variable in tests:
-
-                for test in tests[variable]:
-
-                    team_filtered.append(team)
-                    variable_filtered.append(variable)
-                    variable_data.append((data[team][variable], test))
-                    test_filtered.append(test)
-
-    result_filtered = exec_threads.map(simplestats, variable_data)
-    i = 0
-
-    result_filtered = list(result_filtered)
-
-    for result in result_filtered:
-
-        filtered = test_filtered[i]
-
-        try:
-            short = short_mapping[filtered]
-            return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
-        except KeyError: # not in mapping
-            return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
-        i += 1
-
-    push_match(apikey, competition, return_vector)
-
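matchloop above fans every (team, variable, test) triple out to the process pool, then reassembles the flat result list into a nested return vector. The AutoVivification helper is what lets it assign three keys deep without building the intermediate dicts by hand; a minimal demonstration (the team, variable, and test keys are hypothetical):

    class AutoVivification(dict):
        def __getitem__(self, item):
            try:
                return dict.__getitem__(self, item)
            except KeyError:
                value = self[item] = type(self)()  # create missing levels on demand
                return value

    rv = AutoVivification()
    rv[2022]["balls-scored"]["basic_stats"] = {"mean": 4.2}
    print(rv)  # {2022: {'balls-scored': {'basic_stats': {'mean': 4.2}}}}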
-def load_metric(apikey, competition, match, group_name, metrics):
-
-    group = {}
-
-    for team in match[group_name]:
-
-        db_data = d.get_team_metrics_data(apikey, competition, team)
-
-        if d.get_team_metrics_data(apikey, competition, team) == None:
-
-            elo = {"score": metrics["elo"]["score"]}
-            gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
-            ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
-
-            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
-
-        else:
-
-            metrics = db_data["metrics"]
-
-            elo = metrics["elo"]
-            gl2 = metrics["gl2"]
-            ts = metrics["ts"]
-
-            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
-
-    return group
-
-def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
-
-    elo_N = metrics["elo"]["N"]
-    elo_K = metrics["elo"]["K"]
-
-    matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
-
-    red = {}
-    blu = {}
-
-    for match in matches:
-
-        red = load_metric(apikey, competition, match, "red", metrics)
-        blu = load_metric(apikey, competition, match, "blue", metrics)
-
-        elo_red_total = 0
-        elo_blu_total = 0
-
-        gl2_red_score_total = 0
-        gl2_blu_score_total = 0
-
-        gl2_red_rd_total = 0
-        gl2_blu_rd_total = 0
-
-        gl2_red_vol_total = 0
-        gl2_blu_vol_total = 0
-
-        for team in red:
-
-            elo_red_total += red[team]["elo"]["score"]
-
-            gl2_red_score_total += red[team]["gl2"]["score"]
-            gl2_red_rd_total += red[team]["gl2"]["rd"]
-            gl2_red_vol_total += red[team]["gl2"]["vol"]
-
-        for team in blu:
-
-            elo_blu_total += blu[team]["elo"]["score"]
-
-            gl2_blu_score_total += blu[team]["gl2"]["score"]
-            gl2_blu_rd_total += blu[team]["gl2"]["rd"]
-            gl2_blu_vol_total += blu[team]["gl2"]["vol"]
-
-        red_elo = {"score": elo_red_total / len(red)}
-        blu_elo = {"score": elo_blu_total / len(blu)}
-
-        red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
-        blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
-
-
-        if match["winner"] == "red":
-
-            observations = {"red": 1, "blu": 0}
-
-        elif match["winner"] == "blue":
-
-            observations = {"red": 0, "blu": 1}
-
-        else:
-
-            observations = {"red": 0.5, "blu": 0.5}
-
-        red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-        blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
-
-        new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-        new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
-
-        red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
-        blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
-
-        for team in red:
-
-            red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
-
-            red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
-            red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
-            red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
-
-        for team in blu:
-
-            blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
-
-            blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
-            blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
-            blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
-
-        temp_vector = {}
-        temp_vector.update(red)
-        temp_vector.update(blu)
-
-        push_metric(apikey, competition, temp_vector)
-
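metricloop above averages each alliance's ratings, asks an.Metric().elo(...) for the post-match rating, and applies the resulting delta to every team on the alliance. tra_analysis's exact implementation is not shown in this patch, but a conventional Elo update with the same N (scale) and K (gain) parameters illustrates the arithmetic being assumed:

    def elo_update(rating, opposing_rating, observed, N=400, K=24):
        # Expected score from the rating gap on scale N,
        # then move the rating by K times the prediction error.
        expected = 1 / (1 + 10 ** ((opposing_rating - rating) / N))
        return rating + K * (observed - expected)

    # Two equal 1500-rated alliances, red wins (observed = 1):
    print(elo_update(1500, 1500, 1))  # 1512.0, i.e. a +12.0 delta at K = 24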
-def load_pit(apikey, competition):
-
-    return d.get_pit_data_formatted(apikey, competition)
-
-def pitloop(apikey, competition, pit, tests):
-
-    return_vector = {}
-    for team in pit:
-        for variable in pit[team]:
-            if variable in tests:
-                if not variable in return_vector:
-                    return_vector[variable] = []
-                return_vector[variable].append(pit[team][variable])
-
-    push_pit(apikey, competition, return_vector)
-
-def push_match(apikey, competition, results):
-
-    for team in results:
-
-        d.push_team_tests_data(apikey, competition, team, results[team])
-
-def push_metric(apikey, competition, metric):
-
-    for team in metric:
-
-        d.push_team_metrics_data(apikey, competition, team, metric[team])
-
-def push_pit(apikey, competition, pit):
-
-    for variable in pit:
-
-        d.push_team_pit_data(apikey, competition, variable, pit[variable])
-
-def get_team_metrics(apikey, tbakey, competition):
-
-    metrics = d.get_metrics_data_formatted(apikey, competition)
-
-    elo = {}
-    gl2 = {}
-
-    for team in metrics:
-
-        elo[team] = metrics[team]["metrics"]["elo"]["score"]
-        gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
-
-    elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
-    gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
-
-    elo_ranked = []
-
-    for team in elo:
-
-        elo_ranked.append({"team": str(team), "elo": str(elo[team])})
-
-    gl2_ranked = []
-
-    for team in gl2:
-
-        gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
-
-    return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
-
-sample_json = """{
-    "max-threads": 0.5,
-    "team": "",
-    "competition": "2020ilch",
-    "key":{
-        "database":"",
-        "tba":""
-    },
-    "statistics":{
-        "match":{
-            "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
-            "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
-
-        },
-        "metric":{
-            "elo":{
-                "score":1500,
-                "N":400,
-                "K":24
-            },
-            "gl2":{
-                "score":1500,
-                "rd":250,
-                "vol":0.06
-            },
-            "ts":{
-                "mu":25,
-                "sigma":8.33
-            }
-        },
-        "pit":{
-            "wheel-mechanism":true,
-            "low-balls":true,
-            "high-balls":true,
-            "wheel-success":true,
-            "strategic-focus":true,
-            "climb-mechanism":true,
-            "attitude":true
-        }
-    }
-}"""
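sample_json above doubles as the documentation for config.json, since load_config writes it out verbatim when no config file exists. Parsing it directly shows the shape main() expects (the values printed are the sample's own):

    import json

    config = json.loads(sample_json)
    print(config["max-threads"])                   # 0.5, i.e. half the cores
    print(config["competition"])                   # 2020ilch
    print(config["statistics"]["metric"]["elo"])   # {'score': 1500, 'N': 400, 'K': 24}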
- "strategic-focus":true, - "climb-mechanism":true, - "attitude":true - } - } -}""" - -if __name__ == "__main__": - if sys.platform.startswith('win'): - multiprocessing.freeze_support() - main() \ No newline at end of file diff --git a/src/cli/interface.py b/src/interface.py similarity index 100% rename from src/cli/interface.py rename to src/interface.py diff --git a/src/cli/module.py b/src/module.py similarity index 99% rename from src/cli/module.py rename to src/module.py index c1bb145..a059e1a 100644 --- a/src/cli/module.py +++ b/src/module.py @@ -299,7 +299,6 @@ class Pit (Module): def _process_data(self, exec_threads): tests = self.config["tests"] - print(tests) return_vector = {} for team in self.data: for variable in self.data[team]: diff --git a/src/cli/pull.py b/src/pull.py similarity index 100% rename from src/cli/pull.py rename to src/pull.py diff --git a/src/cli/superscript.py b/src/superscript.py similarity index 100% rename from src/cli/superscript.py rename to src/superscript.py diff --git a/src/cli/superscript.spec b/src/superscript.spec similarity index 100% rename from src/cli/superscript.spec rename to src/superscript.spec diff --git a/src/cli/validation-schema.json b/src/validation-schema.json similarity index 100% rename from src/cli/validation-schema.json rename to src/validation-schema.json