diff --git a/superscript.py b/superscript.py
index fdd4e5a..cb91adb 100644
--- a/superscript.py
+++ b/superscript.py
@@ -3,10 +3,13 @@
 # Notes:
 # setup:
 
-__version__ = "0.7.0"
+__version__ = "0.8.0"
 
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
+	0.8.0:
+		- added multithreading to matchloop
+		- tweaked user log
 	0.7.0:
 		- finished implementing main function
 	0.6.2:
@@ -114,62 +117,72 @@ __all__ = [
 from tra_analysis import analysis as an
 import data as d
+from collections import defaultdict
 import json
 import numpy as np
 from os import system, name
 from pathlib import Path
 import matplotlib.pyplot as plt
+from concurrent.futures import ThreadPoolExecutor
 import time
 import warnings
 
+global exec_threads
+
 def main():
+	global exec_threads
+
 	warnings.filterwarnings("ignore")
-	while (True):
+	# while (True):
 
-		current_time = time.time()
-		print("[OK] time: " + str(current_time))
+	current_time = time.time()
+	print("[OK] time: " + str(current_time))
 
-		config = load_config("config.json")
-		competition = config["competition"]
-		match_tests = config["statistics"]["match"]
-		pit_tests = config["statistics"]["pit"]
-		metrics_tests = config["statistics"]["metric"]
-		print("[OK] configs loaded")
+	config = load_config("config.json")
+	competition = config["competition"]
+	match_tests = config["statistics"]["match"]
+	pit_tests = config["statistics"]["pit"]
+	metrics_tests = config["statistics"]["metric"]
+	print("[OK] configs loaded")
 
-		apikey = config["key"]["database"]
-		tbakey = config["key"]["tba"]
-		print("[OK] loaded keys")
+	print("[OK] starting threads")
+	exec_threads = ThreadPoolExecutor(max_workers = config["max-threads"])
+	print("[OK] threads started")
 
-		previous_time = get_previous_time(apikey)
-		print("[OK] analysis backtimed to: " + str(previous_time))
+	apikey = config["key"]["database"]
+	tbakey = config["key"]["tba"]
+	print("[OK] loaded keys")
 
-		print("[OK] loading data")
-		start = time.time()
-		match_data = load_match(apikey, competition)
-		pit_data = load_pit(apikey, competition)
-		print("[OK] loaded data in " + str(time.time() - start) + " seconds")
+	previous_time = get_previous_time(apikey)
+	print("[OK] analysis backtimed to: " + str(previous_time))
 
-		print("[OK] running tests")
-		start = time.time()
-		matchloop(apikey, competition, match_data, match_tests)
-		print("[OK] finished tests in " + str(time.time() - start) + " seconds")
+	print("[OK] loading data")
+	start = time.time()
+	match_data = load_match(apikey, competition)
+	pit_data = load_pit(apikey, competition)
+	print("[OK] loaded data in " + str(time.time() - start) + " seconds")
 
-		print("[OK] running metrics")
-		start = time.time()
-		metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
-		print("[OK] finished metrics in " + str(time.time() - start) + " seconds")
+	print("[OK] running match stats")
+	start = time.time()
+	matchloop(apikey, competition, match_data, match_tests)
+	print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
 
-		print("[OK] running pit analysis")
-		start = time.time()
-		pitloop(apikey, competition, pit_data, pit_tests)
-		print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
-
-		set_current_time(apikey, current_time)
-		print("[OK] finished all tests, looping")
+	print("[OK] running team metrics")
+	start = time.time()
+	metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
+	print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
 
-		clear()
+	print("[OK] running pit analysis")
+	start = time.time()
+	pitloop(apikey, competition, pit_data, pit_tests)
+	print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
+
+	set_current_time(apikey, current_time)
+	print("[OK] finished all tests, looping")
+
+	#clear()
 
 def clear():
@@ -219,12 +232,18 @@ def load_match(apikey, competition):
 
 def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
 
-	def simplestats(data, test):
+	start = time.time()
 
-		data = np.array(data)
+	global exec_threads
+
+	def simplestats(data_test):
+
+		data = np.array(data_test[0])
 		data = data[np.isfinite(data)]
 		ranges = list(range(len(data)))
 
+		test = data_test[1]
+
 		if test == "basic_stats":
 			return an.basic_stats(data)
@@ -246,19 +265,47 @@ def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][
 		if test == "regression_sigmoidal":
 			return an.regression(ranges, data, ['sig'])
 
+	class AutoVivification(dict):
+		def __getitem__(self, item):
+			try:
+				return dict.__getitem__(self, item)
+			except KeyError:
+				value = self[item] = type(self)()
+				return value
+
 	return_vector = {}
+
+	team_filtered = []
+	variable_filtered = []
+	variable_data = []
+	test_filtered = []
+	result_filtered = []
+	return_vector = AutoVivification()
+
 	for team in data:
-		variable_vector = {}
+
 		for variable in data[team]:
-			test_vector = {}
-			variable_data = data[team][variable]
+
 			if variable in tests:
+
 				for test in tests[variable]:
-					test_vector[test] = simplestats(variable_data, test)
-			else:
-				pass
-		variable_vector[variable] = test_vector
-	return_vector[team] = variable_vector
+
+					team_filtered.append(team)
+					variable_filtered.append(variable)
+					variable_data.append((data[team][variable], test))
+					test_filtered.append(test)
+
+	result_filtered = exec_threads.map(simplestats, variable_data)
+	i = 0
+
+	result_filtered = list(result_filtered)
+
+	for result in result_filtered:
+
+		return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
+		i += 1
+
+	print("metrics finished in " + str(time.time() - start))
 
 	push_match(apikey, competition, return_vector)