diff --git a/build/build-CLI.bat b/build/build-CLI.bat
index 22dd93c..29199e2 100644
--- a/build/build-CLI.bat
+++ b/build/build-CLI.bat
@@ -2,4 +2,4 @@
 set pathtospec="../src/cli/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"
-pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
\ No newline at end of file
+pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
\ No newline at end of file
diff --git a/build/build-CLI.sh b/build/build-CLI.sh
index 302265b..e328718 100644
--- a/build/build-CLI.sh
+++ b/build/build-CLI.sh
@@ -2,4 +2,4 @@
 pathtospec="../src/cli/superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"
-pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
\ No newline at end of file
+pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
\ No newline at end of file
diff --git a/src/cli/data.py b/src/cli/data.py
index c46e726..1d9c056 100644
--- a/src/cli/data.py
+++ b/src/cli/data.py
@@ -1,5 +1,4 @@
 import requests
-import pandas as pd
 
 def pull_new_tba_matches(apikey, competition, cutoff):
 	api_key= apikey
diff --git a/src/cli/module.py b/src/cli/module.py
new file mode 100644
index 0000000..b3cac88
--- /dev/null
+++ b/src/cli/module.py
@@ -0,0 +1,310 @@
+import abc
+import data as d
+import signal
+import numpy as np
+import tra_analysis as an
+
+class Module(metaclass = abc.ABCMeta):
+
+	@classmethod
+	def __subclasshook__(cls, subclass):
+		return (hasattr(subclass, 'validate_config') and
+				callable(subclass.validate_config) and
+				hasattr(subclass, 'load_data') and
+				callable(subclass.load_data) and
+				hasattr(subclass, 'process_data') and
+				callable(subclass.process_data) and
+				hasattr(subclass, 'push_results') and
+				callable(subclass.push_results)
+				)
+
+	@abc.abstractmethod
+	def validate_config(self):
+		raise NotImplementedError
+	@abc.abstractmethod
+	def load_data(self):
+		raise NotImplementedError
+	@abc.abstractmethod
+	def process_data(self, exec_threads):
+		raise NotImplementedError
+	@abc.abstractmethod
+	def push_results(self):
+		raise NotImplementedError
+
+class Match (Module):
+
+	config = None
+	apikey = None
+	tbakey = None
+	timestamp = None
+	competition = None
+
+	data = None
+	results = None
+
+	def __init__(self, config, apikey, tbakey, timestamp, competition):
+		self.config = config
+		self.apikey = apikey
+		self.tbakey = tbakey
+		self.timestamp = timestamp
+		self.competition = competition
+
+	def validate_config(self):
+		return True, ""
+
+	def load_data(self):
+		self.data = d.load_match(self.apikey, self.competition)
+
+	@staticmethod # static so exec_threads.map can dispatch the worker without the instance
+	def simplestats(data_test):
+
+		signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+		data = np.array(data_test[3])
+		data = data[np.isfinite(data)]
+		ranges = list(range(len(data)))
+
+		test = data_test[2]
+
+		if test == "basic_stats":
+			return an.basic_stats(data)
+
+		if test == "historical_analysis":
+			return an.histo_analysis([ranges, data])
+
+		if test == "regression_linear":
+			return an.regression(ranges, data, ['lin'])
+
+		if test == "regression_logarithmic":
+			return an.regression(ranges, data, ['log'])
+
+		if test == "regression_exponential":
+			return an.regression(ranges, data, ['exp'])
+
+		if test == "regression_polynomial":
+			return an.regression(ranges, data, ['ply'])
+
+		if test == "regression_sigmoidal":
+			return an.regression(ranges, data, ['sig'])
+
+	def process_data(self, exec_threads):
+
+		tests = self.config["tests"]
+		data = self.data
+
+		input_vector = []
+
+		for team in data:
+
+			for variable in data[team]:
+
+				if variable in tests:
+
+					for test in tests[variable]:
+
+						input_vector.append((team, variable, test, data[team][variable]))
+
+		self.data = input_vector
+		self.results = list(exec_threads.map(self.simplestats, self.data))
+
+	def push_results(self):
+
+		short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
+
+		class AutoVivification(dict):
+			def __getitem__(self, item):
+				try:
+					return dict.__getitem__(self, item)
+				except KeyError:
+					value = self[item] = type(self)()
+					return value
+
+		result_filtered = self.results
+		input_vector = self.data
+
+		return_vector = AutoVivification()
+
+		i = 0
+
+		for result in result_filtered:
+
+			filtered = input_vector[i][2]
+
+			try:
+				short = short_mapping[filtered]
+				return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
+			except KeyError: # not in mapping
+				return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
+
+			i += 1
+
+		self.results = return_vector
+
+		d.push_match(self.apikey, self.competition, self.results)
+
+class Metric (Module):
+
+	config = None
+	apikey = None
+	tbakey = None
+	timestamp = None
+	competition = None
+
+	data = None
+	results = None
+
+	def __init__(self, config, apikey, tbakey, timestamp, competition):
+		self.config = config
+		self.apikey = apikey
+		self.tbakey = tbakey
+		self.timestamp = timestamp
+		self.competition = competition
+
+	def validate_config(self):
+		return True, ""
+
+	def load_data(self):
+		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
+
+	def process_data(self, exec_threads):
+
+		elo_N = self.config["tests"]["elo"]["N"]
+		elo_K = self.config["tests"]["elo"]["K"]
+
+		matches = self.data
+
+		red = {}
+		blu = {}
+
+		for match in matches:
+
+			red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
+			blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
+
+			elo_red_total = 0
+			elo_blu_total = 0
+
+			gl2_red_score_total = 0
+			gl2_blu_score_total = 0
+
+			gl2_red_rd_total = 0
+			gl2_blu_rd_total = 0
+
+			gl2_red_vol_total = 0
+			gl2_blu_vol_total = 0
+
+			for team in red:
+
+				elo_red_total += red[team]["elo"]["score"]
+
+				gl2_red_score_total += red[team]["gl2"]["score"]
+				gl2_red_rd_total += red[team]["gl2"]["rd"]
+				gl2_red_vol_total += red[team]["gl2"]["vol"]
+
+			for team in blu:
+
+				elo_blu_total += blu[team]["elo"]["score"]
+
+				gl2_blu_score_total += blu[team]["gl2"]["score"]
+				gl2_blu_rd_total += blu[team]["gl2"]["rd"]
+				gl2_blu_vol_total += blu[team]["gl2"]["vol"]
+
+			red_elo = {"score": elo_red_total / len(red)}
+			blu_elo = {"score": elo_blu_total / len(blu)}
+
+			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
+			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
+
+
+			if match["winner"] == "red":
+
+				observations = {"red": 1, "blu": 0}
+
+			elif match["winner"] == "blue":
+
+				observations = {"red": 0, "blu": 1}
+
+			else:
+
+				observations = {"red": 0.5, "blu": 0.5}
+
+			red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
+			blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
+
+			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+
+			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
+			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
+
+			for team in red:
+
+				red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
+
+				red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
+				red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
+				red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
+
+			for team in blu:
+
+				blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
+
+				blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
+				blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
+				blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
+
+			temp_vector = {}
+			temp_vector.update(red)
+			temp_vector.update(blu)
+
+			d.push_metric(self.apikey, self.competition, temp_vector)
+
+	def push_results(self):
+		pass
+
+class Pit (Module):
+
+	config = None
+	apikey = None
+	tbakey = None
+	timestamp = None
+	competition = None
+
+	data = None
+	results = None
+
+	def __init__(self, config, apikey, tbakey, timestamp, competition):
+		self.config = config
+		self.apikey = apikey
+		self.tbakey = tbakey
+		self.timestamp = timestamp
+		self.competition = competition
+
+	def validate_config(self):
+		return True, ""
+
+	def load_data(self):
+		self.data = d.load_pit(self.apikey, self.competition)
+
+	def process_data(self, exec_threads):
+		return_vector = {}
+		for team in self.data:
+			for variable in self.data[team]:
+				if variable in self.config["tests"]:
+					if not variable in return_vector:
+						return_vector[variable] = []
+					return_vector[variable].append(self.data[team][variable])
+
+		self.results = return_vector
+
+	def push_results(self):
+		d.push_pit(self.apikey, self.competition, self.results)
+
+class Rating (Module):
+	pass
+
+class Heatmap (Module):
+	pass
+
+class Sentiment (Module):
+	pass
\ No newline at end of file
diff --git a/src/cli/processing.py b/src/cli/processing.py
deleted file mode 100644
index aa867f6..0000000
--- a/src/cli/processing.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import numpy as np
-
-from tra_analysis import Analysis as an
-from data import pull_new_tba_matches, push_metric, load_metric
-
-import signal
-
-def simplestats(data_test):
-
-	signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-	data = np.array(data_test[3])
-	data = data[np.isfinite(data)]
-	ranges = list(range(len(data)))
-
-	test = data_test[2]
-
-	if test == "basic_stats":
-		return an.basic_stats(data)
-
-	if test == "historical_analysis":
-		return an.histo_analysis([ranges, data])
-
-	if test == "regression_linear":
-		return an.regression(ranges, data, ['lin'])
-
-	if test == "regression_logarithmic":
-		return an.regression(ranges, data, ['log'])
-
-	if test == "regression_exponential":
-		return an.regression(ranges, data, ['exp'])
-
-	if test == "regression_polynomial":
-		return an.regression(ranges, data, ['ply'])
-
-	if test == "regression_sigmoidal":
-		return an.regression(ranges, data, ['sig'])
-
-def matchloop(client, competition, data, tests, exec_threads):
-
-	short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
-
-	class AutoVivification(dict):
-		def __getitem__(self, item):
-			try:
-				return dict.__getitem__(self, item)
-			except KeyError:
-				value = self[item] = type(self)()
-				return value
-
-	input_vector = []
-	return_vector = AutoVivification()
-
-	for team in data:
-
-		for variable in data[team]:
-
-			if variable in tests:
-
-				for test in tests[variable]:
-
-					input_vector.append((team, variable, test, data[team][variable]))
-
-	result_filtered = exec_threads.map(simplestats, input_vector)
-
-	i = 0
-
-	result_filtered = list(result_filtered)
-
-	for result in result_filtered:
-
-		filtered = input_vector[i][2]
-
-		try:
-			short = short_mapping[filtered]
-			return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
-		except KeyError: # not in mapping
-			return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
-
-		i += 1
-
-	return return_vector
-
-def metricloop(client, competition, data, metrics): # listener based metrics update
-
-	elo_N = metrics["elo"]["N"]
-	elo_K = metrics["elo"]["K"]
-
-	matches = data
-	#matches = pull_new_tba_matches(tbakey, competition, timestamp)
-
-	red = {}
-	blu = {}
-
-	for match in matches:
-
-		red = load_metric(client, competition, match, "red", metrics)
-		blu = load_metric(client, competition, match, "blue", metrics)
-
-		elo_red_total = 0
-		elo_blu_total = 0
-
-		gl2_red_score_total = 0
-		gl2_blu_score_total = 0
-
-		gl2_red_rd_total = 0
-		gl2_blu_rd_total = 0
-
-		gl2_red_vol_total = 0
-		gl2_blu_vol_total = 0
-
-		for team in red:
-
-			elo_red_total += red[team]["elo"]["score"]
-
-			gl2_red_score_total += red[team]["gl2"]["score"]
-			gl2_red_rd_total += red[team]["gl2"]["rd"]
-			gl2_red_vol_total += red[team]["gl2"]["vol"]
-
-		for team in blu:
-
-			elo_blu_total += blu[team]["elo"]["score"]
-
-			gl2_blu_score_total += blu[team]["gl2"]["score"]
-			gl2_blu_rd_total += blu[team]["gl2"]["rd"]
-			gl2_blu_vol_total += blu[team]["gl2"]["vol"]
-
-		red_elo = {"score": elo_red_total / len(red)}
-		blu_elo = {"score": elo_blu_total / len(blu)}
-
-		red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
-		blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
-
-
-		if match["winner"] == "red":
-
-			observations = {"red": 1, "blu": 0}
-
-		elif match["winner"] == "blue":
-
-			observations = {"red": 0, "blu": 1}
-
-		else:
-
-			observations = {"red": 0.5, "blu": 0.5}
-
-		red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-		blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
-
-		new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-		new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
-
-		red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
"vol": new_red_gl2_vol - red_gl2["vol"]} - blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} - - for team in red: - - red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta - - red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"] - red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"] - red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"] - - for team in blu: - - blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta - - blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"] - blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"] - blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"] - - temp_vector = {} - temp_vector.update(red) - temp_vector.update(blu) - - push_metric(client, competition, temp_vector) - -def pitloop(client, competition, pit, tests): - - return_vector = {} - for team in pit: - for variable in pit[team]: - if variable in tests: - if not variable in return_vector: - return_vector[variable] = [] - return_vector[variable].append(pit[team][variable]) - - return return_vector \ No newline at end of file diff --git a/src/cli/superscript.py b/src/cli/superscript.py index 70f497f..84b8e32 100644 --- a/src/cli/superscript.py +++ b/src/cli/superscript.py @@ -163,62 +163,78 @@ import warnings import zmq from interface import splash, log, ERR, INF, stdout, stderr -from data import get_previous_time, pull_new_tba_matches, set_current_time, load_match, push_match, load_pit, push_pit, get_database_config, set_database_config, check_new_database_matches -from processing import matchloop, metricloop, pitloop +from data import get_previous_time, set_current_time, get_database_config, set_database_config, check_new_database_matches +from module import Match, Metric, Pit config_path = "config.json" sample_json = """{ "persistent":{ "key":{ - "database":"mongodb+srv://analysis:MU2gPeEjEurRt2n@2022-scouting-4vfuu.mongodb.net/?retryWrites=true&w=majority", - "tba":"UDvKmPjPRfwwUdDX1JxbmkyecYBJhCtXeyVk9vmO2i7K0Zn4wqQPMfzuEINXJ7e5" + "database":"", + "tba":"" }, "config-preference":"local", "synchronize-config":false }, "variable":{ + "max-threads":0.5, + + "competition":"", "team":"", - "competition": "2020ilch", - "statistics":{ + + "event-delay":false, + "loop-delay":0, + "reportable":true, + + "teams":[], + + "modules":{ + "match":{ - "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - 
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"] + "tests":{ + "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"] + } }, + "metric":{ - "elo":{ - "score":1500, - "N":400, - "K":24 - }, - "gl2":{ - "score":1500, - "rd":250, - "vol":0.06 - }, - "ts":{ - "mu":25, - "sigma":8.33 + "tests":{ + "elo":{ + "score":1500, + "N":400, + "K":24 + }, + "gl2":{ + "score":1500, + "rd":250, + "vol":0.06 + }, + "ts":{ + "mu":25, + "sigma":8.33 + } } }, + "pit":{ - "wheel-mechanism":true, - "low-balls":true, - "high-balls":true, - "wheel-success":true, - "strategic-focus":true, - "climb-mechanism":true, - "attitude":true + "tests":{ + "wheel-mechanism":true, + "low-balls":true, + "high-balls":true, + "wheel-success":true, + "strategic-focus":true, + "climb-mechanism":true, + "attitude":true + } } - }, - "event-delay":false, - "loop-delay":60 + } } }""" @@ -238,6 +254,8 @@ def main(send, verbose = False, profile = False, debug = False): if verbose: splash(__version__) + modules = {"match": Match, "metric": Metric, "pit": Pit} + while True: try: @@ -273,40 +291,27 @@ def main(send, verbose = False, profile = False, debug = False): exit_code = 1 close_all() break - flag, exec_threads, competition, match_tests, metrics_tests, pit_tests = parse_config_variable(send, config) + flag, exec_threads, competition, config_modules = parse_config_variable(send, config) if flag: exit_code = 1 close_all() break - start = time.time() - send(stdout, INF, "loading match, metric, pit data (this may take a few seconds)") - match_data = load_match(client, competition) - metrics_data = pull_new_tba_matches(tbakey, competition, loop_start) - pit_data = load_pit(client, competition) - send(stdout, INF, "finished loading match, metric, pit data in "+ str(time.time() - start) + " seconds") - - start = time.time() - send(stdout, INF, "performing analysis on match, metrics, pit data") - match_results = matchloop(client, competition, match_data, match_tests, exec_threads) - metrics_results = metricloop(client, competition, metrics_data, metrics_tests) - pit_results = 
-			pit_results = pitloop(client, competition, pit_data, pit_tests)
-			send(stdout, INF, "finished analysis in " + str(time.time() - start) + " seconds")
-
-			start = time.time()
-			send(stdout, INF, "uploading match, metrics, pit results to database")
-			push_match(client, competition, match_results)
-			push_pit(client, competition, pit_results)
-			send(stdout, INF, "finished uploading results in " + str(time.time() - start) + " seconds")
-
-			if debug:
-				f = open("matchloop.log", "w+")
-				json.dump(match_results, f, ensure_ascii=False, indent=4)
-				f.close()
-
-				f = open("pitloop.log", "w+")
-				json.dump(pit_results, f, ensure_ascii=False, indent=4)
-				f.close()
+			for m in config_modules:
+				if m in modules:
+					start = time.time()
+					current_module = modules[m](config_modules[m], client, tbakey, loop_start, competition)
+					valid, error_message = current_module.validate_config()
+					if not valid:
+						continue
+					current_module.load_data()
+					current_module.process_data(exec_threads)
+					current_module.push_results()
+					send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds")
+					if debug:
+						f = open(m + ".log", "w+")
+						json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
+						f.close()
 
 			set_current_time(client, loop_start)
 			close_all()
@@ -423,37 +428,21 @@
 		send(stderr, ERR, "could not find competition field in config", code = 101)
 		exit_flag = True
 	try:
-		match_tests = config["variable"]["statistics"]["match"]
+		modules = config["variable"]["modules"]
 	except:
-		send(stderr, ERR, "could not find match field in config", code = 102)
-		exit_flag = True
-	try:
-		metrics_tests = config["variable"]["statistics"]["metric"]
-	except:
-		send(stderr, ERR, "could not find metrics field in config", code = 103)
-		exit_flag = True
-	try:
-		pit_tests = config["variable"]["statistics"]["pit"]
-	except:
-		send(stderr, ERR, "could not find pit field in config", code = 104)
+		send(stderr, ERR, "could not find modules field in config", code = 102)
 		exit_flag = True
 
 	if competition == None or competition == "":
		send(stderr, ERR, "competition field in config must not be empty", code = 105)
 		exit_flag = True
-	if match_tests == None:
-		send(stderr, ERR, "matchfield in config must not be empty", code = 106)
-		exit_flag = True
-	if metrics_tests == None:
-		send(stderr, ERR, "metrics field in config must not be empty", code = 107)
-		exit_flag = True
-	if pit_tests == None:
-		send(stderr, ERR, "pit field in config must not be empty", code = 108)
+	if modules == None:
+		send(stderr, ERR, "modules field in config must not be empty", code = 106)
 		exit_flag = True
 
-	send(stdout, INF, "found and loaded competition, match, metrics, pit from config")
+	send(stdout, INF, "found and loaded competition and modules from config")
 
-	return exit_flag, exec_threads, competition, match_tests, metrics_tests, pit_tests
+	return exit_flag, exec_threads, competition, modules
 
 def resolve_config_conflicts(send, client, config, preference, sync):
diff --git a/src/cli/superscript.spec b/src/cli/superscript.spec
index 04cafa7..5d09620 100644
--- a/src/cli/superscript.spec
+++ b/src/cli/superscript.spec
@@ -13,7 +13,10 @@ a = Analysis(['superscript.py'],
 			],
 			hookspath=[],
 			runtime_hooks=[],
-			excludes=[],
+			excludes=[
+				"matplotlib",
+				"pandas"
+			],
 			win_no_prefer_redirects=False,
 			win_private_assemblies=False,
 			cipher=block_cipher,
diff --git a/src/requirements.txt b/src/requirements.txt
index 88cc58b..52854df 100644
--- a/src/requirements.txt
+++ b/src/requirements.txt
@@ -1,6 +1,5 @@
 requests
 pymongo
-pandas
 tra-analysis
 
 dnspython
@@ -11,7 +10,6 @@
 scipy
 scikit-learn
 six
 pyparsing
-pandas
 
 kivy==2.0.0rc2