From 65a1720657e3d90a33621e55a413f07431d1c80f Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 26 Aug 2021 22:48:48 +0000
Subject: [PATCH 1/9] added prerequisite and build steps

---
 .github/workflows/build-cli.yml | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml
index 04e4e66..175e095 100644
--- a/.github/workflows/build-cli.yml
+++ b/.github/workflows/build-cli.yml
@@ -11,9 +11,15 @@ jobs:
   generate:
     name: Build Linux
     runs-on: ubuntu-latest
-
     steps:
     - name: Checkout master
      uses: actions/checkout@master
-    - name: Echo test
-      run: echo "test"
\ No newline at end of file
+    - name: Install Dependencies
+      run: pip install -r requirements.txt
+      working-directory: src/
+    - name: Give Execute Permission
+      run: chmod +x build/build-CLI.sh
+      working-directory: build/
+    - name: Build Binary
+      run: ./build-CLI.sh
+      working-directory: build/
\ No newline at end of file

From 1106a0ffb1d393bd682fc70c3bf9dea7ea08575e Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 26 Aug 2021 22:53:21 +0000
Subject: [PATCH 2/9] fixed path

---
 .github/workflows/build-cli.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml
index 175e095..ede3b4c 100644
--- a/.github/workflows/build-cli.yml
+++ b/.github/workflows/build-cli.yml
@@ -18,8 +18,8 @@ jobs:
       run: pip install -r requirements.txt
       working-directory: src/
     - name: Give Execute Permission
-      run: chmod +x build/build-CLI.sh
+      run: chmod +x build-CLI.sh
       working-directory: build/
     - name: Build Binary
-      run: ./build-CLI.sh
+      run: ./build-CLI.sh5
       working-directory: build/
\ No newline at end of file

From fc63d5d7e1acf874f70db955b581d229ed3a8322 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 26 Aug 2021 22:57:15 +0000
Subject: [PATCH 3/9] fixed typo

---
 .github/workflows/build-cli.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml
index ede3b4c..0828fa6 100644
--- a/.github/workflows/build-cli.yml
+++ b/.github/workflows/build-cli.yml
@@ -21,5 +21,5 @@ jobs:
       run: chmod +x build-CLI.sh
       working-directory: build/
     - name: Build Binary
-      run: ./build-CLI.sh5
+      run: ./build-CLI.sh
       working-directory: build/
\ No newline at end of file
From 42ca74b4ab541a9358cb5fe2d82ced0d48537f00 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 26 Aug 2021 23:14:09 +0000
Subject: [PATCH 4/9] add release asset automatically

---
 .github/workflows/build-cli.yml | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml
index 0828fa6..0093a7c 100644
--- a/.github/workflows/build-cli.yml
+++ b/.github/workflows/build-cli.yml
@@ -22,4 +22,14 @@ jobs:
       working-directory: build/
     - name: Build Binary
       run: ./build-CLI.sh
-      working-directory: build/
\ No newline at end of file
+      working-directory: build/
+    - name: Copy Binary to Root Dir
+      run: cp superscript ..
+      working-directory: dist/
+    - name: Upload Release Asset
+      uses: svenstaro/upload-release-action@v2
+      with:
+        repo_token: ${{ secrets.GITHUB_TOKEN }}
+        file: superscript
+        asset_name: Superscript Linux Binary
+        tag: ${{ github.ref }}
\ No newline at end of file

From 73b5c393a016ed25ac49661e68f67e9aa495adaf Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Thu, 26 Aug 2021 23:19:53 +0000
Subject: [PATCH 5/9] changed asset upload name

removed create criteria, must be publish
---
 .github/workflows/build-cli.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build-cli.yml b/.github/workflows/build-cli.yml
index 0093a7c..7b9f8d7 100644
--- a/.github/workflows/build-cli.yml
+++ b/.github/workflows/build-cli.yml
@@ -5,7 +5,7 @@ name: Build Superscript Linux

 on:
   release:
-    types: [published, created, edited]
+    types: [published, edited]

 jobs:
   generate:
@@ -31,5 +31,5 @@ jobs:
     with:
       repo_token: ${{ secrets.GITHUB_TOKEN }}
       file: superscript
-      asset_name: Superscript Linux Binary
+      asset_name: superscript
       tag: ${{ github.ref }}
\ No newline at end of file

From bd51efc6c2248edd987a9e5fc45113fa39af1c46 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Tue, 21 Sep 2021 04:43:32 +0000
Subject: [PATCH 6/9] added debug option, improved memory usage slightly,
 changed errorlog.txt to errorlog.log, updated .gitignore

---
 .gitignore             |  1 +
 src/cli/processing.py  | 27 ++++++++++-----------------
 src/cli/superscript.py | 26 ++++++++++++++++++++++----
 3 files changed, 33 insertions(+), 21 deletions(-)

diff --git a/.gitignore b/.gitignore
index 1573f33..0c62b1f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,6 +13,7 @@

 **/profile.*

+**/*.log
 **/errorlog.txt
 /dist/superscript.*
 /dist/superscript
\ No newline at end of file
diff --git a/src/cli/processing.py b/src/cli/processing.py
index fe028e5..73b1224 100644
--- a/src/cli/processing.py
+++ b/src/cli/processing.py
@@ -9,11 +9,11 @@ def simplestats(data_test):

 	signal.signal(signal.SIGINT, signal.SIG_IGN)

-	data = np.array(data_test[0])
+	data = np.array(data_test[3])
 	data = data[np.isfinite(data)]
 	ranges = list(range(len(data)))

-	test = data_test[1]
+	test = data_test[2]

 	if test == "basic_stats":
 		return an.basic_stats(data)
@@ -48,13 +48,7 @@ def matchloop(client, competition, data, tests, exec_threads):
 				value = self[item] = type(self)()
 				return value

-	return_vector = {}
-
-	team_filtered = []
-	variable_filtered = []
-	variable_data = []
-	test_filtered = []
-	result_filtered = []
+	input_vector = []
 	return_vector = AutoVivification()

 	for team in data:

 		for variable in data[team]:

 			for test in tests[variable]:

-				team_filtered.append(team)
-				variable_filtered.append(variable)
-				variable_data.append((data[team][variable], test))
-				test_filtered.append(test)
+				input_vector.append((team, variable, test, data[team][variable]))

-	result_filtered = exec_threads.map(simplestats, variable_data)
+	result_filtered = exec_threads.map(simplestats, input_vector)
+
 	i = 0
 	result_filtered = list(result_filtered)
 	for result in result_filtered:

-		filtered = test_filtered[i]
+		filtered = input_vector[i][2]

 		try:
 			short = short_mapping[filtered]
-			return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
+			return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
 		except KeyError: # not in mapping
-			return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
+			return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
+
 		i += 1

 	return return_vector
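The refactor above collapses four parallel lists into a single input_vector of (team, variable, test, data) tuples, so the output of exec_threads.map lines up with input_vector by index. A minimal, self-contained sketch of that pattern — the data, tests, and stats function here are illustrative stand-ins, not the module's real ones:

    from multiprocessing import Pool

    def simplestats(data_test):
        # unpack the consolidated tuple the same way processing.py does:
        # index 2 is the test name, index 3 is the data series
        test = data_test[2]
        data = data_test[3]
        return (test, sum(data) / len(data))  # stand-in for an.basic_stats etc.

    if __name__ == "__main__":
        data = {"2022": {"balls-started": [1, 2, 3]}, "3478": {"balls-started": [4, 5]}}
        tests = {"balls-started": ["basic_stats"]}
        input_vector = []
        for team in data:
            for variable in data[team]:
                for test in tests[variable]:
                    input_vector.append((team, variable, test, data[team][variable]))
        with Pool(2) as exec_threads:
            results = exec_threads.map(simplestats, input_vector)
        # results[i] corresponds to input_vector[i], so the team/variable/test
        # labels can be recovered by index when filling the nested return_vector
        for (team, variable, test, _), result in zip(input_vector, results):
            print(team, variable, test, result)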
diff --git a/src/cli/superscript.py b/src/cli/superscript.py
index 91d11b5..ef15062 100644
--- a/src/cli/superscript.py
+++ b/src/cli/superscript.py
@@ -10,10 +10,12 @@ __changelog__ = """changelog:
 	1.0.0:
 		- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
 		- linux superscript daemon has integrated websocket output to monitor progress/status remotely
-		- linux daemon now sends stderr to errorlog.txt
+		- linux daemon now sends stderr to errorlog.log
 		- added verbose option to linux superscript to allow for interactive output
 		- moved pymongo import to superscript.py
 		- added profile option to linux superscript to profile runtime of script
+		- reduced memory usage slightly by consolidating the unwrapped input data
+		- added debug option, which performs one loop of analysis and dumps results to local files
 	0.9.3:
 		- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
 		- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
@@ -206,10 +208,10 @@ sample_json = """{
 	}
 }"""

-def main(send, verbose = False, profile = False):
+def main(send, verbose = False, profile = False, debug = False):

 	warnings.filterwarnings("ignore")
-	sys.stderr = open("errorlog.txt", "w")
+	sys.stderr = open("errorlog.log", "w")

 	loop_exit_code = 0
 	loop_stored_exception = None
@@ -348,6 +350,11 @@ def main(send, verbose = False, profile = False, debug = False):
 		results = matchloop(client, competition, match_data, match_tests, exec_threads)
 		send(stdout, INF, "finished match analysis in " + str(time.time() - start) + " seconds")

+		if debug:
+			f = open("matchloop.log", "w+")
+			json.dump(results, f)
+			f.close()
+
 		start = time.time()
 		send(stdout, INF, "uploading match results to database")
 		push_match(client, competition, results)
@@ -368,6 +375,11 @@ def main(send, verbose = False, profile = False, debug = False):
 		results = pitloop(client, competition, pit_data, pit_tests)
 		send(stdout, INF, "finished pit analysis in " + str(time.time() - start) + " seconds")

+		if debug:
+			f = open("pitloop.log", "w+")
+			json.dump(results, f)
+			f.close()
+
 		start = time.time()
 		send(stdout, INF, "uploading pit results to database")
 		push_pit(client, competition, results)
@@ -420,7 +432,7 @@ def save_config(path, config_vector):
 	except:
 		return 1

-def start(pid_path, verbose = False, profile = False):
+def start(pid_path, verbose = False, profile = False, debug = False):

 	if profile:

@@ -440,6 +452,10 @@ def start(pid_path, verbose = False, profile = False, debug = False):

 			main(log, verbose = verbose)

+	elif debug:
+
+		main(log, verbose = True, profile = True, debug = debug)
+
 	else:

 		f = open('errorlog.txt', 'w+')
@@ -527,6 +543,8 @@ if __name__ == "__main__":
 			start(None, verbose = True)
 		elif 'profile' == sys.argv[1]:
 			start(None, profile=True)
+		elif 'debug' == sys.argv[1]:
+			start(None, debug = True)
 		else:
 			print("usage: %s start|stop|restart|verbose|profile" % sys.argv[0])
 			sys.exit(2)
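Worth noting from this patch's processing.py hunk: the nested assignments into return_vector rely on the AutoVivification helper, whose tail is visible as context (value = self[item] = type(self)()). It is the standard autovivifying-dict recipe; reproduced here from those context lines so the assignment pattern is clear — return_vector[team][variable][test] = ... works without pre-creating any nested dicts:

    class AutoVivification(dict):
        # missing keys are created on first access as another AutoVivification,
        # so arbitrarily deep assignments never raise KeyError
        def __getitem__(self, item):
            try:
                return dict.__getitem__(self, item)
            except KeyError:
                value = self[item] = type(self)()
                return value

    return_vector = AutoVivification()
    return_vector["2022"]["balls-started"]["basic_stats"] = 1.0
    print(return_vector)  # {'2022': {'balls-started': {'basic_stats': 1.0}}}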
From 74bcc116a221a6bc4865f8b19d4e5fd182aa9303 Mon Sep 17 00:00:00 2001
From: Arthur Lu
Date: Tue, 21 Sep 2021 05:14:54 +0000
Subject: [PATCH 7/9] added database config getter/setter to data.py, fixed
 args for data.py functions

---
 src/cli/data.py | 42 +++++++++++++++++++++++++-----------------
 1 file changed, 25 insertions(+), 17 deletions(-)

diff --git a/src/cli/data.py b/src/cli/data.py
index e5bfb5b..56f3054 100644
--- a/src/cli/data.py
+++ b/src/cli/data.py
@@ -114,13 +114,13 @@ def unkeyify_2l(layered_dict):
 		out[i] = list(map(lambda x: x[1], add))
 	return out

-def get_previous_time(apikey):
+def get_previous_time(client):

-	previous_time = get_analysis_flags(apikey, "latest_update")
+	previous_time = get_analysis_flags(client, "latest_update")

 	if previous_time == None:

-		set_analysis_flags(apikey, "latest_update", 0)
+		set_analysis_flags(client, "latest_update", 0)
 		previous_time = 0

 	else:
@@ -129,21 +129,29 @@ def get_previous_time(client):

 	return previous_time

-def set_current_time(apikey, current_time):
+def set_current_time(client, current_time):

-	set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
+	set_analysis_flags(client, "latest_update", {"latest_update":current_time})

-def load_match(apikey, competition):
+def get_database_config(client):

-	return get_match_data_formatted(apikey, competition)
+	return get_analysis_flags(client, "config")["config"]

-def load_metric(apikey, competition, match, group_name, metrics):
+def set_database_config(client, config):
+
+	set_analysis_flags(client, "config", {"config": config})
+
+def load_match(client, competition):
+
+	return get_match_data_formatted(client, competition)
+
+def load_metric(client, competition, match, group_name, metrics):

 	group = {}

 	for team in match[group_name]:

-		db_data = get_team_metrics_data(apikey, competition, team)
+		db_data = get_team_metrics_data(client, competition, team)

 		if db_data == None:
@@ -165,24 +173,24 @@ def load_metric(client, competition, match, group_name, metrics):

 	return group

-def load_pit(apikey, competition):
+def load_pit(client, competition):

-	return get_pit_data_formatted(apikey, competition)
+	return get_pit_data_formatted(client, competition)

-def push_match(apikey, competition, results):
+def push_match(client, competition, results):

 	for team in results:

-		push_team_tests_data(apikey, competition, team, results[team])
+		push_team_tests_data(client, competition, team, results[team])

-def push_metric(apikey, competition, metric):
+def push_metric(client, competition, metric):

 	for team in metric:

-		push_team_metrics_data(apikey, competition, team, metric[team])
+		push_team_metrics_data(client, competition, team, metric[team])

-def push_pit(apikey, competition, pit):
+def push_pit(client, competition, pit):

 	for variable in pit:

-		push_team_pit_data(apikey, competition, variable, pit[variable])
\ No newline at end of file
+		push_team_pit_data(client, competition, variable, pit[variable])
\ No newline at end of file
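The new getter/setter pair in this patch stores the whole config dict under an analysis flag keyed "config". A sketch of the round-trip — get_analysis_flags/set_analysis_flags are data.py's MongoDB-backed helpers, faked here with a plain dict so the example runs standalone:

    flags = {}  # stand-in for the MongoDB storage behind the real helpers

    def get_analysis_flags(client, flag):
        return flags.get(flag)

    def set_analysis_flags(client, flag, data):
        flags[flag] = data

    def get_database_config(client):
        return get_analysis_flags(client, "config")["config"]

    def set_database_config(client, config):
        set_analysis_flags(client, "config", {"config": config})

    set_database_config(None, {"competition": "2020ilch"})
    print(get_database_config(None))  # {'competition': '2020ilch'}

Note that get_database_config as written fails if the flag was never set; patch 9 below adds a None guard for exactly that case.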
print("usage: %s start|stop|restart|verbose|profile" % sys.argv[0]) + print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0]) sys.exit(2) \ No newline at end of file From da913e639ba523dacd88ae5fce50d4037a9c9a57 Mon Sep 17 00:00:00 2001 From: Arthur Lu Date: Thu, 23 Sep 2021 07:10:01 +0000 Subject: [PATCH 9/9] added opotion to pull config from database, major code refactor --- src/cli/data.py | 3 +- src/cli/processing.py | 5 +- src/cli/superscript.py | 462 +++++++++++++++++++++++------------------ 3 files changed, 265 insertions(+), 205 deletions(-) diff --git a/src/cli/data.py b/src/cli/data.py index 56f3054..1fe53c5 100644 --- a/src/cli/data.py +++ b/src/cli/data.py @@ -135,7 +135,8 @@ def set_current_time(client, current_time): def get_database_config(client): - return get_analysis_flags(client, "config")["config"] + remote_config = get_analysis_flags(client, "config") + return remote_config["config"] if remote_config != None else None def set_database_config(client, config): diff --git a/src/cli/processing.py b/src/cli/processing.py index 73b1224..aa867f6 100644 --- a/src/cli/processing.py +++ b/src/cli/processing.py @@ -81,12 +81,13 @@ def matchloop(client, competition, data, tests, exec_threads): return return_vector -def metricloop(tbakey, client, competition, timestamp, metrics): # listener based metrics update +def metricloop(client, competition, data, metrics): # listener based metrics update elo_N = metrics["elo"]["N"] elo_K = metrics["elo"]["K"] - matches = pull_new_tba_matches(tbakey, competition, timestamp) + matches = data + #matches = pull_new_tba_matches(tbakey, competition, timestamp) red = {} blu = {} diff --git a/src/cli/superscript.py b/src/cli/superscript.py index 5584032..d161fe7 100644 --- a/src/cli/superscript.py +++ b/src/cli/superscript.py @@ -19,6 +19,10 @@ __changelog__ = """changelog: - added event and time delay options to config - event delay pauses loop until even listener recieves an update - time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop + - added options to pull config information from database (reatins option to use local config file) + - config-preference option selects between prioritizing local config and prioritizing database config + - synchronize-config option selects whether to update the non prioritized config with the prioritized one + - divided config options between persistent ones (keys), and variable ones (everything else) 0.9.3: - improved data loading performance by removing redundant PyMongo client creation (120s to 14s) - passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions @@ -160,65 +164,78 @@ import warnings import websockets from interface import splash, log, ERR, INF, stdout, stderr -from data import get_previous_time, set_current_time, load_match, push_match, load_pit, push_pit +from data import get_previous_time, pull_new_tba_matches, set_current_time, load_match, push_match, load_pit, push_pit, get_database_config, set_database_config from processing import matchloop, metricloop, pitloop config_path = "config.json" sample_json = """{ - "max-threads": 0.5, - "team": "", - "competition": "2020ilch", - "key":{ - "database":"", - "tba":"" - }, - "statistics":{ - "match":{ - "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - 
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], - "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"] - + "persistent":{ + "key":{ + "database":"", + "tba":"" }, - "metric":{ - "elo":{ - "score":1500, - "N":400, - "K":24 + "config-preference":"local", + "synchronize-config":false + }, + "variable":{ + "max-threads":0.5, + "team":"", + "competition": "2020ilch", + "statistics":{ + "match":{ + "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], + "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"] + }, - "gl2":{ - "score":1500, - "rd":250, - "vol":0.06 + "metric":{ + "elo":{ + "score":1500, + "N":400, + "K":24 + }, + "gl2":{ + "score":1500, + "rd":250, + "vol":0.06 + }, + "ts":{ + "mu":25, + "sigma":8.33 + } }, - "ts":{ - "mu":25, - "sigma":8.33 + "pit":{ + "wheel-mechanism":true, + "low-balls":true, + "high-balls":true, + "wheel-success":true, + "strategic-focus":true, + "climb-mechanism":true, + "attitude":true } }, - "pit":{ - "wheel-mechanism":true, - "low-balls":true, - "high-balls":true, - "wheel-success":true, - "strategic-focus":true, - "climb-mechanism":true, - "attitude":true - } - }, - "even-delay":false, - "loop-delay":60 + "even-delay":false, + "loop-delay":60 + } }""" def main(send, verbose = False, profile = False, debug = False): + def close_all(): + if "exec_threads" in locals(): + exec_threads.terminate() + exec_threads.join() + exec_threads.close() + if "client" in locals(): + client.close() + warnings.filterwarnings("ignore") 
 	sys.stderr = open("errorlog.log", "w")

-	loop_exit_code = 0
-	loop_stored_exception = None
+	exit_code = 0

 	if verbose:
 		splash(__version__)

@@ -229,173 +246,76 @@ def main(send, verbose = False, profile = False, debug = False):

 		loop_start = time.time()

-		current_time = time.time()
-		send(stdout, INF, "current time: " + str(current_time))
-
-		send(stdout, INF, "loading config at <" + config_path + ">", code = 0)
+		send(stdout, INF, "current time: " + str(loop_start))

 		config = {}
-		if load_config(config_path, config) == 1:
+
+		if load_config(config_path, config):
 			send(stderr, ERR, "could not find config at <" + config_path + ">, generating blank config and exiting", code = 100)
-			sys.exit(1)
-
-		send(stdout, INF, "found and opened config at <" + config_path + ">", code = 0)
-
-		error_flag = False
-
-		try:
-			competition = config["competition"]
-		except:
-			send(stderr, ERR, "could not find competition field in config", code = 101)
-			error_flag = True
-		try:
-			match_tests = config["statistics"]["match"]
-		except:
-			send(stderr, ERR, "could not find match_tests field in config", code = 102)
-			error_flag = True
-		try:
-			metrics_tests = config["statistics"]["metric"]
-		except:
-			send(stderr, ERR, "could not find metrics_tests field in config", code = 103)
-			error_flag = True
-		try:
-			pit_tests = config["statistics"]["pit"]
-		except:
-			send(stderr, ERR, "could not find pit_tests field in config", code = 104)
-			error_flag = True
+			exit_code = 1
+			break

-		if error_flag:
-			sys.exit(1)
-		error_flag = False
+		send(stdout, INF, "found and loaded config at <" + config_path + ">")

-		if competition == None or competition == "":
-			send(stderr, ERR, "competition field in config must not be empty", code = 105)
-			error_flag = True
-		if match_tests == None:
-			send(stderr, ERR, "match_tests field in config must not be empty", code = 106)
-			error_flag = True
-		if metrics_tests == None:
-			send(stderr, ERR, "metrics_tests field in config must not be empty", code = 107)
-			error_flag = True
-		if pit_tests == None:
-			send(stderr, ERR, "pit_tests field in config must not be empty", code = 108)
-			error_flag = True
-
-		if error_flag:
-			sys.exit(1)
-
-		send(stdout, INF, "found and loaded competition, match_tests, metrics_tests, pit_tests from config")
-
-		sys_max_threads = os.cpu_count()
-		try:
-			cfg_max_threads = config["max-threads"]
-		except:
-			send(stderr, ERR, "max-threads field in config must not be empty, refer to documentation for configuration options", code = 109)
-			sys.exit(1)
-
-		if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
-			alloc_processes = sys_max_threads + cfg_max_threads
-		elif cfg_max_threads > 0 and cfg_max_threads < 1:
-			alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
-		elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
-			alloc_processes = cfg_max_threads
-		elif cfg_max_threads == 0:
-			alloc_processes = sys_max_threads
-		else:
-			send(stderr, ERR, "max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, code = 110)
-			sys.exit(1)
-
-		send(stdout, INF, "found and loaded max-threads from config")
-		send(stdout, INF, "attempting to start " + str(alloc_processes) + " threads")
-		try:
-			exec_threads = Pool(processes = alloc_processes)
-		except Exception as e:
-			send(stderr, ERR, "unable to start threads", code = 200)
-			send(stderr, INF, e)
-			sys.exit(1)
-		send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")
-
-		exit_flag = False
-
-		try:
-			apikey = config["key"]["database"]
-		except:
-			send(stderr, ERR, "database key field in config must be present", code = 111)
-			exit_flag = True
-		try:
-			tbakey = config["key"]["tba"]
-		except:
-			send(stderr, ERR, "tba key field in config must be present", code = 112)
-			exit_flag = True
-
-		if apikey == None or apikey == "":
-			send(stderr, ERR, "database key field in config must not be empty, please populate the database key")
-			exit_flag = True
-		if tbakey == None or tbakey == "":
-			send(stderr, ERR, "tba key field in config must not be empty, please populate the tba key")
-			exit_flag = True
-
-		if exit_flag:
-			sys.exit(1)
-
+		flag, apikey, tbakey, preference, sync = parse_config_persistent(send, config)
+		if flag:
+			exit_code = 1
+			break
 		send(stdout, INF, "found and loaded database and tba keys")

 		client = pymongo.MongoClient(apikey)
-		previous_time = get_previous_time(client)
-		send(stdout, INF, "analysis backtimed to: " + str(previous_time))
+		send(stdout, INF, "established connection to database")
+		send(stdout, INF, "analysis backtimed to: " + str(get_previous_time(client)))
+
+		resolve_config_conflicts(send, client, config, preference, sync)
+		if config == 1:
+			exit_code = 1
+			break
+		flag, exec_threads, competition, match_tests, metrics_tests, pit_tests = parse_config_variable(send, config)
+		if flag:
+			exit_code = 1
+			break

 		start = time.time()
-		send(stdout, INF, "loading match data")
+		send(stdout, INF, "loading match, metric, pit data (this may take a few seconds)")
 		match_data = load_match(client, competition)
-		send(stdout, INF, "finished loading match data in " + str(time.time() - start) + " seconds")
+		metrics_data = pull_new_tba_matches(tbakey, competition, loop_start)
+		pit_data = load_pit(client, competition)
+		send(stdout, INF, "finished loading match, metric, pit data in " + str(time.time() - start) + " seconds")

 		start = time.time()
-		send(stdout, INF, "performing analysis on match data")
-		results = matchloop(client, competition, match_data, match_tests, exec_threads)
-		send(stdout, INF, "finished match analysis in " + str(time.time() - start) + " seconds")
+		send(stdout, INF, "performing analysis on match, metrics, pit data")
+		match_results = matchloop(client, competition, match_data, match_tests, exec_threads)
+		metrics_results = metricloop(client, competition, metrics_data, metrics_tests)
+		pit_results = pitloop(client, competition, pit_data, pit_tests)
+		send(stdout, INF, "finished analysis in " + str(time.time() - start) + " seconds")
+
+		start = time.time()
+		send(stdout, INF, "uploading match, metrics, pit results to database")
+		push_match(client, competition, match_results)
+		push_pit(client, competition, pit_results)
+		send(stdout, INF, "finished uploading results in " + str(time.time() - start) + " seconds")

 		if debug:
 			f = open("matchloop.log", "w+")
-			json.dump(results, f)
+			json.dump(match_results, f, ensure_ascii=False, indent=4)
 			f.close()

-		start = time.time()
-		send(stdout, INF, "uploading match results to database")
-		push_match(client, competition, results)
-		send(stdout, INF, "finished uploading match results in " + str(time.time() - start) + " seconds")
-
-		start = time.time()
-		send(stdout, INF, "performing analysis on team metrics")
-		results = metricloop(tbakey, client, competition, current_time, metrics_tests)
-		send(stdout, INF, "finished metric analysis and pushed to database in " + str(time.time() - start) + " seconds")
-
-		start = time.time()
-		send(stdout, INF, "loading pit data")
-		pit_data = load_pit(client, competition)
-		send(stdout, INF, "finished loading pit data in " + str(time.time() - start) + " seconds")
-
-		start = time.time()
-		send(stdout, INF, "performing analysis on pit data")
-		results = pitloop(client, competition, pit_data, pit_tests)
-		send(stdout, INF, "finished pit analysis in " + str(time.time() - start) + " seconds")
-
 		if debug:
 			f = open("pitloop.log", "w+")
-			json.dump(results, f)
+			json.dump(pit_results, f, ensure_ascii=False, indent=4)
 			f.close()

-		start = time.time()
-		send(stdout, INF, "uploading pit results to database")
-		push_pit(client, competition, results)
-		send(stdout, INF, "finished uploading pit results in " + str(time.time() - start) + " seconds")
+		set_current_time(client, loop_start)
+		close_all()

-		client.close()
+		send(stdout, INF, "closed threads and database client")
+		send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")

-		set_current_time(client, current_time)
-		send(stdout, INF, "finished all tests in " + str(time.time() - loop_start) + " seconds, looping")
+		if profile:
+			return # return instead of break to avoid sys.exit

-		loop_delay = float(config["loop-delay"])
+		loop_delay = float(config["variable"]["loop-delay"])
 		remaining_time = loop_delay - (time.time() - loop_start)
 		if remaining_time > 0:
 			send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds")

 	except KeyboardInterrupt:
 		send(stdout, INF, "detected KeyboardInterrupt, killing threads")
-		if "exec_threads" in locals():
-			exec_threads.terminate()
-			exec_threads.join()
-			exec_threads.close()
+		close_all()
 		send(stdout, INF, "terminated threads, exiting")
-		loop_stored_exception = sys.exc_info()
 		loop_exit_code = 0
 		break

 	except Exception as e:
 		send(stderr, ERR, "encountered an exception while running", code = 1)
 		print(e, file = stderr)
-		loop_exit_code = 1
+		exit_code = 1
+		close_all()
 		break

-	if profile:
-		return
-
-	sys.exit(loop_exit_code)
+	sys.exit(exit_code)

+def parse_config_persistent(send, config):
+
+	exit_flag = False
+	try:
+		apikey = config["persistent"]["key"]["database"]
+	except:
+		send(stderr, ERR, "database key field in config must be present", code = 111)
+		exit_flag = True
+	try:
+		tbakey = config["persistent"]["key"]["tba"]
+	except:
+		send(stderr, ERR, "tba key field in config must be present", code = 112)
+		exit_flag = True
+	try:
+		preference = config["persistent"]["config-preference"]
+	except:
+		send(stderr, ERR, "config-preference field in config must be present", code = 113)
+		exit_flag = True
+	try:
+		sync = config["persistent"]["synchronize-config"]
+	except:
+		send(stderr, ERR, "synchronize-config field in config must be present", code = 114)
+		exit_flag = True
+
+	if apikey == None or apikey == "":
+		send(stderr, ERR, "database key field in config must not be empty, please populate the database key", code = 115)
+		exit_flag = True
+	if tbakey == None or tbakey == "":
+		send(stderr, ERR, "tba key field in config must not be empty, please populate the tba key", code = 116)
+		exit_flag = True
+	if preference == None or preference == "":
+		send(stderr, ERR, "config-preference field in config must not be empty, please populate config-preference", code = 117)
+		exit_flag = True
+	if sync != True and sync != False:
+		send(stderr, ERR, "synchronize-config field in config must be a boolean, please populate synchronize-config", code = 118)
+		exit_flag = True
+
+	return exit_flag, apikey, tbakey, preference, sync
+
+def parse_config_variable(send, config):
+
+	exit_flag = False
+
+	sys_max_threads = os.cpu_count()
+	try:
+		cfg_max_threads = config["variable"]["max-threads"]
+	except:
+		send(stderr, ERR, "max-threads field in config must not be empty, refer to documentation for configuration options", code = 109)
+		exit_flag = True
+
+	if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
+		alloc_processes = sys_max_threads + cfg_max_threads
+	elif cfg_max_threads > 0 and cfg_max_threads < 1:
+		alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
+	elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
+		alloc_processes = cfg_max_threads
+	elif cfg_max_threads == 0:
+		alloc_processes = sys_max_threads
+	else:
+		send(stderr, ERR, "max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, code = 110)
+		exit_flag = True
+
+	try:
+		exec_threads = Pool(processes = alloc_processes)
+	except Exception as e:
+		send(stderr, ERR, "unable to start threads", code = 200)
+		send(stderr, INF, e)
+		exit_flag = True
+	send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")
+
+	try:
+		competition = config["variable"]["competition"]
+	except:
+		send(stderr, ERR, "could not find competition field in config", code = 101)
+		exit_flag = True
+	try:
+		match_tests = config["variable"]["statistics"]["match"]
+	except:
+		send(stderr, ERR, "could not find match field in config", code = 102)
+		exit_flag = True
+	try:
+		metrics_tests = config["variable"]["statistics"]["metric"]
+	except:
+		send(stderr, ERR, "could not find metrics field in config", code = 103)
+		exit_flag = True
+	try:
+		pit_tests = config["variable"]["statistics"]["pit"]
+	except:
+		send(stderr, ERR, "could not find pit field in config", code = 104)
+		exit_flag = True
+
+	if competition == None or competition == "":
+		send(stderr, ERR, "competition field in config must not be empty", code = 105)
+		exit_flag = True
+	if match_tests == None:
+		send(stderr, ERR, "match field in config must not be empty", code = 106)
+		exit_flag = True
+	if metrics_tests == None:
+		send(stderr, ERR, "metrics field in config must not be empty", code = 107)
+		exit_flag = True
+	if pit_tests == None:
+		send(stderr, ERR, "pit field in config must not be empty", code = 108)
+		exit_flag = True
+
+	send(stdout, INF, "found and loaded competition, match, metrics, pit from config")
+
+	return exit_flag, exec_threads, competition, match_tests, metrics_tests, pit_tests
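The max-threads option parsed above accepts a fraction of the core count, a negative offset, an absolute count, or 0 for all cores — on an 8-core runner, 0.5 allocates 4 processes, -2 allocates 6, 3 allocates 3, and 0 allocates 8. A condensed restatement of the branch logic, for illustration only (note that a value of exactly 1 falls through to the error branch in the original as well):

    import math

    def alloc(cfg_max_threads, sys_max_threads):
        if -sys_max_threads < cfg_max_threads < 0:
            return sys_max_threads + cfg_max_threads              # leave N cores free
        if 0 < cfg_max_threads < 1:
            return math.floor(cfg_max_threads * sys_max_threads)  # fraction of cores
        if 1 < cfg_max_threads <= sys_max_threads:
            return cfg_max_threads                                # absolute count
        if cfg_max_threads == 0:
            return sys_max_threads                                # all cores
        return None                                               # rejected with code 110

    print([alloc(v, 8) for v in (0.5, -2, 3, 0)])  # [4, 6, 3, 8]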
+def resolve_config_conflicts(send, client, config, preference, sync):
+
+	if sync:
+		if preference == "local" or preference == "client":
+			send(stdout, INF, "config-preference set to local/client, loading local config information")
+			remote_config = get_database_config(client)
+			if remote_config != config["variable"]:
+				set_database_config(client, config["variable"])
+				send(stdout, INF, "database config was different and was updated")
+			return
+		elif preference == "remote" or preference == "database":
+			send(stdout, INF, "config-preference set to remote/database, loading remote config information")
+			remote_config = get_database_config(client)
+			if remote_config != config["variable"]:
+				config["variable"] = remote_config
+				if save_config(config_path, config):
+					send(stderr, ERR, "local config was different but could not be updated")
+					config = 1
+					return
+				send(stdout, INF, "local config was different and was updated")
+			return
+		else:
+			send(stderr, ERR, "config-preference field in config must be \"local\"/\"client\" or \"remote\"/\"database\"")
+			config = 1
+			return
+	else:
+		if preference == "local" or preference == "client":
+			send(stdout, INF, "config-preference set to local/client, loading local config information")
+			return
+		elif preference == "remote" or preference == "database":
+			send(stdout, INF, "config-preference set to remote/database, loading database config information")
+			config["variable"] = get_database_config(client)
+			return
+		else:
+			send(stderr, ERR, "config-preference field in config must be \"local\"/\"client\" or \"remote\"/\"database\"")
+			config = 1
+			return
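One behavior worth flagging in resolve_config_conflicts above: mutating the dict (config["variable"] = remote_config) is visible to the caller, but the failure signal config = 1 only rebinds the local parameter name, so main()'s subsequent if config == 1 check can never fire. A minimal illustration of the difference, separate from the patch itself:

    def resolve(config):
        config["variable"] = "updated"  # mutation: the caller sees this
        config = 1                      # rebinding: the caller never sees this

    cfg = {"variable": None}
    resolve(cfg)
    print(cfg)       # {'variable': 'updated'}
    print(cfg == 1)  # False — the error path in main() is unreachable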
"local" or preference == "client": + send(stdout, INF, "config-preference set to local/client, loading local config information") + return + elif preference == "remote" or preference == "database": + send(stdout, INF, "config-preference set to remote/database, loading database config information") + config["variable"] = get_database_config(client) + return + else: + send(stderr, ERR, "config-preference field in config must be \"local\"/\"client\" or \"remote\"/\"database\"") + config = 1 return - - sys.exit(loop_exit_code) def load_config(path, config_vector): try: @@ -435,13 +496,10 @@ def load_config(path, config_vector): return 1 def save_config(path, config_vector): - try: - f = open(path) - json.dump(config_vector) - f.close() - return 0 - except: - return 1 + f = open(path, "w+") + json.dump(config_vector, f, ensure_ascii=False, indent=4) + f.close() + return 0 def start(pid_path, verbose = False, profile = False, debug = False):