# Titan Robotics Team 2022: Superscript Script # Written by Arthur Lu, Jacob Levine, and Dev Singh # Notes: # setup: __version__ = "1.0.0" # changelog should be viewed using print(analysis.__changelog__) __changelog__ = """changelog: 1.0.0: - superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems - linux superscript daemon has integrated websocket output to monitor progress/status remotely - linux daemon now sends stderr to errorlog.log - added verbose option to linux superscript to allow for interactive output - moved pymongo import to superscript.py - added profile option to linux superscript to profile runtime of script - reduced memory usage slightly by consolidating the unwrapped input data - added debug option, which performs one loop of analysis and dumps results to local files - added event and time delay options to config - event delay pauses loop until even listener recieves an update - time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop - added options to pull config information from database (reatins option to use local config file) - config-preference option selects between prioritizing local config and prioritizing database config - synchronize-config option selects whether to update the non prioritized config with the prioritized one - divided config options between persistent ones (keys), and variable ones (everything else) 0.9.3: - improved data loading performance by removing redundant PyMongo client creation (120s to 14s) - passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions 0.9.2: - removed unessasary imports from data - minor changes to interface 0.9.1: - fixed bugs in configuration item loading exception handling 0.9.0: - moved printing and logging related functions to interface.py (changelog will stay in this file) - changed function return files for load_config and save_config to standard C values (0 for success, 1 for error) - added local variables for config location - moved dataset getting and setting functions to dataset.py (changelog will stay in this file) - moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py 0.8.6: - added proper main function 0.8.5: - added more gradeful KeyboardInterrupt exiting - redirected stderr to errorlog.txt 0.8.4: - added better error message for missing config.json - added automatic config.json creation - added splash text with version and system info 0.8.3: - updated matchloop with new regression format (requires tra_analysis 3.x) 0.8.2: - readded while true to main function - added more thread config options 0.8.1: - optimized matchloop further by bypassing GIL 0.8.0: - added multithreading to matchloop - tweaked user log 0.7.0: - finished implementing main function 0.6.2: - integrated get_team_rankings.py as get_team_metrics() function - integrated visualize_pit.py as graph_pit_histogram() function 0.6.1: - bug fixes with analysis.Metric() calls - modified metric functions to use config.json defined default values 0.6.0: - removed main function - changed load_config function - added save_config function - added load_match function - renamed simpleloop to matchloop - moved simplestats function inside matchloop - renamed load_metrics to load_metric - renamed metricsloop to metricloop - split push to database functions amon push_match, push_metric, push_pit - moved 0.5.2: - made changes due to refactoring of analysis 0.5.1: - text fixes - removed matplotlib requirement 0.5.0: - improved user interface 0.4.2: - removed unessasary code 0.4.1: - fixed bug where X range for regression was determined before sanitization - better sanitized data 0.4.0: - fixed spelling issue in __changelog__ - addressed nan bug in regression - fixed errors on line 335 with metrics calling incorrect key "glicko2" - fixed errors in metrics computing 0.3.0: - added analysis to pit data 0.2.1: - minor stability patches - implemented db syncing for timestamps - fixed bugs 0.2.0: - finalized testing and small fixes 0.1.4: - finished metrics implement, trueskill is bugged 0.1.3: - working 0.1.2: - started implement of metrics 0.1.1: - cleaned up imports 0.1.0: - tested working, can push to database 0.0.9: - tested working - prints out stats for the time being, will push to database later 0.0.8: - added data import - removed tba import - finished main method 0.0.7: - added load_config - optimized simpleloop for readibility - added __all__ entries - added simplestats engine - pending testing 0.0.6: - fixes 0.0.5: - imported pickle - created custom database object 0.0.4: - fixed simpleloop to actually return a vector 0.0.3: - added metricsloop which is unfinished 0.0.2: - added simpleloop which is untested until data is provided 0.0.1: - created script - added analysis, numba, numpy imports """ __author__ = ( "Arthur Lu ", "Jacob Levine ", ) __all__ = [ "load_config", "save_config", ] # imports: import json import math from multiprocessing import Pool, freeze_support import os import pymongo import sys import time import traceback import warnings import zmq from interface import splash, log, ERR, INF, stdout, stderr from data import get_previous_time, set_current_time, get_database_config, set_database_config, check_new_database_matches from module import Match, Metric, Pit class ConfigurationError (Exception): code = None def __init__(self, str, code): super().__init__(str) self.code = code config_path = "config.json" sample_json = """{ "persistent":{ "key":{ "database":"", "tba":"" }, "config-preference":"local", "synchronize-config":false }, "variable":{ "max-threads":0.5, "competition":"", "team":"", "event-delay":false, "loop-delay":0, "reportable":true, "teams":[], "modules":{ "match":{ "tests":{ "balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"], "balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"] } }, "metric":{ "tests":{ "elo":{ "score":1500, "N":400, "K":24 }, "gl2":{ "score":1500, "rd":250, "vol":0.06 }, "ts":{ "mu":25, "sigma":8.33 } } }, "pit":{ "tests":{ "wheel-mechanism":true, "low-balls":true, "high-balls":true, "wheel-success":true, "strategic-focus":true, "climb-mechanism":true, "attitude":true } } } } }""" def main(send, verbose = False, profile = False, debug = False): def close_all(): if "exec_threads" in locals(): exec_threads.terminate() exec_threads.join() exec_threads.close() if "client" in locals(): client.close() if "f" in locals(): f.close() warnings.filterwarnings("ignore") exit_code = 0 if verbose: splash(__version__) modules = {"match": Match, "metric": Metric, "pit": Pit} while True: try: loop_start = time.time() send(stdout, INF, "current time: " + str(loop_start)) config = {} if load_config(config_path, config): raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110) send(stdout, INF, "found and loaded config at <" + config_path + ">") apikey, tbakey, preference, sync = parse_config_persistent(send, config) send(stdout, INF, "found and loaded database and tba keys") client = pymongo.MongoClient(apikey) send(stdout, INF, "established connection to database") send(stdout, INF, "analysis backtimed to: " + str(get_previous_time(client))) config = resolve_config_conflicts(send, client, config, preference, sync) exec_threads, competition, config_modules = parse_config_variable(send, config) for m in config_modules: if m in modules: start = time.time() current_module = modules[m](config_modules[m], client, tbakey, loop_start, competition) valid = current_module.validate_config() if not valid: continue current_module.load_data() current_module.process_data(exec_threads) current_module.push_results() send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds") if debug: f = open(m + ".log", "w+") json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4) f.close() set_current_time(client, loop_start) close_all() send(stdout, INF, "closed threads and database client") send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping") if profile: exit_code = 0 break event_delay = config["variable"]["event-delay"] if event_delay: send(stdout, INF, "loop delayed until database returns new matches") new_match = False while not new_match: time.sleep(1) new_match = check_new_database_matches(client, competition) send(stdout, INF, "database returned new matches") else: loop_delay = float(config["variable"]["loop-delay"]) remaining_time = loop_delay - (time.time() - loop_start) if remaining_time > 0: send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds") time.sleep(remaining_time) except KeyboardInterrupt: send(stdout, INF, "detected KeyboardInterrupt, killing threads") close_all() send(stdout, INF, "terminated threads, exiting") break except ConfigurationError as e: send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code) traceback.print_exc(file = stderr) exit_code = 1 close_all() break except Exception as e: send(stderr, ERR, "encountered an exception while running", code = 1) traceback.print_exc(file = stderr) exit_code = 1 close_all() break return exit_code def parse_config_persistent(send, config): try: apikey = config["persistent"]["key"]["database"] except: raise ConfigurationError("persistent/key/database field is invalid or missing", 111) try: tbakey = config["persistent"]["key"]["tba"] except: raise ConfigurationError("persistent/key/tba field is invalid or missing", 112) try: preference = config["persistent"]["config-preference"] except: raise ConfigurationError("persistent/config-preference field is invalid or missing", 113) try: sync = config["persistent"]["synchronize-config"] except: raise ConfigurationError("persistent/synchronize-config field is invalid or missing", 114) if apikey == None or apikey == "": raise ConfigurationError("persistent/key/database field is empty", 115) if tbakey == None or tbakey == "": raise ConfigurationError("persistent/key/tba field is empty", 116) if preference == None or preference == "": raise ConfigurationError("persistent/config-preference field is empty", 117) if sync != True and sync != False: raise ConfigurationError("persistent/synchronize-config field is empty", 118) return apikey, tbakey, preference, sync def parse_config_variable(send, config): sys_max_threads = os.cpu_count() try: cfg_max_threads = config["variable"]["max-threads"] except: raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109) if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 : alloc_processes = sys_max_threads + cfg_max_threads elif cfg_max_threads > 0 and cfg_max_threads < 1: alloc_processes = math.floor(cfg_max_threads * sys_max_threads) elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads: alloc_processes = cfg_max_threads elif cfg_max_threads == 0: alloc_processes = sys_max_threads else: raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, 110) try: exec_threads = Pool(processes = alloc_processes) except Exception as e: send(stderr, INF, e) raise ConfigurationError("unable to start threads", 200) send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads") try: competition = config["variable"]["competition"] except: raise ConfigurationError("variable/competition field is invalid or missing", 101) try: modules = config["variable"]["modules"] except: raise ConfigurationError("variable/modules field is invalid or missing", 102) if competition == None or competition == "": raise ConfigurationError("variable/competition field is empty", 105) if modules == None: raise ConfigurationError("variable/modules field is empty", 106) send(stdout, INF, "found and loaded competition, match, metrics, pit from config") return exec_threads, competition, modules def resolve_config_conflicts(send, client, config, preference, sync): if sync: if preference == "local" or preference == "client": send(stdout, INF, "config-preference set to local/client, loading local config information") remote_config = get_database_config(client) if remote_config != config["variable"]: set_database_config(client, config["variable"]) send(stdout, INF, "database config was different and was updated") return config elif preference == "remote" or preference == "database": send(stdout, INF, "config-preference set to remote/database, loading remote config information") remote_config= get_database_config(client) if remote_config != config["variable"]: config["variable"] = remote_config if save_config(config_path, config): raise ConfigurationError("local config was different but could not be updated", 121) send(stdout, INF, "local config was different and was updated") return config else: raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120) else: if preference == "local" or preference == "client": send(stdout, INF, "config-preference set to local/client, loading local config information") return config elif preference == "remote" or preference == "database": send(stdout, INF, "config-preference set to remote/database, loading database config information") config["variable"] = get_database_config(client) return config else: raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120) def load_config(path, config_vector): try: f = open(path, "r") config_vector.update(json.load(f)) f.close() return 0 except: f = open(path, "w") f.write(sample_json) f.close() return 1 def save_config(path, config_vector): f = open(path, "w+") json.dump(config_vector, f, ensure_ascii=False, indent=4) f.close() return 0 def start(pid_path, verbose = False, profile = False, debug = False): if profile: def send(target, level, message, code = 0): pass import cProfile, pstats, io profile = cProfile.Profile() profile.enable() exit_code = main(send, profile = True) profile.disable() f = open("profile.txt", 'w+') ps = pstats.Stats(profile, stream = f).sort_stats('cumtime') ps.print_stats() sys.exit(exit_code) elif verbose: exit_code = main(log, verbose = verbose) sys.exit(exit_code) elif debug: exit_code = main(log, verbose = True, profile = True, debug = debug) sys.exit(exit_code) else: f = open('errorlog.log', 'w+') with daemon.DaemonContext( working_directory = os.getcwd(), pidfile = pidfile.TimeoutPIDLockFile(pid_path), stderr = f ): context = zmq.Context() socket = context.socket(zmq.PUB) socket.bind("tcp://*:5678") socket.send(b'status') def send(target, level, message, code = 0): socket.send(bytes("status: " + message, 'utf-8')) exit_code = main(send) socket.close() f.close() sys.exit(exit_code) def stop(pid_path): try: pf = open(pid_path, 'r') pid = int(pf.read().strip()) pf.close() except IOError: sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n") return try: while True: os.kill(pid, SIGTERM) time.sleep(0.01) except OSError as err: err = str(err) if err.find("No such process") > 0: if os.path.exists(pid_path): os.remove(pid_path) else: traceback.print_exc(file = stderr) sys.exit(1) def restart(pid_path): stop(pid_path) start(pid_path) if __name__ == "__main__": if sys.platform.startswith("win"): freeze_support() start(None, verbose = True) else: import daemon from daemon import pidfile from signal import SIGTERM pid_path = "tra-daemon.pid" if len(sys.argv) == 2: if 'start' == sys.argv[1]: start(pid_path) elif 'stop' == sys.argv[1]: stop(pid_path) elif 'restart' == sys.argv[1]: restart(pid_path) elif 'verbose' == sys.argv[1]: start(None, verbose = True) elif 'profile' == sys.argv[1]: start(None, profile=True) elif 'debug' == sys.argv[1]: start(None, debug = True) else: print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0]) sys.exit(2) sys.exit(0) else: print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0]) sys.exit(2)