diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index 160a9b9..b95bbb3 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -4,4 +4,4 @@ RUN apt-get -y update
 RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
 RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
 RUN ln -s $(which pip3) /usr/bin/pip
-RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
\ No newline at end of file
+RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
index c380475..972a854 100644
--- a/.devcontainer/devcontainer.json
+++ b/.devcontainer/devcontainer.json
@@ -8,16 +8,10 @@
 		"python.pythonPath": "/usr/local/bin/python",
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
-		"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
-		"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
-		"python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
-		"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
-		"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
-		"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
-		"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
-		"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
-		"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
-		"python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
+		"python.linting.pylintPath": "/usr/local/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/bin/pytest",
+		"editor.tabSize": 4,
+		"editor.insertSpaces": false
 	},
 	"extensions": [
 		"mhutchie.git-graph",
@@ -25,4 +19,4 @@
 		"waderyan.gitblame"
 	],
 	"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
-	}
\ No newline at end of file
+	}
diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt
new file mode 100644
index 0000000..3225fc7
--- /dev/null
+++ b/.devcontainer/requirements.txt
@@ -0,0 +1,16 @@
+cerberus
+dnspython
+numpy
+pandas
+pyinstaller
+pylint
+pymongo
+pyparsing
+pytest
+python-daemon
+pyzmq
+requests
+scikit-learn
+scipy
+six
+tra-analysis
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index dd84ea7..0000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-name: Bug report
-about: Create a report to help us improve
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Describe the bug**
-A clear and concise description of what the bug is.
-
-**To Reproduce**
-Steps to reproduce the behavior:
-1. Go to '...'
-2. Click on '....'
-3. Scroll down to '....'
-4. See error
-
-**Expected behavior**
-A clear and concise description of what you expected to happen.
-
-**Screenshots**
-If applicable, add screenshots to help explain your problem.
-
-**Desktop (please complete the following information):**
- - OS: [e.g. iOS]
- - Browser [e.g. chrome, safari]
- - Version [e.g. 22]
-
-**Smartphone (please complete the following information):**
- - Device: [e.g. iPhone6]
- - OS: [e.g. iOS8.1]
- - Browser [e.g. stock browser, safari]
- - Version [e.g. 22]
-
-**Additional context**
-Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index bbcbbe7..0000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.gitignore b/.gitignore
index 0e3db09..1c0b0fe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -11,4 +11,14 @@
 **/errorlog.txt
 
 /dist/superscript.*
-/dist/superscript
\ No newline at end of file
+/dist/superscript
+**/*.pid
+
+**/profile.*
+
+**/*.log
+**/errorlog.txt
+/dist/*
+
+slurm-tra-superscript.out
+config*.json
diff --git a/competition/config.py b/competition/config.py
new file mode 100644
index 0000000..006039c
--- /dev/null
+++ b/competition/config.py
@@ -0,0 +1,251 @@
+import json
+from exceptions import ConfigurationError
+from cerberus import Validator
+
+from data import set_database_config, get_database_config
+
+class Configuration:
+
+	path = None
+	config = {}
+
+	_sample_config = {
+		"persistent":{
+			"key":{
+				"database":"",
+				"tba":"",
+				"tra":{
+					"CLIENT_ID":"",
+					"CLIENT_SECRET":"",
+					"url": ""
+				}
+			},
+			"config-preference":"local",
+			"synchronize-config":False
+		},
+		"variable":{
+			"event-delay":False,
+			"loop-delay":0,
+			"competition": "2020ilch",
+			"modules":{
+				"match":{
+					"tests":{
+						"balls-blocked":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-collected":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-lower-teleop":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-lower-auto":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-started":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-upper-teleop":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						],
+						"balls-upper-auto":[
+							"basic_stats",
+							"historical_analysis",
+							"regression_linear",
+							"regression_logarithmic",
+							"regression_exponential",
+							"regression_polynomial",
+							"regression_sigmoidal"
+						]
+					}
+				},
+				"metric":{
+					"tests":{
+						"elo":{
+							"score":1500,
+							"N":400,
+							"K":24
+						},
+						"gl2":{
+							"score":1500,
+							"rd":250,
+							"vol":0.06
+						},
+						"ts":{
+							"mu":25,
+							"sigma":8.33
+						}
+					}
+				},
+				"pit":{
+					"tests":{
+						"wheel-mechanism":True,
+						"low-balls":True,
+						"high-balls":True,
+ "wheel-success":True, + "strategic-focus":True, + "climb-mechanism":True, + "attitude":True + } + } + } + } + } + + _validation_schema = { + "persistent": { + "type": "dict", + "required": True, + "require_all": True, + "schema": { + "key": { + "type": "dict", + "require_all":True, + "schema": { + "database": {"type":"string"}, + "tba": {"type": "string"}, + "tra": { + "type": "dict", + "require_all": True, + "schema": { + "CLIENT_ID": {"type": "string"}, + "CLIENT_SECRET": {"type": "string"}, + "url": {"type": "string"} + } + } + } + }, + "config-preference": {"type": "string", "required": True}, + "synchronize-config": {"type": "boolean", "required": True} + } + } + } + + def __init__(self, path): + self.path = path + self.load_config() + self.validate_config() + + def load_config(self): + try: + f = open(self.path, "r") + self.config.update(json.load(f)) + f.close() + except: + self.config = self._sample_config + self.save_config() + f.close() + raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path") + + def save_config(self): + f = open(self.path, "w+") + json.dump(self.config, f, ensure_ascii=False, indent=4) + f.close() + + def validate_config(self): + v = Validator(self._validation_schema, allow_unknown = True) + isValidated = v.validate(self.config) + + if not isValidated: + raise ConfigurationError("config validation error: " + v.errors) + + def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE + if name == "persistent": + return self.config["persistent"] + elif name == "key": + return self.config["persistent"]["key"] + elif name == "database": + # soon to be deprecated + return self.config["persistent"]["key"]["database"] + elif name == "tba": + return self.config["persistent"]["key"]["tba"] + elif name == "tra": + return self.config["persistent"]["key"]["tra"] + elif name == "priority": + return self.config["persistent"]["config-preference"] + elif name == "sync": + return self.config["persistent"]["synchronize-config"] + elif name == "variable": + return self.config["variable"] + elif name == "event_delay": + return self.config["variable"]["event-delay"] + elif name == "loop_delay": + return self.config["variable"]["loop-delay"] + elif name == "competition": + return self.config["variable"]["competition"] + elif name == "modules": + return self.config["variable"]["modules"] + else: + return None + + def __getitem__(self, key): + return self.config[key] + + def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme + sync = self.sync + priority = self.priority + + if sync: + if priority == "local" or priority == "client": + logger.info("config-preference set to local/client, loading local config information") + remote_config = get_database_config(client) + if remote_config != self.config["variable"]: + set_database_config(client, self.config["variable"]) + logger.info("database config was different and was updated") + # no change to config + elif priority == "remote" or priority == "database": + logger.info("config-preference set to remote/database, loading remote config information") + remote_config = get_database_config(client) + if remote_config != self.config["variable"]: + self.config["variable"] = remote_config + self.save_config() + # change variable to match remote + logger.info("local config was different and was updated") + else: + raise ConfigurationError("persistent/config-preference field must be 
\"local\"/\"client\" or \"remote\"/\"database\"") + else: + if priority == "local" or priority == "client": + logger.info("config-preference set to local/client, loading local config information") + # no change to config + elif priority == "remote" or priority == "database": + logger.info("config-preference set to remote/database, loading database config information") + self.config["variable"] = get_database_config(client) + # change variable to match remote without updating local version + else: + raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"") \ No newline at end of file diff --git a/competition/data.py b/competition/data.py new file mode 100644 index 0000000..2512f22 --- /dev/null +++ b/competition/data.py @@ -0,0 +1,218 @@ +from calendar import c +import requests +import pull +import pandas as pd +import json + +def pull_new_tba_matches(apikey, competition, last_match): + api_key= apikey + x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}) + json = x.json() + out = [] + for i in json: + if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match : + out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]}) + out.sort(key=lambda x: x['match']) + return out + +def pull_new_tba_matches_manual(apikey, competition, cutoff): + filename = competition+"-wins.json" + with open(filename, 'r') as f: + data = json.load(f) + return data + +def get_team_match_data(client, competition, team_num): + db = client.data_scouting + mdata = db.matchdata + out = {} + for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}): + out[i['match']] = i['data'] + return pd.DataFrame(out) + +def clear_metrics(client, competition): + db = client.data_processing + data = db.team_metrics + data.delete_many({competition: competition}) + return True + +def get_team_pit_data(client, competition, team_num): + db = client.data_scouting + mdata = db.pitdata + out = {} + return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"] + +def get_team_metrics_data(client, competition, team_num): + db = client.data_processing + mdata = db.team_metrics + temp = mdata.find_one({"team": team_num}) + if temp != None: + if competition in temp['metrics'].keys(): + temp = temp['metrics'][competition] + else : + temp = None + else: + temp = None + return temp + +def get_match_data_formatted(client, competition): + teams_at_comp = pull.get_teams_at_competition(competition) + out = {} + for team in teams_at_comp: + try: + out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict()) + except: + pass + return out + +def get_metrics_data_formatted(client, competition): + teams_at_comp = pull.get_teams_at_competition(competition) + out = {} + for team in teams_at_comp: + try: + out[int(team)] = get_team_metrics_data(client, competition, int(team)) + except: + pass + return out + +def get_pit_data_formatted(client, competition): + x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition) + x = x.json() + x = x['data'] + x = x.keys() + out = {} + for i in x: + try: + out[int(i)] = get_team_pit_data(client, competition, int(i)) + except: + pass + 
diff --git a/competition/data.py b/competition/data.py
new file mode 100644
index 0000000..2512f22
--- /dev/null
+++ b/competition/data.py
@@ -0,0 +1,218 @@
+import requests
+import pull
+import pandas as pd
+import json
+
+def pull_new_tba_matches(apikey, competition, last_match):
+	api_key = apikey
+	x = requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
+	data = x.json()
+	out = []
+	for i in data:
+		if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match:
+			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
+	out.sort(key=lambda x: x['match'])
+	return out
+
+def pull_new_tba_matches_manual(apikey, competition, cutoff):
+	filename = competition+"-wins.json"
+	with open(filename, 'r') as f:
+		data = json.load(f)
+	return data
+
+def get_team_match_data(client, competition, team_num):
+	db = client.data_scouting
+	mdata = db.matchdata
+	out = {}
+	for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
+		out[i['match']] = i['data']
+	return pd.DataFrame(out)
+
+def clear_metrics(client, competition):
+	db = client.data_processing
+	data = db.team_metrics
+	data.delete_many({"competition": competition})
+	return True
+
+def get_team_pit_data(client, competition, team_num):
+	db = client.data_scouting
+	mdata = db.pitdata
+	out = {}
+	return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
+
+def get_team_metrics_data(client, competition, team_num):
+	db = client.data_processing
+	mdata = db.team_metrics
+	temp = mdata.find_one({"team": team_num})
+	if temp != None:
+		if competition in temp['metrics'].keys():
+			temp = temp['metrics'][competition]
+		else:
+			temp = None
+	else:
+		temp = None
+	return temp
+
+def get_match_data_formatted(client, competition):
+	teams_at_comp = pull.get_teams_at_competition(competition)
+	out = {}
+	for team in teams_at_comp:
+		try:
+			out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
+		except:
+			pass
+	return out
+
+def get_metrics_data_formatted(client, competition):
+	teams_at_comp = pull.get_teams_at_competition(competition)
+	out = {}
+	for team in teams_at_comp:
+		try:
+			out[int(team)] = get_team_metrics_data(client, competition, int(team))
+		except:
+			pass
+	return out
+
+def get_pit_data_formatted(client, competition):
+	x = requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
+	x = x.json()
+	x = x['data']
+	x = x.keys()
+	out = {}
+	for i in x:
+		try:
+			out[int(i)] = get_team_pit_data(client, competition, int(i))
+		except:
+			pass
+	return out
+
+def get_pit_variable_data(client, competition):
+	db = client.data_processing
+	mdata = db.team_pit
+	out = {}
+	return mdata.find()
+
+def get_pit_variable_formatted(client, competition):
+	temp = get_pit_variable_data(client, competition)
+	out = {}
+	for i in temp:
+		out[i["variable"]] = i["data"]
+	return out
+
+def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
+	db = client[dbname]
+	mdata = db[colname]
+	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
+
+def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
+	db = client[dbname]
+	mdata = db[colname]
+	mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)
+
+def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
+	db = client[dbname]
+	mdata = db[colname]
+	mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
+
+def get_analysis_flags(client, flag):
+	db = client.data_processing
+	mdata = db.flags
+	return mdata.find_one({"_id": "2022"})
+
+def set_analysis_flags(client, flag, data):
+	db = client.data_processing
+	mdata = db.flags
+	return mdata.update_one({"_id": "2022"}, {"$set": data})
+
+def unkeyify_2l(layered_dict):
+	out = {}
+	for i in layered_dict.keys():
+		add = []
+		sortkey = []
+		for j in layered_dict[i].keys():
+			add.append([j,layered_dict[i][j]])
+		add.sort(key = lambda x: x[0])
+		out[i] = list(map(lambda x: x[1], add))
+	return out
+
+def get_previous_time(client):
+
+	previous_time = get_analysis_flags(client, "latest_update")
+
+	if previous_time == None:
+
+		set_analysis_flags(client, "latest_update", 0)
+		previous_time = 0
+
+	else:
+
+		previous_time = previous_time["latest_update"]
+
+	return previous_time
+
+def set_current_time(client, current_time):
+
+	set_analysis_flags(client, "latest_update", {"latest_update":current_time})
+
+def get_database_config(client):
+
+	remote_config = get_analysis_flags(client, "config")
+	return remote_config["config"] if remote_config != None else None
+
+def set_database_config(client, config):
+
+	set_analysis_flags(client, "config", {"config": config})
+
+def load_match(client, competition):
+
+	return get_match_data_formatted(client, competition)
+
+def load_metric(client, competition, match, group_name, metrics):
+
+	group = {}
+
+	for team in match[group_name]:
+
+		db_data = get_team_metrics_data(client, competition, team)
+
+		if db_data == None:
+
+			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
+
+			group[team] = {"gl2": gl2}
+
+		else:
+
+			gl2 = db_data["gl2"]
+
+			group[team] = {"gl2": gl2}
+
+	return group
+
+def load_pit(client, competition):
+
+	return get_pit_data_formatted(client, competition)
+
+def push_match(client, competition, results):
+
+	for team in results:
+
+		push_team_tests_data(client, competition, team, results[team])
+
+def push_metric(client, competition, metric):
+
+	for team in metric:
+
+		push_team_metrics_data(client, competition, team, metric[team])
+
+def push_pit(client, competition, pit):
+
+	for variable in pit:
+
+		push_team_pit_data(client, competition, variable, pit[variable])
+
+def check_new_database_matches(client, competition):
+
+	return True
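
Worth noting for reviewers: `unkeyify_2l` flattens each team's frame of `{variable: {match: value}}` into match-ordered value lists. A worked example with made-up numbers:

```python
# Illustrative only: input/output shape for data.unkeyify_2l above.
layered = {"balls-started": {2: 5, 1: 3, 3: 6}}
# entries are sorted by match key, then reduced to their values:
# unkeyify_2l(layered) == {"balls-started": [3, 5, 6]}
```
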
diff --git a/competition/dep.py b/competition/dep.py
new file mode 100644
index 0000000..891dfb3
--- /dev/null
+++ b/competition/dep.py
@@ -0,0 +1,132 @@
+# contains deprecated functions, not to be used unless necessary!
+
+import json
+
+sample_json = """
+{
+	"persistent":{
+		"key":{
+			"database":"",
+			"tba":"",
+			"tra":{
+				"CLIENT_ID":"",
+				"CLIENT_SECRET":"",
+				"url": ""
+			}
+		},
+		"config-preference":"local",
+		"synchronize-config":false
+	},
+	"variable":{
+		"max-threads":0.5,
+		"team":"",
+		"event-delay":false,
+		"loop-delay":0,
+		"reportable":true,
+		"teams":[
+
+		],
+		"modules":{
+			"match":{
+				"tests":{
+					"balls-blocked":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-collected":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-lower-teleop":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-lower-auto":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-started":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-upper-teleop":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					],
+					"balls-upper-auto":[
+						"basic_stats",
+						"historical_analysis",
+						"regression_linear",
+						"regression_logarithmic",
+						"regression_exponential",
+						"regression_polynomial",
+						"regression_sigmoidal"
+					]
+				}
+			},
+			"metric":{
+				"tests":{
+					"gl2":{
+						"score":1500,
+						"rd":250,
+						"vol":0.06
+					}
+				}
+			},
+			"pit":{
+				"tests":{
+					"wheel-mechanism":true,
+					"low-balls":true,
+					"high-balls":true,
+					"wheel-success":true,
+					"strategic-focus":true,
+					"climb-mechanism":true,
+					"attitude":true
+				}
+			}
+		}
+	}
+}
+"""
+
+def load_config(path, config_vector):
+	try:
+		f = open(path, "r")
+		config_vector.update(json.load(f))
+		f.close()
+		return 0
+	except:
+		f = open(path, "w")
+		f.write(sample_json)
+		f.close()
+		return 1
\ No newline at end of file
diff --git a/competition/exceptions.py b/competition/exceptions.py
new file mode 100644
index 0000000..64f97dd
--- /dev/null
+++ b/competition/exceptions.py
@@ -0,0 +1,7 @@
+class APIError(Exception):
+	def __init__(self, message):
+		super().__init__(message)
+
+class ConfigurationError(Exception):
+	def __init__(self, message):
+		super().__init__(message)
\ No newline at end of file
diff --git a/competition/interface.py b/competition/interface.py
new file mode 100644
index 0000000..5eef26b
--- /dev/null
+++ b/competition/interface.py
@@ -0,0 +1,91 @@
+from logging import Logger as L
+import datetime
+import platform
+import json
+
+class Logger(L):
+
+	file = None
+
+	levels = {
+		0: "",
+		10:"[DEBUG]   ",
+		20:"[INFO]    ",
+		30:"[WARNING] ",
+		40:"[ERROR]   ",
+		50:"[CRITICAL]",
+	}
+
+	targets = []
+
+	def __init__(self, verbose, profile, debug, file = None):
+		super().__init__("tra_logger")
+
+		self.file = file
+
+		if file != None:
+			self.targets.append(self._send_file)
+
+		if profile:
+			self.targets.append(self._send_null)
+		elif verbose:
+			self.targets.append(self._send_scli)
+		elif debug:
+			self.targets.append(self._send_scli)
+		else:
+			self.targets.append(self._send_null)
+
+	def _send_null(self, msg):
+		pass
+
+	def _send_scli(self, msg):
+		print(msg)
+
+	def _send_file(self, msg):
+		f = open(self.file, 'a')
+		f.write(msg + "\n")
+		f.close()
+
+	def get_time_formatted(self):
+		return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")
+
+	def log(self, level, msg):
+		for t in self.targets:
+			t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)
+
+	def debug(self, msg):
+		self.log(10, msg)
+
+	def info(self, msg):
+		self.log(20, msg)
+
+	def warning(self, msg):
+		self.log(30, msg)
+
+	def error(self, msg):
+		self.log(40, msg)
+
+	def critical(self, msg):
+		self.log(50, msg)
+
+	def splash(self, version):
+
+		def hrule():
+			self.log(0, "#"+38*"-"+"#")
+		def box(s):
+			temp = "|"
+			temp += s
+			temp += (40-len(s)-2)*" "
+			temp += "|"
+			self.log(0, temp)
+
+		hrule()
+		box(" superscript version: " + version)
+		box(" os: " + platform.system())
+		box(" python: " + platform.python_version())
+		hrule()
+
+	def save_module_to_file(self, module, data, results):
+		f = open(module + ".log", "w")
+		json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
+		f.close()
\ No newline at end of file
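
A minimal sketch of the `Logger` above (timestamps illustrative): `verbose` mode prints to stdout, and passing `file=` additionally appends each line to that path.

```python
# Sketch only: exercising interface.Logger as defined above.
from interface import Logger

logger = Logger(verbose=True, profile=False, debug=False, file="logfile.log")
logger.splash("1.0.0")           # boxed version/os/python banner
logger.info("started analysis")  # e.g. "2022/03/05 12:00:00 | [INFO]    : started analysis"
```
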
"regression_polynomial": + return an.regression(ranges, data, ['ply']) + + if test == "regression_sigmoidal": + return an.regression(ranges, data, ['sig']) + + def _process_data(self): + + tests = self.config["tests"] + data = self.data + + input_vector = [] + + for team in data: + + for variable in data[team]: + + if variable in tests: + + for test in tests[variable]: + + input_vector.append((team, variable, test, data[team][variable])) + + self.data = input_vector + self.results = [] + for test_var_data in self.data: + self.results.append(self._simplestats(test_var_data)) + + def _push_results(self): + + short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"} + + class AutoVivification(dict): + def __getitem__(self, item): + try: + return dict.__getitem__(self, item) + except KeyError: + value = self[item] = type(self)() + return value + + result_filtered = self.results + input_vector = self.data + + return_vector = AutoVivification() + + i = 0 + + for result in result_filtered: + + filtered = input_vector[i][2] + + try: + short = short_mapping[filtered] + return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short] + except KeyError: # not in mapping + return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result + + i += 1 + + self.results = return_vector + + d.push_match(self.apikey, self.competition, self.results) + +class Metric (Module): + + config = None + apikey = None + tbakey = None + timestamp = None + competition = None + + data = None + results = None + + def __init__(self, config, apikey, tbakey, timestamp, competition): + self.config = config + self.apikey = apikey + self.tbakey = tbakey + self.timestamp = timestamp + self.competition = competition + + def validate_config(self): + return True, "" + + def run(self): + self._load_data() + self._process_data() + self._push_results() + + def _load_data(self): + self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match'] + print("Previous last match", self.last_match) + self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match) + + def _process_data(self): + + self.results = {} + self.match = self.last_match + matches = self.data + red = {} + blu = {} + for match in tqdm(matches, desc="Metrics"): # grab matches and loop through each one + self.match = max(self.match, int(match['match'])) + red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red + blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue + + gl2_red_score_total = 0 + gl2_blu_score_total = 0 + + gl2_red_rd_total = 0 + gl2_blu_rd_total = 0 + + gl2_red_vol_total = 0 + gl2_blu_vol_total = 0 + + for team in red: # for each team in red, add up gl2 score components + + gl2_red_score_total += red[team]["gl2"]["score"] + gl2_red_rd_total += red[team]["gl2"]["rd"] + gl2_red_vol_total += red[team]["gl2"]["vol"] + + for team in blu: # for each team in blue, add up gl2 score components + + gl2_blu_score_total += blu[team]["gl2"]["score"] + gl2_blu_rd_total += blu[team]["gl2"]["rd"] + gl2_blu_vol_total += blu[team]["gl2"]["vol"] + + + red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3 + blu_gl2 = {"score": 
+			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the gl2 components over the red alliance
+			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the gl2 components over the blue alliance
+
+			if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}
+
+				observations = {"red": 1, "blu": 0}
+
+			elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}
+
+				observations = {"red": 0, "blu": 1}
+
+			else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}
+
+				observations = {"red": 0.5, "blu": 0.5}
+
+			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
+			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue
+
+			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
+			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue
+
+			for team in red: # for each team on red, add the previous score with the delta to find the new score
+
+				red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
+				red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
+				red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
+
+			for team in blu: # for each team on blue, add the previous score with the delta to find the new score
+
+				blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
+				blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
+				blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
+
+			temp_vector = {}
+			temp_vector.update(red) # merge red and blue scores into a temporary vector
+			temp_vector.update(blu)
+
+			self.results[match['match']] = temp_vector
+
+			d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
+		print("New last match", self.match)
+		d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})
+	def _push_results(self):
+		pass
+
+class Pit(Module):
+
+	config = None
+	apikey = None
+	tbakey = None
+	timestamp = None
+	competition = None
+
+	data = None
+	results = None
+
+	def __init__(self, config, apikey, tbakey, timestamp, competition):
+		self.config = config
+		self.apikey = apikey
+		self.tbakey = tbakey
+		self.timestamp = timestamp
+		self.competition = competition
+
+	def validate_config(self):
+		return True, ""
+
+	def run(self):
+		self._load_data()
+		self._process_data()
+		self._push_results()
+
+	def _load_data(self):
+		self.data = d.load_pit(self.apikey, self.competition)
+
+	def _process_data(self):
+		tests = self.config["tests"]
+		return_vector = {}
+		for team in self.data:
+			for variable in self.data[team]:
+				if variable in tests:
+					if not variable in return_vector:
+						return_vector[variable] = []
+					return_vector[variable].append(self.data[team][variable])
+
+		self.results = return_vector
+
+	def _push_results(self):
+		d.push_pit(self.apikey, self.competition, self.results)
+
+class Rating(Module):
+	pass
+
+class Heatmap(Module):
+	pass
+
+class Sentiment(Module):
+	pass
\ No newline at end of file
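
Since the three concrete modules share one contract, a new analysis routine is mostly boilerplate. A hypothetical skeleton (class name and body illustrative only; superscript.py would also need it added to its `modules` dispatch dict):

```python
# Hypothetical new module following the Module ABC in module.py above.
import module

class Example(module.Module):

	def __init__(self, config, apikey, tbakey, timestamp, competition):
		self.config = config
		self.apikey = apikey  # a pymongo client in practice, per the 0.9.3 changelog
		self.tbakey = tbakey
		self.timestamp = timestamp
		self.competition = competition

	def validate_config(self):
		return True, ""

	def run(self):
		pass  # the real modules split this into _load_data/_process_data/_push_results
```
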
diff --git a/competition/pull.py b/competition/pull.py
new file mode 100644
index 0000000..67d3455
--- /dev/null
+++ b/competition/pull.py
@@ -0,0 +1,63 @@
+import requests
+from exceptions import APIError
+from dep import load_config
+
+url = "https://scouting.titanrobotics2022.com"
+config_tra = {}
+load_config("config.json", config_tra)
+trakey = config_tra['persistent']['key']['tra']
+
+def get_team_competition():
+	endpoint = '/api/fetchTeamCompetition'
+	params = {
+		"CLIENT_ID": trakey['CLIENT_ID'],
+		"CLIENT_SECRET": trakey['CLIENT_SECRET']
+	}
+	response = requests.request("GET", url + endpoint, params=params)
+	json = response.json()
+	if json['success']:
+		return json['competition']
+	else:
+		raise APIError(json)
+
+def get_team():
+	endpoint = '/api/fetchTeamCompetition'
+	params = {
+		"CLIENT_ID": trakey['CLIENT_ID'],
+		"CLIENT_SECRET": trakey['CLIENT_SECRET']
+	}
+	response = requests.request("GET", url + endpoint, params=params)
+	json = response.json()
+	if json['success']:
+		return json['team']
+	else:
+		raise APIError(json)
+
+def get_team_match_data(competition, team_num):
+	endpoint = '/api/fetchAllTeamMatchData'
+	params = {
+		"competition": competition,
+		"teamScouted": team_num,
+		"CLIENT_ID": trakey['CLIENT_ID'],
+		"CLIENT_SECRET": trakey['CLIENT_SECRET']
+	}
+	response = requests.request("GET", url + endpoint, params=params)
+	json = response.json()
+	if json['success']:
+		return json['data'][team_num]
+	else:
+		raise APIError(json)
+
+def get_teams_at_competition(competition):
+	endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
+	params = {
+		"competition": competition,
+		"CLIENT_ID": trakey['CLIENT_ID'],
+		"CLIENT_SECRET": trakey['CLIENT_SECRET']
+	}
+	response = requests.request("GET", url + endpoint, params=params)
+	json = response.json()
+	if json['success']:
+		return list(json['data'].keys())
+	else:
+		raise APIError(json)
diff --git a/competition/requirements.txt b/competition/requirements.txt
new file mode 100644
index 0000000..420b618
--- /dev/null
+++ b/competition/requirements.txt
@@ -0,0 +1,15 @@
+cerberus
+dnspython
+numpy
+pandas
+pyinstaller
+pylint
+pymongo
+pyparsing
+python-daemon
+pyzmq
+requests
+scikit-learn
+scipy
+six
+tra-analysis
diff --git a/competition/superscript.py b/competition/superscript.py
new file mode 100644
index 0000000..e695874
--- /dev/null
+++ b/competition/superscript.py
@@ -0,0 +1,402 @@
+# Titan Robotics Team 2022: Superscript Script
+# Written by Arthur Lu, Jacob Levine, and Dev Singh
+# Notes:
+# setup:
+
+__version__ = "1.0.0"
+
+# changelog should be viewed using print(analysis.__changelog__)
+__changelog__ = """changelog:
+	1.0.0:
+		- superscript now runs as a PEP 3143 compliant, well-behaved daemon on Linux systems
+		- linux superscript daemon has integrated websocket output to monitor progress/status remotely
+		- linux daemon now sends stderr to errorlog.log
+		- added verbose option to linux superscript to allow for interactive output
+		- moved pymongo import to superscript.py
+		- added profile option to linux superscript to profile runtime of script
+		- reduced memory usage slightly by consolidating the unwrapped input data
+		- added debug option, which performs one loop of analysis and dumps results to local files
+		- added event and time delay options to config
+			- event delay pauses loop until event listener receives an update
+			- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
+		- added options to pull config information from database (retains option to use local config file)
+			- config-preference option selects between prioritizing local config and prioritizing database config
+			- synchronize-config option selects whether to update the non prioritized config with the prioritized one
+		- divided config options between persistent ones (keys), and variable ones (everything else)
+		- generalized behavior of various core components by collecting loose functions in several dependencies into classes
+			- module.py contains classes, each one represents a single data analysis routine
+			- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
+	0.9.3:
+		- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
+		- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
+	0.9.2:
+		- removed unnecessary imports from data
+		- minor changes to interface
+	0.9.1:
+		- fixed bugs in configuration item loading exception handling
+	0.9.0:
+		- moved printing and logging related functions to interface.py (changelog will stay in this file)
+		- changed function return files for load_config and save_config to standard C values (0 for success, 1 for error)
+		- added local variables for config location
+		- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
+		- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
+	0.8.6:
+		- added proper main function
+	0.8.5:
+		- added more graceful KeyboardInterrupt exiting
+		- redirected stderr to errorlog.txt
+	0.8.4:
+		- added better error message for missing config.json
+		- added automatic config.json creation
+		- added splash text with version and system info
+	0.8.3:
+		- updated matchloop with new regression format (requires tra_analysis 3.x)
+	0.8.2:
+		- readded while true to main function
+		- added more thread config options
+	0.8.1:
+		- optimized matchloop further by bypassing GIL
+	0.8.0:
+		- added multithreading to matchloop
+		- tweaked user log
+	0.7.0:
+		- finished implementing main function
+	0.6.2:
+		- integrated get_team_rankings.py as get_team_metrics() function
+		- integrated visualize_pit.py as graph_pit_histogram() function
+	0.6.1:
+		- bug fixes with analysis.Metric() calls
+		- modified metric functions to use config.json defined default values
+	0.6.0:
+		- removed main function
+		- changed load_config function
+		- added save_config function
+		- added load_match function
+		- renamed simpleloop to matchloop
+		- moved simplestats function inside matchloop
+		- renamed load_metrics to load_metric
+		- renamed metricsloop to metricloop
+		- split push to database functions among push_match, push_metric, push_pit
+		- moved
+	0.5.2:
+		- made changes due to refactoring of analysis
+	0.5.1:
+		- text fixes
+		- removed matplotlib requirement
+	0.5.0:
+		- improved user interface
+	0.4.2:
+		- removed unnecessary code
+	0.4.1:
+		- fixed bug where X range for regression was determined before sanitization
+		- better sanitized data
+	0.4.0:
+		- fixed spelling issue in __changelog__
+		- addressed nan bug in regression
+		- fixed errors on line 335 with metrics calling incorrect key "glicko2"
+		- fixed errors in metrics computing
+	0.3.0:
+		- added analysis to pit data
+	0.2.1:
+		- minor stability patches
+		- implemented db syncing for timestamps
+		- fixed bugs
+	0.2.0:
+		- finalized testing and small fixes
+	0.1.4:
+		- finished metrics implementation, trueskill is bugged
+	0.1.3:
+		- working
+	0.1.2:
+		- started implementation of metrics
+	0.1.1:
+		- cleaned up imports
+	0.1.0:
+		- tested working, can push to database
+	0.0.9:
+		- tested working
+		- prints out stats for the time being, will push to database later
+	0.0.8:
+		- added data import
+		- removed tba import
+		- finished main method
+	0.0.7:
+		- added load_config
+		- optimized simpleloop for readability
+		- added __all__ entries
+		- added simplestats engine
+		- pending testing
+	0.0.6:
+		- fixes
+	0.0.5:
+		- imported pickle
+		- created custom database object
+	0.0.4:
+		- fixed simpleloop to actually return a vector
+	0.0.3:
+		- added metricsloop which is unfinished
+	0.0.2:
+		- added simpleloop which is untested until data is provided
+	0.0.1:
+		- created script
+		- added analysis, numba, numpy imports
+"""
+
+__author__ = (
+	"Arthur Lu ",
+	"Jacob Levine ",
+)
+
+# imports:
+
+import os, sys, time
+import pymongo # soon to be deprecated
+import traceback
+import warnings
+from config import Configuration, ConfigurationError
+from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
+from interface import Logger
+from module import Match, Metric, Pit
+import zmq
+
+config_path = "config.json"
+
+def main(logger, verbose, profile, debug, socket_send = None):
+
+	client = None
+
+	def close_all():
+		if client is not None:
+			client.close()
+
+	warnings.filterwarnings("ignore")
+
+	logger.splash(__version__)
+
+	modules = {"match": Match, "metric": Metric, "pit": Pit}
+
+	while True:
+
+		try:
+
+			loop_start = time.time()
+
+			logger.info("current time: " + str(loop_start))
+			socket_send("current time: " + str(loop_start))
+
+			config = Configuration(config_path)
+
+			logger.info("found and loaded config at <" + config_path + ">")
+			socket_send("found and loaded config at <" + config_path + ">")
+
+			apikey, tbakey = config.database, config.tba
+
+			logger.info("found and loaded database and tba keys")
+			socket_send("found and loaded database and tba keys")
+
+			client = pymongo.MongoClient(apikey)
+
+			logger.info("established connection to database")
+			socket_send("established connection to database")
+
+			previous_time = get_previous_time(client)
+
+			logger.info("analysis backtimed to: " + str(previous_time))
+			socket_send("analysis backtimed to: " + str(previous_time))
+
+			config.resolve_config_conflicts(logger, client)
+
+			config_modules, competition = config.modules, config.competition
+			for m in config_modules:
+				if m in modules:
+					start = time.time()
+					current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
+					valid, reason = current_module.validate_config()
+					if not valid:
+						continue
+					current_module.run()
+					logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
+					socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
+					if debug:
+						logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
+
+			set_current_time(client, loop_start)
+			close_all()
+
+			logger.info("closed threads and database client")
+			logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
+			socket_send("closed threads and database client")
+			socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
+
+			if profile:
+				return 0
+
+			if debug:
+				return 0
+
+			event_delay = config["variable"]["event-delay"]
+			if event_delay:
+				logger.info("loop delayed until database returns new matches")
+				socket_send("loop delayed until database returns new matches")
+				new_match = False
+				while not new_match:
+					time.sleep(1)
+					new_match = check_new_database_matches(client, competition)
+				logger.info("database returned new matches")
+				socket_send("database returned new matches")
+			else:
+				loop_delay = float(config["variable"]["loop-delay"])
+				remaining_time = loop_delay - (time.time() - loop_start)
+				if remaining_time > 0:
+					logger.info("loop delayed by " + str(remaining_time) + " seconds")
+					socket_send("loop delayed by " + str(remaining_time) + " seconds")
+					time.sleep(remaining_time)
+
+		except KeyboardInterrupt:
+			close_all()
+			logger.info("detected KeyboardInterrupt, exiting")
+			socket_send("detected KeyboardInterrupt, exiting")
+			return 0
+
+		except ConfigurationError as e:
+			str_e = "".join(traceback.format_exception(e))
+			logger.error("encountered a configuration error: " + str(e))
+			logger.error(str_e)
+			socket_send("encountered a configuration error: " + str(e))
+			socket_send(str_e)
+			close_all()
+			return 1
+
+		except Exception as e:
+			str_e = "".join(traceback.format_exception(e))
+			logger.error("encountered an exception while running")
+			logger.error(str_e)
+			socket_send("encountered an exception while running")
+			socket_send(str_e)
+			close_all()
+			return 1
+
+def start(pid_path, verbose, profile, debug):
+
+	if profile:
+
+		def send(msg):
+			pass
+
+		logger = Logger(verbose, profile, debug)
+
+		import cProfile, pstats
+		prof = cProfile.Profile()
+		prof.enable()
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		prof.disable()
+		f = open("profile.txt", 'w+')
+		ps = pstats.Stats(prof, stream = f).sort_stats('cumtime')
+		ps.print_stats()
+		sys.exit(exit_code)
+
+	elif verbose:
+
+		def send(msg):
+			pass
+
+		logger = Logger(verbose, profile, debug)
+
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		sys.exit(exit_code)
+
+	elif debug:
+
+		def send(msg):
+			pass
+
+		logger = Logger(verbose, profile, debug)
+
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
+		sys.exit(exit_code)
+
+	else:
+
+		logfile = "logfile.log"
+
+		f = open(logfile, 'w+')
+		f.close()
+
+		e = open('errorlog.log', 'w+')
+		with daemon.DaemonContext(
+			working_directory = os.getcwd(),
+			pidfile = pidfile.TimeoutPIDLockFile(pid_path),
+			stderr = e
+			):
+
+			context = zmq.Context()
+			socket = context.socket(zmq.PUB)
+			socket.bind("tcp://*:5678")
+			socket.send(b'status')
+
+			def send(msg):
+				socket.send(bytes("status: " + msg, "utf-8"))
+
+			logger = Logger(verbose, profile, debug, file = logfile)
+
+			exit_code = main(logger, verbose, profile, debug, socket_send = send)
+
+			socket.close()
+			f.close()
+
+			sys.exit(exit_code)
+
+def stop(pid_path):
+	try:
+		pf = open(pid_path, 'r')
+		pid = int(pf.read().strip())
+		pf.close()
+	except IOError:
+		sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
+		return
+
+	try:
+		while True:
+			os.kill(pid, SIGTERM)
+			time.sleep(0.01)
+	except OSError as err:
+		err = str(err)
+		if err.find("No such process") > 0:
+			if os.path.exists(pid_path):
+				os.remove(pid_path)
+		else:
+			traceback.print_exc(file = sys.stderr)
+			sys.exit(1)
+
+def restart(pid_path):
+	stop(pid_path)
+	start(pid_path, False, False, False)
+
+if __name__ == "__main__":
+
+	if sys.platform.startswith("win"):
+		start(None, True, False, False)
+
+	else:
+		import daemon
+		from daemon import pidfile
+		from signal import SIGTERM
+		pid_path = "tra-daemon.pid"
+		if len(sys.argv) == 2:
+			if 'start' == sys.argv[1]:
+				start(pid_path, False, False, False)
+			elif 'stop' == sys.argv[1]:
+				stop(pid_path)
+			elif 'restart' == sys.argv[1]:
+				restart(pid_path)
+			elif 'verbose' == sys.argv[1]:
+				start(None, True, False, False)
+			elif 'profile' == sys.argv[1]:
+				start(None, False, True, False)
+			elif 'debug' == sys.argv[1]:
+				start(None, False, False, True)
+			else:
+				print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
+				sys.exit(2)
+			sys.exit(0)
+		else:
+			print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
+			sys.exit(2)
\ No newline at end of file
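
The daemon's ZMQ status feed can be watched with a small subscriber. A sketch assuming the defaults in `start()` above (PUB socket bound to port 5678, messages prefixed with `status`):

```python
# Sketch only: monitor the status feed published by superscript.py's daemon mode.
import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5678")
socket.setsockopt_string(zmq.SUBSCRIBE, "status")

while True:
	print(socket.recv_string())  # e.g. "status: match module finished in 1.2 seconds"
```
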
diff --git a/src/data.py b/src/data.py
deleted file mode 100644
index 641aba7..0000000
--- a/src/data.py
+++ /dev/null
@@ -1,129 +0,0 @@
-import requests
-import pymongo
-import pandas as pd
-import time
-
-def pull_new_tba_matches(apikey, competition, cutoff):
-	api_key= apikey
-	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth_Key":api_key})
-	out = []
-	for i in x.json():
-		if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
-			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
-	return out
-
-def get_team_match_data(apikey, competition, team_num):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.matchdata
-	out = {}
-	for i in mdata.find({"competition" : competition, "team_scouted": team_num}):
-		out[i['match']] = i['data']
-	return pd.DataFrame(out)
-
-def get_team_pit_data(apikey, competition, team_num):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.pitdata
-	out = {}
-	return mdata.find_one({"competition" : competition, "team_scouted": team_num})["data"]
-
-def get_team_metrics_data(apikey, competition, team_num):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_processing
-	mdata = db.team_metrics
-	return mdata.find_one({"competition" : competition, "team": team_num})
-
-def get_match_data_formatted(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.teamlist
-	x=mdata.find_one({"competition":competition})
-	out = {}
-	for i in x:
-		try:
-			out[int(i)] = unkeyify_2l(get_team_match_data(apikey, competition, int(i)).transpose().to_dict())
-		except:
-			pass
-	return out
-
-def get_metrics_data_formatted(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.teamlist
-	x=mdata.find_one({"competition":competition})
-	out = {}
-	for i in x:
-		try:
-			out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
-		except:
-			pass
-	return out
-
-def get_pit_data_formatted(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_scouting
-	mdata = db.teamlist
-	x=mdata.find_one({"competition":competition})
-	out = {}
-	for i in x:
-		try:
-			out[int(i)] = get_team_pit_data(apikey, competition, int(i))
-		except:
-			pass
-	return out
-
-def get_pit_variable_data(apikey, competition):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_processing
-	mdata = db.team_pit
-	out = {}
-	return mdata.find()
-
-def get_pit_variable_formatted(apikey, competition):
-	temp = get_pit_variable_data(apikey, competition)
-	out = {}
-	for i in temp:
-		out[i["variable"]] = i["data"]
-	return out
-
-def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
-	client = pymongo.MongoClient(apikey)
-	db = client[dbname]
-	mdata = db[colname]
-	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
-
-def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
-	client = pymongo.MongoClient(apikey)
-	db = client[dbname]
-	mdata = db[colname]
-	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
-
-def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
-	client = pymongo.MongoClient(apikey)
-	db = client[dbname]
-	mdata = db[colname]
-	mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
-
-def get_analysis_flags(apikey, flag):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_processing
-	mdata = db.flags
-	return mdata.find_one({flag:{"$exists":True}})
-
-def set_analysis_flags(apikey, flag, data):
-	client = pymongo.MongoClient(apikey)
-	db = client.data_processing
-	mdata = db.flags
-	return mdata.replace_one({flag:{"$exists":True}}, data, True)
-
-def unkeyify_2l(layered_dict):
-	out = {}
-	for i in layered_dict.keys():
-		add = []
-		sortkey = []
-		for j in layered_dict[i].keys():
-			add.append([j,layered_dict[i][j]])
-		add.sort(key = lambda x: x[0])
-		out[i] = list(map(lambda x: x[1], add))
-	return out
\ No newline at end of file
hint_text: "placeholder" - pos_hint: {"center_y": .5} - - BoxLayout: - orientation: "horizontal" - MDLabel: - text: "TBA Key:" - halign: 'center' - MDTextField: - hint_text: "placeholder" - pos_hint: {"center_y": .5} - BoxLayout: - orientation: "horizontal" - MDLabel: - text: "CPU Use:" - halign: 'center' - MDLabel: - text: "placeholder" - halign: 'center' - BoxLayout: - orientation: "horizontal" - MDLabel: - text: "Network:" - halign: 'center' - MDLabel: - text: "placeholder" - halign: 'center' - Widget: - BoxLayout: - orientation: "horizontal" - MDLabel: - text: "Progress" - halign: 'center' - MDProgressBar: - id: progress - value: 50 - StatsScreen: - name: "Stats" - MDCheckbox: - size_hint: None, None - size: "48dp", "48dp" - pos_hint: {'center_x': .5, 'center_y': .5} - on_active: Screen.test() - -#Navigation Drawer ------------------------- - MDNavigationDrawer: - id: nav_drawer - BoxLayout: - orientation: "vertical" - padding: "8dp" - spacing: "8dp" - MDLabel: - text: "Titan Scouting" - font_style: "Button" - size_hint_y: None - height: self.texture_size[1] - - MDLabel: - text: "Data Analysis" - font_style: "Caption" - size_hint_y: None - height: self.texture_size[1] - ScrollView: - MDList: - OneLineAvatarListItem: - text: "Home" - on_press: - # nav_drawer.set_state("close") - # screen_manager.transition.direction = "left" - screen_manager.current = "Home" - IconLeftWidget: - icon: "home" - - OneLineAvatarListItem: - text: "Settings" - on_press: - # nav_drawer.set_state("close") - # screen_manager.transition.direction = "right" - # screen_manager.fade - screen_manager.current = "Settings" - IconLeftWidget: - icon: "cog" - OneLineAvatarListItem: - text: "Info" - on_press: - # nav_drawer.set_state("close") - # screen_manager.transition.direction = "right" - # screen_manager.fade - screen_manager.current = "Info" - IconLeftWidget: - icon: "cog" - OneLineAvatarListItem: - text: "Stats" - on_press: - # nav_drawer.set_state("close") - # screen_manager.transition.direction = "right" - # screen_manager.fade - screen_manager.current = "Stats" - IconLeftWidget: - icon: "cog" \ No newline at end of file diff --git a/src/main.py b/src/main.py deleted file mode 100644 index a57421e..0000000 --- a/src/main.py +++ /dev/null @@ -1,58 +0,0 @@ -from kivy.lang import Builder - -from kivymd.uix.screen import Screen -from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem -from kivymd.uix.list import OneLineIconListItem, IconLeftWidget -from kivy.uix.scrollview import ScrollView - - -from kivy.uix.boxlayout import BoxLayout -from kivy.uix.screenmanager import ScreenManager, Screen -from kivy.uix.dropdown import DropDown -from kivy.uix.button import Button -from kivy.base import runTouchApp -from kivymd.uix.menu import MDDropdownMenu, MDMenuItem - -from kivymd.app import MDApp -# import superscript as ss - -# from tra_analysis import analysis as an -import data as d -from collections import defaultdict -import json -import math -import numpy as np -import os -from os import system, name -from pathlib import Path -from multiprocessing import Pool -import matplotlib.pyplot as plt -from concurrent.futures import ThreadPoolExecutor -import time -import warnings - -# global exec_threads - - -# Screens -class HomeScreen(Screen): - pass -class SettingsScreen(Screen): - pass -class InfoScreen(Screen): - pass - -class StatsScreen(Screen): - pass - - -class MyApp(MDApp): - def build(self): - self.theme_cls.primary_palette = "Red" - return Builder.load_file("design.kv") - def test(): - 
print("test") - - -if __name__ == "__main__": - MyApp().run() \ No newline at end of file diff --git a/submit-debug.sh b/submit-debug.sh new file mode 100644 index 0000000..861af0b --- /dev/null +++ b/submit-debug.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +#SBATCH --job-name=tra-superscript +#SBATCH --output=slurm-tra-superscript.out +#SBATCH --ntasks=8 +#SBATCH --time=24:00:00 +#SBATCH --mem-per-cpu=256 +#SBATCH --mail-user=dsingh@imsa.edu +#SBATCH -p cpu-long + +cd competition +python superscript.py debug diff --git a/submit-prod.sh b/submit-prod.sh new file mode 100644 index 0000000..1b58edf --- /dev/null +++ b/submit-prod.sh @@ -0,0 +1,12 @@ +#!/bin/bash +# +#SBATCH --job-name=tra-superscript +#SBATCH --output=PROD_slurm-tra-superscript.out +#SBATCH --ntasks=8 +#SBATCH --time=24:00:00 +#SBATCH --mem-per-cpu=256 +#SBATCH --mail-user=dsingh@imsa.edu +#SBATCH -p cpu-long + +cd competition +python superscript.py verbose