removed all unnecessary files,

moved important files to the "competition" folder


Former-commit-id: 59becb22abc3305a36e2876351e6c7306e3f551e
Arthur Lu
2022-03-14 01:33:24 +00:00
parent 4836f48a34
commit 8c28c24d60
20 changed files with 0 additions and 232 deletions

competition/config.py Normal file

@@ -0,0 +1,251 @@
import json
from exceptions import ConfigurationError
from cerberus import Validator
from data import set_database_config, get_database_config
class Configuration:
path = None
config = {}
_sample_config = {
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":False
},
"variable":{
"event-delay":False,
"loop-delay":0,
"competition": "2020ilch",
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":True,
"low-balls":True,
"high-balls":True,
"wheel-success":True,
"strategic-focus":True,
"climb-mechanism":True,
"attitude":True
}
}
}
}
}
_validation_schema = {
"persistent": {
"type": "dict",
"required": True,
"require_all": True,
"schema": {
"key": {
"type": "dict",
"require_all":True,
"schema": {
"database": {"type":"string"},
"tba": {"type": "string"},
"tra": {
"type": "dict",
"require_all": True,
"schema": {
"CLIENT_ID": {"type": "string"},
"CLIENT_SECRET": {"type": "string"},
"url": {"type": "string"}
}
}
}
},
"config-preference": {"type": "string", "required": True},
"synchronize-config": {"type": "boolean", "required": True}
}
}
}
def __init__(self, path):
self.path = path
self.load_config()
self.validate_config()
def load_config(self):
try:
f = open(self.path, "r")
self.config.update(json.load(f))
f.close()
except:
self.config = self._sample_config
self.save_config()
raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path")
def save_config(self):
f = open(self.path, "w+")
json.dump(self.config, f, ensure_ascii=False, indent=4)
f.close()
def validate_config(self):
v = Validator(self._validation_schema, allow_unknown = True)
isValidated = v.validate(self.config)
if not isValidated:
raise ConfigurationError("config validation error: " + v.errors)
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
if name == "persistent":
return self.config["persistent"]
elif name == "key":
return self.config["persistent"]["key"]
elif name == "database":
# soon to be deprecated
return self.config["persistent"]["key"]["database"]
elif name == "tba":
return self.config["persistent"]["key"]["tba"]
elif name == "tra":
return self.config["persistent"]["key"]["tra"]
elif name == "priority":
return self.config["persistent"]["config-preference"]
elif name == "sync":
return self.config["persistent"]["synchronize-config"]
elif name == "variable":
return self.config["variable"]
elif name == "event_delay":
return self.config["variable"]["event-delay"]
elif name == "loop_delay":
return self.config["variable"]["loop-delay"]
elif name == "competition":
return self.config["variable"]["competition"]
elif name == "modules":
return self.config["variable"]["modules"]
else:
return None
def __getitem__(self, key):
return self.config[key]
def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme
sync = self.sync
priority = self.priority
if sync:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
set_database_config(client, self.config["variable"])
logger.info("database config was different and was updated")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading remote config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
self.config["variable"] = remote_config
self.save_config()
# change variable to match remote
logger.info("local config was different and was updated")
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
else:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading database config information")
self.config["variable"] = get_database_config(client)
# change variable to match remote without updating local version
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")

competition/data.py Normal file

@@ -0,0 +1,199 @@
import requests
import pull
import pandas as pd
def pull_new_tba_matches(apikey, competition, cutoff):
api_key = apikey
x = requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
out = []
for i in x.json():
if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
return out
def get_team_match_data(client, competition, team_num):
db = client.data_scouting
mdata = db.matchdata
out = {}
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
out[i['match']] = i['data']
return pd.DataFrame(out)
def get_team_pit_data(client, competition, team_num):
db = client.data_scouting
mdata = db.pitdata
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def get_team_metrics_data(client, competition, team_num):
db = client.data_processing
mdata = db.team_metrics
return mdata.find_one({"competition" : competition, "team": team_num})
def get_match_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
except:
pass
return out
def get_metrics_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = get_team_metrics_data(client, competition, int(team))
except:
pass
return out
def get_pit_data_formatted(client, competition):
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
out = {}
for i in x:
try:
out[int(i)] = get_team_pit_data(client, competition, int(i))
except:
pass
return out
def get_pit_variable_data(client, competition):
db = client.data_processing
mdata = db.team_pit
return mdata.find()
def get_pit_variable_formatted(client, competition):
temp = get_pit_variable_data(client, competition)
out = {}
for i in temp:
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
def get_analysis_flags(client, flag):
db = client.data_processing
mdata = db.flags
return mdata.find_one({flag:{"$exists":True}})
def set_analysis_flags(client, flag, data):
db = client.data_processing
mdata = db.flags
return mdata.replace_one({flag:{"$exists":True}}, data, True)
def unkeyify_2l(layered_dict):
out = {}
for i in layered_dict.keys():
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_previous_time(client):
previous_time = get_analysis_flags(client, "latest_update")
if previous_time == None:
set_analysis_flags(client, "latest_update", {"latest_update": 0}) # flag documents must be dicts; passing a bare 0 to replace_one would fail
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(client, current_time):
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
def get_database_config(client):
remote_config = get_analysis_flags(client, "config")
return remote_config["config"] if remote_config != None else None
def set_database_config(client, config):
set_analysis_flags(client, "config", {"config": config})
def load_match(client, competition):
return get_match_data_formatted(client, competition)
def load_metric(client, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = get_team_metrics_data(client, competition, team)
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
db_metrics = db_data["metrics"] # bind under a new name; rebinding the metrics parameter would corrupt the defaults for later teams
elo = db_metrics["elo"]
gl2 = db_metrics["gl2"]
ts = db_metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def load_pit(client, competition):
return get_pit_data_formatted(client, competition)
def push_match(client, competition, results):
for team in results:
push_team_tests_data(client, competition, team, results[team])
def push_metric(client, competition, metric):
for team in metric:
push_team_metrics_data(client, competition, team, metric[team])
def push_pit(client, competition, pit):
for variable in pit:
push_team_pit_data(client, competition, variable, pit[variable])
def check_new_database_matches(client, competition):
# stub: always reports new matches, so event-delay loops never block indefinitely
return True
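As a standalone illustration, unkeyify_2l flattens the {variable: {matchkey: value}} layout produced by DataFrame.transpose().to_dict() into per-variable lists ordered by the inner key (hypothetical data, no database needed):

layered = {"balls-collected": {"match-3": 5, "match-1": 2, "match-2": 4}}
print(unkeyify_2l(layered))
# -> {'balls-collected': [2, 4, 5]}, values sorted by their match keys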

competition/dep.py Normal file

@@ -0,0 +1,141 @@
# contains deprecated functions, not to be used unless necessary!
import json
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1

competition/exceptions.py Normal file

@@ -0,0 +1,7 @@
class APIError(Exception):
def __init__(self, message):
super().__init__(message)
class ConfigurationError(Exception):
def __init__(self, message):
super().__init__(message)

competition/interface.py Normal file

@@ -0,0 +1,91 @@
from logging import Logger as L
import datetime
import platform
import json
class Logger(L):
file = None
levels = {
0: "",
10:"[DEBUG] ",
20:"[INFO] ",
30:"[WARNING] ",
40:"[ERROR] ",
50:"[CRITICAL]",
}
def __init__(self, verbose, profile, debug, file = None):
super().__init__("tra_logger")
self.file = file
self.targets = [] # per-instance target list; a class-level list would be shared across Logger instances
if file != None:
self.targets.append(self._send_file)
if profile:
self.targets.append(self._send_null)
elif verbose:
self.targets.append(self._send_scli)
elif debug:
self.targets.append(self._send_scli)
else:
self.targets.append(self._send_null)
def _send_null(self, msg):
pass
def _send_scli(self, msg):
print(msg)
def _send_file(self, msg):
f = open(self.file, 'a')
f.write(msg + "\n")
f.close()
def get_time_formatted(self):
return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")
def log(self, level, msg):
for t in self.targets:
t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)
def debug(self, msg):
self.log(10, msg)
def info(self, msg):
self.log(20, msg)
def warning(self, msg):
self.log(30, msg)
def error(self, msg):
self.log(40, msg)
def critical(self, msg):
self.log(50, msg)
def splash(self, version):
def hrule():
self.log(0, "#"+38*"-"+"#")
def box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
self.log(0, temp)
hrule()
box(" superscript version: " + version)
box(" os: " + platform.system())
box(" python: " + platform.python_version())
hrule()
def save_module_to_file(self, module, data, results):
f = open(module + ".log", "w")
json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
f.close()
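A short usage sketch of the Logger (the file path is illustrative; verbose sends output to the console and the file target mirrors it to disk):

logger = Logger(verbose = True, profile = False, debug = False, file = "example.log")  # file path is illustrative
logger.splash("1.0.0")
logger.info("established connection to database")
logger.warning("loop delayed")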

competition/module.py Normal file

@@ -0,0 +1,321 @@
import abc
import data as d
import signal
import numpy as np
from tra_analysis import Analysis as an
class Module(metaclass = abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__init__') and
callable(subclass.__init__) and
hasattr(subclass, 'validate_config') and
callable(subclass.validate_config) and
hasattr(subclass, 'run') and
callable(subclass.run)
)
@abc.abstractmethod
def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def validate_config(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def run(self, *args, **kwargs):
raise NotImplementedError
class Match (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.load_match(self.apikey, self.competition) # apikey actually carries the shared pymongo client (see changelog 0.9.3)
def _simplestats(self, data_test):
signal.signal(signal.SIGINT, signal.SIG_IGN)
data = np.array(data_test[3]) # data_test layout: (team, variable, test, values)
data = data[np.isfinite(data)]
ranges = list(range(len(data)))
test = data_test[2]
if test == "basic_stats":
return an.basic_stats(data)
if test == "historical_analysis":
return an.histo_analysis([ranges, data])
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def _process_data(self):
tests = self.config["tests"]
data = self.data
input_vector = []
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
input_vector.append((team, variable, test, data[team][variable]))
self.data = input_vector
self.results = []
for test_var_data in self.data:
self.results.append(self._simplestats(test_var_data))
def _push_results(self):
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
result_filtered = self.results
input_vector = self.data
return_vector = AutoVivification()
i = 0
for result in result_filtered:
filtered = input_vector[i][2]
try:
short = short_mapping[filtered]
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
except KeyError: # not in mapping
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
i += 1
self.results = return_vector
d.push_match(self.apikey, self.competition, self.results)
class Metric (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
def _process_data(self):
elo_N = self.config["tests"]["elo"]["N"]
elo_K = self.config["tests"]["elo"]["K"]
matches = self.data
red = {}
blu = {}
for match in matches:
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
d.push_metric(self.apikey, self.competition, temp_vector)
def _push_results(self):
pass
class Pit (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.load_pit(self.apikey, self.competition)
def _process_data(self):
tests = self.config["tests"]
return_vector = {}
for team in self.data:
for variable in self.data[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(self.data[team][variable])
self.results = return_vector
def _push_results(self):
d.push_pit(self.apikey, self.competition, self.results)
class Rating (Module):
pass
class Heatmap (Module):
pass
class Sentiment (Module):
pass
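Rating, Heatmap, and Sentiment above are empty stubs. A hypothetical sketch of how one could be fleshed out, mirroring the _load_data/_process_data/_push_results pattern used by Match, Metric, and Pit (the rating logic itself is a placeholder):

class ExampleRating(Module):
    def __init__(self, config, apikey, tbakey, timestamp, competition):
        self.config = config
        self.apikey = apikey        # shared pymongo client, as in the other modules
        self.tbakey = tbakey
        self.timestamp = timestamp
        self.competition = competition
        self.data = None
        self.results = None
    def validate_config(self):
        return True, ""
    def run(self):
        self._load_data()
        self._process_data()
        self._push_results()
    def _load_data(self):
        self.data = d.load_match(self.apikey, self.competition)
    def _process_data(self):
        self.results = {}           # placeholder: rating computation would go here
    def _push_results(self):
        pass                        # placeholder: push self.results to the database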

competition/pull.py Normal file

@@ -0,0 +1,63 @@
import requests
from exceptions import APIError
from dep import load_config
url = "https://titanscouting.epochml.org"
config_tra = {}
load_config("config.json", config_tra) # if config.json is missing this writes a sample file and leaves config_tra empty, so the lookup below raises KeyError
trakey = config_tra['persistent']['key']['tra']
def get_team_competition():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json)
def get_team():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json)
def get_team_match_data(competition, team_num):
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json)
def get_teams_at_competition(competition):
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json)
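A usage sketch for pull.py (assumes config.json in the working directory holds valid tra credentials; the event key comes from the sample config):

from pull import get_teams_at_competition
from exceptions import APIError

try:
    teams = get_teams_at_competition("2020ilch")
    print(len(teams), "teams, e.g.", teams[:5])
except APIError as e:
    print("API rejected the request:", e)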

competition/requirements.txt Normal file

@@ -0,0 +1,14 @@
cerberus
dnspython
numpy
pyinstaller
pylint
pymongo
pyparsing
python-daemon
pyzmq
requests
scikit-learn
scipy
six
tra-analysis

competition/superscript.py Normal file

@@ -0,0 +1,403 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:
__version__ = "1.0.0"
# changelog should be viewed using print(superscript.__changelog__)
__changelog__ = """changelog:
1.0.0:
- superscript now runs as a PEP 3143 compliant, well-behaved daemon on Linux systems
- linux superscript daemon has integrated websocket output to monitor progress/status remotely
- linux daemon now sends stderr to errorlog.log
- added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script
- reduced memory usage slightly by consolidating the unwrapped input data
- added debug option, which performs one loop of analysis and dumps results to local files
- added event and time delay options to config
- event delay pauses loop until event listener receives an update
- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
- added options to pull config information from database (retains option to use local config file)
- config-preference option selects between prioritizing local config and prioritizing database config
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
- divided config options between persistent ones (keys), and variable ones (everything else)
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
- module.py contains classes, each one represents a single data analysis routine
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
0.9.3:
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
0.9.2:
- removed unnecessary imports from data
- minor changes to interface
0.9.1:
- fixed bugs in configuration item loading exception handling
0.9.0:
- moved printing and logging related functions to interface.py (changelog will stay in this file)
- changed function return values for load_config and save_config to standard C values (0 for success, 1 for error)
- added local variables for config location
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
0.8.6:
- added proper main function
0.8.5:
- added more graceful KeyboardInterrupt exiting
- redirected stderr to errorlog.txt
0.8.4:
- added better error message for missing config.json
- added automatic config.json creation
- added splash text with version and system info
0.8.3:
- updated matchloop with new regression format (requires tra_analysis 3.x)
0.8.2:
- readded while true to main function
- added more thread config options
0.8.1:
- optimized matchloop further by bypassing GIL
0.8.0:
- added multithreading to matchloop
- tweaked user log
0.7.0:
- finished implementing main function
0.6.2:
- integrated get_team_rankings.py as get_team_metrics() function
- integrated visualize_pit.py as graph_pit_histogram() function
0.6.1:
- bug fixes with analysis.Metric() calls
- modified metric functions to use config.json defined default values
0.6.0:
- removed main function
- changed load_config function
- added save_config function
- added load_match function
- renamed simpleloop to matchloop
- moved simplestats function inside matchloop
- renamed load_metrics to load_metric
- renamed metricsloop to metricloop
- split push to database functions among push_match, push_metric, push_pit
- moved
0.5.2:
- made changes due to refactoring of analysis
0.5.1:
- text fixes
- removed matplotlib requirement
0.5.0:
- improved user interface
0.4.2:
- removed unnecessary code
0.4.1:
- fixed bug where X range for regression was determined before sanitization
- better sanitized data
0.4.0:
- fixed spelling issue in __changelog__
- addressed nan bug in regression
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
- fixed errors in metrics computing
0.3.0:
- added analysis to pit data
0.2.1:
- minor stability patches
- implemented db syncing for timestamps
- fixed bugs
0.2.0:
- finalized testing and small fixes
0.1.4:
- finished metrics implement, trueskill is bugged
0.1.3:
- working
0.1.2:
- started implement of metrics
0.1.1:
- cleaned up imports
0.1.0:
- tested working, can push to database
0.0.9:
- tested working
- prints out stats for the time being, will push to database later
0.0.8:
- added data import
- removed tba import
- finished main method
0.0.7:
- added load_config
- optimized simpleloop for readability
- added __all__ entries
- added simplestats engine
- pending testing
0.0.6:
- fixes
0.0.5:
- imported pickle
- created custom database object
0.0.4:
- fixed simpleloop to actually return a vector
0.0.3:
- added metricsloop which is unfinished
0.0.2:
- added simpleloop which is untested until data is provided
0.0.1:
- created script
- added analysis, numba, numpy imports
"""
__author__ = (
"Arthur Lu <learthurgo@gmail.com>",
"Jacob Levine <jlevine@imsa.edu>",
)
# imports:
import os, sys, time
import pymongo # soon to be deprecated
import traceback
import warnings
from config import Configuration, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches
from interface import Logger
from module import Match, Metric, Pit
import zmq
config_path = "config.json"
def main(logger, verbose, profile, debug, socket_send = None):
def close_all():
if "client" in locals(): # the free variable "client" appears in locals() only once main() has bound it
client.close()
warnings.filterwarnings("ignore")
logger.splash(__version__)
modules = {"match": Match, "metric": Metric, "pit": Pit}
while True:
try:
loop_start = time.time()
logger.info("current time: " + str(loop_start))
socket_send("current time: " + str(loop_start))
config = Configuration(config_path)
logger.info("found and loaded config at <" + config_path + ">")
socket_send("found and loaded config at <" + config_path + ">")
apikey, tbakey = config.database, config.tba
logger.info("found and loaded database and tba keys")
socket_send("found and loaded database and tba keys")
client = pymongo.MongoClient(apikey)
logger.info("established connection to database")
socket_send("established connection to database")
previous_time = get_previous_time(client)
logger.info("analysis backtimed to: " + str(previous_time))
socket_send("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition
for m in config_modules:
if m in modules:
start = time.time()
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
valid, error = current_module.validate_config() # validate_config returns a (bool, str) tuple; truth-testing the tuple itself would always pass
if not valid:
continue
current_module.run()
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
if debug:
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
set_current_time(client, loop_start)
close_all()
logger.info("closed threads and database client")
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
socket_send("closed threads and database client")
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile:
return 0
if debug:
return 0
event_delay = config["variable"]["event-delay"]
if event_delay:
logger.info("loop delayed until database returns new matches")
socket_send("loop delayed until database returns new matches")
new_match = False
while not new_match:
time.sleep(1)
new_match = check_new_database_matches(client, competition)
logger.info("database returned new matches")
socket_send("database returned new matches")
else:
loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0:
logger.info("loop delayed by " + str(remaining_time) + " seconds")
socket_send("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time)
except KeyboardInterrupt:
close_all()
logger.info("detected KeyboardInterrupt, exiting")
socket_send("detected KeyboardInterrupt, exiting")
return 0
except ConfigurationError as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered a configuration error: " + str(e))
logger.error(str_e)
socket_send("encountered a configuration error: " + str(e))
socket_send(str_e)
close_all()
return 1
except Exception as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered an exception while running")
logger.error(str_e)
socket_send("encountered an exception while running")
socket_send(str_e)
close_all()
return 1
def start(pid_path, verbose, profile, debug):
if profile:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
import cProfile, pstats, io
profile = cProfile.Profile() # rebinds the boolean flag; main() only truth-tests it, so profiling behavior is preserved
profile.enable()
exit_code = main(logger, verbose, profile, debug, socket_send = send)
profile.disable()
f = open("profile.txt", 'w+')
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
ps.print_stats()
sys.exit(exit_code)
elif verbose:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code)
elif debug:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code)
else:
logfile = "logfile.log"
f = open(logfile, 'w+')
f.close()
e = open('errorlog.log', 'w+')
with daemon.DaemonContext(
working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = e
):
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678")
socket.send(b'status')
def send(msg):
socket.send(bytes("status: " + msg, "utf-8"))
logger = Logger(verbose, profile, debug, file = logfile)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
socket.close()
f.close()
sys.exit(exit_code)
def stop(pid_path):
try:
pf = open(pid_path, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
return
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.01)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pid_path):
os.remove(pid_path)
else:
traceback.print_exc(file = sys.stderr)
sys.exit(1)
def restart(pid_path):
stop(pid_path)
start(pid_path, False, False, False)
if __name__ == "__main__":
if sys.platform.startswith("win"):
start(None, True, False, False) # no daemon support on Windows; run interactively in verbose mode
else:
import daemon
from daemon import pidfile
from signal import SIGTERM
pid_path = "tra-daemon.pid"
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
start(pid_path, False, False, False)
elif 'stop' == sys.argv[1]:
stop(pid_path)
elif 'restart' == sys.argv[1]:
restart(pid_path)
elif 'verbose' == sys.argv[1]:
start(None, True, False, False)
elif 'profile' == sys.argv[1]:
start(None, False, True, False)
elif 'debug' == sys.argv[1]:
start(None, False, False, True)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
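On Linux, start() binds a ZMQ PUB socket to tcp://*:5678 and prefixes every message with "status". A minimal subscriber sketch for monitoring a running daemon:

import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5678")
socket.setsockopt_string(zmq.SUBSCRIBE, "status")  # matches the "status" prefix sent by start()
while True:
    print(socket.recv_string())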