2 Commits
gui ... v0.0.0

Author      SHA1        Message                                               Date
Arthur Lu   95e820627f  fixed badge url (Former-commit-id: 9039cde818)        2021-08-26 18:20:11 -07:00
Arthur Lu   da4795a345  Update release badge (Former-commit-id: a8f0172b28)   2021-08-26 18:11:25 -07:00
23 changed files with 47 additions and 1376 deletions

View File

@@ -1,6 +1,7 @@
-FROM python:slim
+FROM ubuntu:20.04
 WORKDIR /
-RUN apt-get -y update; apt-get -y upgrade
-RUN apt-get -y install git
-COPY requirements.txt .
-RUN pip install -r requirements.txt
+RUN apt-get -y update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
+RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
+RUN ln -s $(which pip3) /usr/bin/pip
+RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy

View File

@@ -0,0 +1,2 @@
FROM titanscout2022/tra-analysis-base:latest
WORKDIR /
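
Note: the two Dockerfiles above appear to work together. The first builds the full environment and is presumably published as the titanscout2022/tra-analysis-base image that this two-line dev-dockerfile extends. A minimal local build sketch, assuming the file names shown in this diff and a local Docker install (the tra-analysis-dev tag is illustrative only):

docker build -t titanscout2022/tra-analysis-base:latest -f Dockerfile .   # build the base environment image
docker build -t tra-analysis-dev -f dev-dockerfile .                      # build the dev-container image on top of it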

View File

@@ -1,22 +1,28 @@
 {
 	"name": "TRA Analysis Development Environment",
 	"build": {
-		"dockerfile": "Dockerfile",
+		"dockerfile": "dev-dockerfile",
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
-		"python.pythonPath": "",
+		"python.pythonPath": "/usr/local/bin/python",
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
-		"python.linting.pylintPath": "",
-		"python.testing.pytestPath": "",
-		"editor.tabSize": 4,
-		"editor.insertSpaces": false
+		"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
+		"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
+		"python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
+		"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
+		"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
+		"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
+		"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
+		"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
+		"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
 	},
 	"extensions": [
 		"mhutchie.git-graph",
 		"ms-python.python",
 		"waderyan.gitblame"
 	],
-	"postCreateCommand": ""
+	"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
 }

View File

@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

-name: Build Superscript Linux
+name: Superscript Unit Tests

 on:
   release:
@@ -11,25 +11,7 @@ jobs:
   generate:
     name: Build Linux
     runs-on: ubuntu-latest
     steps:
       - name: Checkout master
         uses: actions/checkout@master
-      - name: Install Dependencies
-        run: pip install -r requirements.txt
-        working-directory: src/
-      - name: Give Execute Permission
-        run: chmod +x build-CLI.sh
-        working-directory: build/
-      - name: Build Binary
-        run: ./build-CLI.sh
-        working-directory: build/
-      - name: Copy Binary to Root Dir
-        run: cp superscript ..
-        working-directory: dist/
-      - name: Upload Release Asset
-        uses: svenstaro/upload-release-action@v2
-        with:
-          repo_token: ${{ secrets.GITHUB_TOKEN }}
-          file: superscript
-          asset_name: superscript
-          tag: ${{ github.ref }}

.gitignore (vendored) · 5 lines changed
View File

@@ -9,11 +9,6 @@
 **/tra_analysis/
 **/temp/*
-**/*.pid
-**/profile.*
-**/*.log
 **/errorlog.txt
 /dist/superscript.*
 /dist/superscript

View File

@@ -1,4 +1,4 @@
-# Red Alliance Analysis · ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscout2022/red-alliance-analysis)
+# Red Alliance Analysis · ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscout2022/tra-superscript)
 Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.

View File

@@ -1,5 +1,5 @@
-set pathtospec="../src/cli/superscript.spec"
+set pathtospec="../src/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"
-pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
+pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%

View File

@@ -1,5 +1,5 @@
-pathtospec="../src/cli/superscript.spec"
+pathtospec="../src/superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"
-pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
+pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
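
For reference, the workflow steps removed earlier in this diff drove these scripts roughly as follows; a sketch assuming the layout implied above (build/ contains build-CLI.sh and PyInstaller writes the binary to dist/):

pip install -r src/requirements.txt    # install dependencies from the repository root
cd build
chmod +x build-CLI.sh                  # give execute permission
./build-CLI.sh                         # runs pyinstaller --onefile against ../src/superscript.spec
cp ../dist/superscript .               # copy the resulting binary out of dist/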

View File

@@ -1,244 +0,0 @@
import math
import json
from multiprocessing import Pool
import os
from cerberus import Validator
from exceptions import ConfigurationError
from data import set_database_config, get_database_config
from interface import stderr, stdout, INF, ERR
config_path = "config.json"
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def parse_config_persistent(send, config):
v = Validator(load_validation_schema(), allow_unknown = True)
isValidated = v.validate(config)
if not isValidated:
raise ConfigurationError(v.errors, 101)
apikey = config["persistent"]["key"]["database"]
tbakey = config["persistent"]["key"]["tba"]
preference = config["persistent"]["config-preference"]
sync = config["persistent"]["synchronize-config"]
return apikey, tbakey, preference, sync
def parse_config_variable(send, config):
sys_max_threads = os.cpu_count()
try:
cfg_max_threads = config["variable"]["max-threads"]
except:
raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109)
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, 110)
try:
exec_threads = Pool(processes = alloc_processes)
except Exception as e:
send(stderr, INF, e)
raise ConfigurationError("unable to start threads", 200)
send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")
try:
modules = config["variable"]["modules"]
except:
raise ConfigurationError("variable/modules field is invalid or missing", 102)
if modules == None:
raise ConfigurationError("variable/modules field is empty", 106)
send(stdout, INF, "found and loaded competition, match, metrics, pit from config")
return exec_threads, modules
def resolve_config_conflicts(send, client, config, preference, sync):
if sync:
if preference == "local" or preference == "client":
send(stdout, INF, "config-preference set to local/client, loading local config information")
remote_config = get_database_config(client)
if remote_config != config["variable"]:
set_database_config(client, config["variable"])
send(stdout, INF, "database config was different and was updated")
return config
elif preference == "remote" or preference == "database":
send(stdout, INF, "config-preference set to remote/database, loading remote config information")
remote_config= get_database_config(client)
if remote_config != config["variable"]:
config["variable"] = remote_config
if save_config(config_path, config):
raise ConfigurationError("local config was different but could not be updated", 121)
send(stdout, INF, "local config was different and was updated")
return config
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
else:
if preference == "local" or preference == "client":
send(stdout, INF, "config-preference set to local/client, loading local config information")
return config
elif preference == "remote" or preference == "database":
send(stdout, INF, "config-preference set to remote/database, loading database config information")
config["variable"] = get_database_config(client)
return config
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1
def load_validation_schema():
try:
with open("validation-schema.json", "r") as f:
return json.load(f)
except:
raise FileNotFoundError("Validation schema not found at validation-schema.json")
def save_config(path, config_vector):
f = open(path, "w+")
json.dump(config_vector, f, ensure_ascii=False, indent=4)
f.close()
return 0

View File

@@ -1,199 +0,0 @@
import requests
import pull
import pandas as pd
def pull_new_tba_matches(apikey, competition, cutoff):
api_key= apikey
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}, verify=False)
out = []
for i in x.json():
if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
return out
def get_team_match_data(client, competition, team_num):
db = client.data_scouting
mdata = db.matchdata
out = {}
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
out[i['match']] = i['data']
return pd.DataFrame(out)
def get_team_pit_data(client, competition, team_num):
db = client.data_scouting
mdata = db.pitdata
out = {}
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def get_team_metrics_data(client, competition, team_num):
db = client.data_processing
mdata = db.team_metrics
return mdata.find_one({"competition" : competition, "team": team_num})
def get_match_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
except:
pass
return out
def get_metrics_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = get_team_metrics_data(client, competition, int(team))
except:
pass
return out
def get_pit_data_formatted(client, competition):
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
out = {}
for i in x:
try:
out[int(i)] = get_team_pit_data(client, competition, int(i))
except:
pass
return out
def get_pit_variable_data(client, competition):
db = client.data_processing
mdata = db.team_pit
out = {}
return mdata.find()
def get_pit_variable_formatted(client, competition):
temp = get_pit_variable_data(client, competition)
out = {}
for i in temp:
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
def get_analysis_flags(client, flag):
db = client.data_processing
mdata = db.flags
return mdata.find_one({flag:{"$exists":True}})
def set_analysis_flags(client, flag, data):
db = client.data_processing
mdata = db.flags
return mdata.replace_one({flag:{"$exists":True}}, data, True)
def unkeyify_2l(layered_dict):
out = {}
for i in layered_dict.keys():
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_previous_time(client):
previous_time = get_analysis_flags(client, "latest_update")
if previous_time == None:
set_analysis_flags(client, "latest_update", 0)
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(client, current_time):
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
def get_database_config(client):
remote_config = get_analysis_flags(client, "config")
return remote_config["config"] if remote_config != None else None
def set_database_config(client, config):
set_analysis_flags(client, "config", {"config": config})
def load_match(client, competition):
return get_match_data_formatted(client, competition)
def load_metric(client, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = get_team_metrics_data(client, competition, team)
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def load_pit(client, competition):
return get_pit_data_formatted(client, competition)
def push_match(client, competition, results):
for team in results:
push_team_tests_data(client, competition, team, results[team])
def push_metric(client, competition, metric):
for team in metric:
push_team_metrics_data(client, competition, team, metric[team])
def push_pit(client, competition, pit):
for variable in pit:
push_team_pit_data(client, competition, variable, pit[variable])
def check_new_database_matches(client, competition):
return True

View File

@@ -1,11 +0,0 @@
class APIError(Exception):
code = None
def __init__(self, str, endpoint):
super().__init__(str)
self.endpoint = endpoint
class ConfigurationError (Exception):
code = None
def __init__(self, str, code):
super().__init__(str)
self.code = code

View File

@@ -1,44 +0,0 @@
import sys
import time
from os import system, name
import platform
empty_delim = " "
hard_divided_delim = "|"
soft_divided_delim = "|"
l_brack = "["
r_brack = "]"
ERR = "[ERR]"
INF = "[INF]"
stdout = sys.stdout
stderr = sys.stderr
def log(target, level, message, code = 0):
message = time.ctime() + empty_delim + str(level) + l_brack + f"{code:+05}" + r_brack + empty_delim + soft_divided_delim + empty_delim + message
print(message, file = target)
def clear():
if name == "nt":
system("cls")
else:
system("clear")
def splash(version):
def hrule():
print("#"+38*"-"+"#")
def box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)
hrule()
box(" superscript version: " + version)
box(" os: " + platform.system())
box(" python: " + platform.python_version())
hrule()

View File

@@ -1,323 +0,0 @@
import abc
import data as d
import signal
import numpy as np
from tra_analysis import Analysis as an
class Module(metaclass = abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__init__') and
callable(subclass.__init__) and
hasattr(subclass, 'validate_config') and
callable(subclass.validate_config) and
hasattr(subclass, 'run') and
callable(subclass.run)
)
@abc.abstractmethod
def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def validate_config(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def run(self, exec_threads, *args, **kwargs):
raise NotImplementedError
class Match (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self, exec_threads):
self._load_data()
self._process_data(exec_threads)
self._push_results()
def _load_data(self):
self.data = d.load_match(self.apikey, self.competition)
def _simplestats(self, data_test):
signal.signal(signal.SIGINT, signal.SIG_IGN)
data = np.array(data_test[3])
data = data[np.isfinite(data)]
ranges = list(range(len(data)))
test = data_test[2]
if test == "basic_stats":
return an.basic_stats(data)
if test == "historical_analysis":
return an.histo_analysis([ranges, data])
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def _process_data(self, exec_threads):
tests = self.config["tests"]
data = self.data
input_vector = []
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
input_vector.append((team, variable, test, data[team][variable]))
self.data = input_vector
#self.results = list(exec_threads.map(self._simplestats, self.data))
self.results = []
for test_var_data in self.data:
self.results.append(self._simplestats(test_var_data))
def _push_results(self):
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
result_filtered = self.results
input_vector = self.data
return_vector = AutoVivification()
i = 0
for result in result_filtered:
filtered = input_vector[i][2]
try:
short = short_mapping[filtered]
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
except KeyError: # not in mapping
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
i += 1
self.results = return_vector
d.push_match(self.apikey, self.competition, self.results)
class Metric (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self, exec_threads):
self._load_data()
self._process_data(exec_threads)
self._push_results()
def _load_data(self):
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
def _process_data(self, exec_threads):
elo_N = self.config["tests"]["elo"]["N"]
elo_K = self.config["tests"]["elo"]["K"]
matches = self.data
red = {}
blu = {}
for match in matches:
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
d.push_metric(self.apikey, self.competition, temp_vector)
def _push_results(self):
pass
class Pit (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self, exec_threads):
self._load_data()
self._process_data(exec_threads)
self._push_results()
def _load_data(self):
self.data = d.load_pit(self.apikey, self.competition)
def _process_data(self, exec_threads):
tests = self.config["tests"]
print(tests)
return_vector = {}
for team in self.data:
for variable in self.data[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(self.data[team][variable])
self.results = return_vector
def _push_results(self):
d.push_pit(self.apikey, self.competition, self.results)
class Rating (Module):
pass
class Heatmap (Module):
pass
class Sentiment (Module):
pass

View File

@@ -1,64 +0,0 @@
import requests
import json
from exceptions import APIError
from config import load_config
url = "https://titanscouting.epochml.org"
config_tra = {}
load_config("config.json", config_tra)
trakey = config_tra['persistent']['key']['tra']
def get_team_competition():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json, endpoint)
def get_team():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json, endpoint)
def get_team_match_data(competition, team_num):
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json, endpoint)
def get_teams_at_competition(competition):
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json, endpoint)

View File

@@ -1,390 +0,0 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:
__version__ = "1.0.0"
# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
1.0.0:
- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
- linux superscript daemon has integrated websocket output to monitor progress/status remotely
- linux daemon now sends stderr to errorlog.log
- added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script
- reduced memory usage slightly by consolidating the unwrapped input data
- added debug option, which performs one loop of analysis and dumps results to local files
- added event and time delay options to config
- event delay pauses loop until even listener recieves an update
- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
- added options to pull config information from database (reatins option to use local config file)
- config-preference option selects between prioritizing local config and prioritizing database config
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
- divided config options between persistent ones (keys), and variable ones (everything else)
0.9.3:
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
0.9.2:
- removed unessasary imports from data
- minor changes to interface
0.9.1:
- fixed bugs in configuration item loading exception handling
0.9.0:
- moved printing and logging related functions to interface.py (changelog will stay in this file)
- changed function return files for load_config and save_config to standard C values (0 for success, 1 for error)
- added local variables for config location
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
0.8.6:
- added proper main function
0.8.5:
- added more gradeful KeyboardInterrupt exiting
- redirected stderr to errorlog.txt
0.8.4:
- added better error message for missing config.json
- added automatic config.json creation
- added splash text with version and system info
0.8.3:
- updated matchloop with new regression format (requires tra_analysis 3.x)
0.8.2:
- readded while true to main function
- added more thread config options
0.8.1:
- optimized matchloop further by bypassing GIL
0.8.0:
- added multithreading to matchloop
- tweaked user log
0.7.0:
- finished implementing main function
0.6.2:
- integrated get_team_rankings.py as get_team_metrics() function
- integrated visualize_pit.py as graph_pit_histogram() function
0.6.1:
- bug fixes with analysis.Metric() calls
- modified metric functions to use config.json defined default values
0.6.0:
- removed main function
- changed load_config function
- added save_config function
- added load_match function
- renamed simpleloop to matchloop
- moved simplestats function inside matchloop
- renamed load_metrics to load_metric
- renamed metricsloop to metricloop
- split push to database functions amon push_match, push_metric, push_pit
- moved
0.5.2:
- made changes due to refactoring of analysis
0.5.1:
- text fixes
- removed matplotlib requirement
0.5.0:
- improved user interface
0.4.2:
- removed unessasary code
0.4.1:
- fixed bug where X range for regression was determined before sanitization
- better sanitized data
0.4.0:
- fixed spelling issue in __changelog__
- addressed nan bug in regression
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
- fixed errors in metrics computing
0.3.0:
- added analysis to pit data
0.2.1:
- minor stability patches
- implemented db syncing for timestamps
- fixed bugs
0.2.0:
- finalized testing and small fixes
0.1.4:
- finished metrics implement, trueskill is bugged
0.1.3:
- working
0.1.2:
- started implement of metrics
0.1.1:
- cleaned up imports
0.1.0:
- tested working, can push to database
0.0.9:
- tested working
- prints out stats for the time being, will push to database later
0.0.8:
- added data import
- removed tba import
- finished main method
0.0.7:
- added load_config
- optimized simpleloop for readibility
- added __all__ entries
- added simplestats engine
- pending testing
0.0.6:
- fixes
0.0.5:
- imported pickle
- created custom database object
0.0.4:
- fixed simpleloop to actually return a vector
0.0.3:
- added metricsloop which is unfinished
0.0.2:
- added simpleloop which is untested until data is provided
0.0.1:
- created script
- added analysis, numba, numpy imports
"""
__author__ = (
"Arthur Lu <learthurgo@gmail.com>",
"Jacob Levine <jlevine@imsa.edu>",
)
__all__ = [
"load_config",
"save_config",
]
# imports:
import json
from multiprocessing import freeze_support
import os
import pymongo
import sys
import time
import traceback
import warnings
import zmq
import pull
from config import parse_config_persistent, parse_config_variable, resolve_config_conflicts, load_config, save_config, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches
from interface import splash, log, ERR, INF, stdout, stderr
from module import Match, Metric, Pit
config_path = "config.json"
def main(send, verbose = False, profile = False, debug = False):
def close_all():
if "exec_threads" in locals():
exec_threads.terminate()
exec_threads.join()
exec_threads.close()
if "client" in locals():
client.close()
if "f" in locals():
f.close()
warnings.filterwarnings("ignore")
exit_code = 0
if verbose:
splash(__version__)
modules = {"match": Match, "metric": Metric, "pit": Pit}
while True:
try:
loop_start = time.time()
send(stdout, INF, "current time: " + str(loop_start))
config = {}
if load_config(config_path, config):
raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110)
send(stdout, INF, "found and loaded config at <" + config_path + ">")
apikey, tbakey, preference, sync = parse_config_persistent(send, config)
send(stdout, INF, "found and loaded database and tba keys")
client = pymongo.MongoClient(apikey)
send(stdout, INF, "established connection to database")
previous_time = get_previous_time(client)
send(stdout, INF, "analysis backtimed to: " + str(previous_time))
config = resolve_config_conflicts(send, client, config, preference, sync)
exec_threads, config_modules = parse_config_variable(send, config)
if 'competition' in config['variable']:
competition = config['variable']['competition']
else:
competition = pull.get_team_competition()
for m in config_modules:
if m in modules:
start = time.time()
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
valid = current_module.validate_config()
if not valid:
continue
current_module.run(exec_threads)
send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds")
if debug:
f = open(m + ".log", "w+")
json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
f.close()
set_current_time(client, loop_start)
close_all()
send(stdout, INF, "closed threads and database client")
send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile:
exit_code = 0
break
event_delay = config["variable"]["event-delay"]
if event_delay:
send(stdout, INF, "loop delayed until database returns new matches")
new_match = False
while not new_match:
time.sleep(1)
new_match = check_new_database_matches(client, competition)
send(stdout, INF, "database returned new matches")
else:
loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0:
send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time)
except KeyboardInterrupt:
send(stdout, INF, "detected KeyboardInterrupt, killing threads")
close_all()
send(stdout, INF, "terminated threads, exiting")
break
except ConfigurationError as e:
send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code)
traceback.print_exc(file = stderr)
exit_code = 1
close_all()
break
except Exception as e:
send(stderr, ERR, "encountered an exception while running", code = 1)
traceback.print_exc(file = stderr)
exit_code = 1
close_all()
break
return exit_code
def start(pid_path, verbose = False, profile = False, debug = False):
if profile:
def send(target, level, message, code = 0):
pass
import cProfile, pstats, io
profile = cProfile.Profile()
profile.enable()
exit_code = main(send, profile = True)
profile.disable()
f = open("profile.txt", 'w+')
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
ps.print_stats()
sys.exit(exit_code)
elif verbose:
exit_code = main(log, verbose = verbose)
sys.exit(exit_code)
elif debug:
exit_code = main(log, verbose = True, profile = True, debug = debug)
sys.exit(exit_code)
else:
f = open('errorlog.log', 'w+')
with daemon.DaemonContext(
working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = f
):
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678")
socket.send(b'status')
def send(target, level, message, code = 0):
socket.send(bytes("status: " + message, 'utf-8'))
exit_code = main(send)
socket.close()
f.close()
sys.exit(exit_code)
def stop(pid_path):
try:
pf = open(pid_path, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
return
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.01)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pid_path):
os.remove(pid_path)
else:
traceback.print_exc(file = stderr)
sys.exit(1)
def restart(pid_path):
stop(pid_path)
start(pid_path)
if __name__ == "__main__":
if sys.platform.startswith("win"):
freeze_support()
start(None, verbose = True)
else:
import daemon
from daemon import pidfile
from signal import SIGTERM
pid_path = "tra-daemon.pid"
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
start(pid_path)
elif 'stop' == sys.argv[1]:
stop(pid_path)
elif 'restart' == sys.argv[1]:
restart(pid_path)
elif 'verbose' == sys.argv[1]:
start(None, verbose = True)
elif 'profile' == sys.argv[1]:
start(None, profile=True)
elif 'debug' == sys.argv[1]:
start(None, debug = True)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)

View File

@@ -1,27 +0,0 @@
{
"persistent": {
"type": "dict",
"require_all": true,
"schema": {
"key": {
"type": "dict",
"require_all":true,
"schema": {
"database": {"type":"string"},
"tba": {"type": "string"},
"tra": {
"type": "dict",
"require_all": true,
"schema": {
"CLIENT_ID": {"type": "string"},
"CLIENT_SECRET": {"type": "string"},
"url": {"type": "string"}
}
}
}
},
"config-preference": {"type": "string", "required": true},
"synchronize-config": {"type": "boolean", "required": true}
}
}
}

View File

@@ -1,15 +1,18 @@
-cerberus
-dnspython
-numpy
-pyinstaller
-pylint
-pymongo
-pyparsing
-pytest
-python-daemon
-pyzmq
 requests
-scikit-learn
-scipy
-six
+pymongo
+pandas
 tra-analysis
+dnspython
+pyinstaller
+requests
+pymongo
+numpy
+scipy
+scikit-learn
+six
+pyparsing
+pandas
+kivy==2.0.0rc2

View File

@@ -2,6 +2,7 @@
 block_cipher = None
 a = Analysis(['superscript.py'],
 	pathex=['/workspaces/tra-data-analysis/src'],
 	binaries=[],
@@ -13,10 +14,7 @@ a = Analysis(['superscript.py'],
 	],
 	hookspath=[],
 	runtime_hooks=[],
-	excludes=[
-		"matplotlib",
-		"pandas"
-	],
+	excludes=[],
 	win_no_prefer_redirects=False,
 	win_private_assemblies=False,
 	cipher=block_cipher,

View File

@@ -1,14 +0,0 @@
import signal
import zmq
signal.signal(signal.SIGINT, signal.SIG_DFL)
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')
while True:
message = socket.recv_multipart()
print(f'Received: {message}')