2 Commits

Author      SHA1        Message               Date
Arthur Lu   95e820627f  fixed badge url       2021-08-26 18:20:11 -07:00
            Former-commit-id: 9039cde818
Arthur Lu   da4795a345  Update release badge  2021-08-26 18:11:25 -07:00
            Former-commit-id: a8f0172b28
22 changed files with 815 additions and 1363 deletions

View File

@@ -1,6 +1,7 @@
-FROM python:slim
+FROM ubuntu:20.04
 WORKDIR /
-RUN apt-get -y update; apt-get -y upgrade
-RUN apt-get -y install git binutils
-COPY requirements.txt .
-RUN pip install -r requirements.txt
+RUN apt-get -y update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
+RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
+RUN ln -s $(which pip3) /usr/bin/pip
+RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
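The new image installs the scientific stack and Kivy directly with pip instead of from requirements.txt. A minimal smoke test along these lines (a hypothetical script, not part of the repository) can confirm the rebuilt container exposes everything the superscript and GUI code import:

# smoke_test.py - hypothetical check that the packages baked into the new image import cleanly.
import importlib

packages = ["pymongo", "pandas", "numpy", "scipy", "sklearn", "matplotlib", "kivy"]

for name in packages:
	try:
		module = importlib.import_module(name)
		print("[OK]", name, getattr(module, "__version__", "unknown version"))
	except ImportError as error:
		print("[FAIL]", name, error)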

View File

@@ -0,0 +1,2 @@
FROM titanscout2022/tra-analysis-base:latest
WORKDIR /

View File

@@ -1,22 +1,28 @@
 {
 	"name": "TRA Analysis Development Environment",
 	"build": {
-		"dockerfile": "Dockerfile",
+		"dockerfile": "dev-dockerfile",
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
 		"python.pythonPath": "/usr/local/bin/python",
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
-		"python.linting.pylintPath": "/usr/local/bin/pylint",
-		"python.testing.pytestPath": "/usr/local/bin/pytest",
-		"editor.tabSize": 4,
-		"editor.insertSpaces": false
+		"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
+		"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
+		"python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
+		"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
+		"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
+		"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
+		"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
+		"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
+		"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
 	},
 	"extensions": [
 		"mhutchie.git-graph",
 		"ms-python.python",
 		"waderyan.gitblame"
 	],
-	"postCreateCommand": ""
+	"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
 }

View File

@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-name: Build Superscript Linux
+name: Superscript Unit Tests
 on:
   release:
@@ -11,25 +11,7 @@ jobs:
   generate:
     name: Build Linux
     runs-on: ubuntu-latest
     steps:
     - name: Checkout master
       uses: actions/checkout@master
-    - name: Install Dependencies
-      run: pip install -r requirements.txt
-      working-directory: src/
-    - name: Give Execute Permission
-      run: chmod +x build-CLI.sh
-      working-directory: build/
-    - name: Build Binary
-      run: ./build-CLI.sh
-      working-directory: build/
-    - name: Copy Binary to Root Dir
-      run: cp superscript ..
-      working-directory: dist/
-    - name: Upload Release Asset
-      uses: svenstaro/upload-release-action@v2
-      with:
-        repo_token: ${{ secrets.GITHUB_TOKEN }}
-        file: superscript
-        asset_name: superscript
-        tag: ${{ github.ref }}

.gitignore (vendored), 8 changes
View File

@@ -9,10 +9,6 @@
 **/tra_analysis/
 **/temp/*
-**/*.pid
-**/profile.*
-**/*.log
 **/errorlog.txt
-/dist/*
-/dist/superscript
+/dist/superscript.*

View File

@@ -1,4 +1,4 @@
-# Red Alliance Analysis · ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscout2022/red-alliance-analysis)
+# Red Alliance Analysis · ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscout2022/tra-superscript)
 Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.
@@ -43,4 +43,4 @@ don't worry, you may have just not configured the application correctly, but wou
 # Build Statuses
 Coming soon!

View File

@@ -2,4 +2,4 @@ set pathtospec="../src/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"
-pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
+pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%

View File

@@ -1,5 +1,5 @@
pathtospec="superscript.spec" pathtospec="../src/superscript.spec"
pathtodist="../dist/" pathtodist="../dist/"
pathtowork="temp/" pathtowork="temp/"
pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec} pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
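Both build scripts now pass --onefile so PyInstaller is asked to emit a single self-contained binary, and the .sh script now points at the spec file under ../src/. For platforms without bash or cmd, a rough Python stand-in for the same invocation might look like this (a sketch mirroring the script above, not a supported entry point in the repository):

# build_cli.py - hypothetical cross-platform equivalent of build-CLI.sh / build-CLI.bat.
import subprocess

pathtospec = "../src/superscript.spec"
pathtodist = "../dist/"
pathtowork = "temp/"

# Mirrors: pyinstaller --onefile --clean --distpath <dist> --workpath <work> <spec>
subprocess.run(
	["pyinstaller", "--onefile", "--clean",
	 "--distpath", pathtodist, "--workpath", pathtowork, pathtospec],
	check=True,
)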

View File

@@ -1,50 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
['../src/superscript.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=['dnspython', 'sklearn.utils._weight_vector', 'sklearn.utils._typedefs', 'sklearn.neighbors._partition_nodes', 'requests'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=['matplotlib'],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False
)
pyz = PYZ(
a.pure,
a.zipped_data,
cipher=block_cipher
)
exe = EXE(
pyz,
a.scripts,
[],
exclude_binaries=True,
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
console=True,
disable_windowed_traceback=False,
target_arch=None,
codesign_identity=None,
entitlements_file=None
)
coll = COLLECT(
exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
upx_exclude=[],
name='superscript'
)

View File

@@ -1,251 +0,0 @@
import json
from exceptions import ConfigurationError
from cerberus import Validator
from data import set_database_config, get_database_config
class Configuration:
path = None
config = {}
_sample_config = {
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":False
},
"variable":{
"event-delay":False,
"loop-delay":0,
"competition": "2020ilch",
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":True,
"low-balls":True,
"high-balls":True,
"wheel-success":True,
"strategic-focus":True,
"climb-mechanism":True,
"attitude":True
}
}
}
}
}
_validation_schema = {
"persistent": {
"type": "dict",
"required": True,
"require_all": True,
"schema": {
"key": {
"type": "dict",
"require_all":True,
"schema": {
"database": {"type":"string"},
"tba": {"type": "string"},
"tra": {
"type": "dict",
"require_all": True,
"schema": {
"CLIENT_ID": {"type": "string"},
"CLIENT_SECRET": {"type": "string"},
"url": {"type": "string"}
}
}
}
},
"config-preference": {"type": "string", "required": True},
"synchronize-config": {"type": "boolean", "required": True}
}
}
}
def __init__(self, path):
self.path = path
self.load_config()
self.validate_config()
def load_config(self):
try:
f = open(self.path, "r")
self.config.update(json.load(f))
f.close()
except:
self.config = self._sample_config
self.save_config()
f.close()
raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path")
def save_config(self):
f = open(self.path, "w+")
json.dump(self.config, f, ensure_ascii=False, indent=4)
f.close()
def validate_config(self):
v = Validator(self._validation_schema, allow_unknown = True)
isValidated = v.validate(self.config)
if not isValidated:
raise ConfigurationError("config validation error: " + v.errors)
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
if name == "persistent":
return self.config["persistent"]
elif name == "key":
return self.config["persistent"]["key"]
elif name == "database":
# soon to be deprecated
return self.config["persistent"]["key"]["database"]
elif name == "tba":
return self.config["persistent"]["key"]["tba"]
elif name == "tra":
return self.config["persistent"]["key"]["tra"]
elif name == "priority":
return self.config["persistent"]["config-preference"]
elif name == "sync":
return self.config["persistent"]["synchronize-config"]
elif name == "variable":
return self.config["variable"]
elif name == "event_delay":
return self.config["variable"]["event-delay"]
elif name == "loop_delay":
return self.config["variable"]["loop-delay"]
elif name == "competition":
return self.config["variable"]["competition"]
elif name == "modules":
return self.config["variable"]["modules"]
else:
return None
def __getitem__(self, key):
return self.config[key]
def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme
sync = self.sync
priority = self.priority
if sync:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
set_database_config(client, self.config["variable"])
logger.info("database config was different and was updated")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading remote config information")
remote_config = get_database_config(client)
if remote_config != self.config["variable"]:
self.config["variable"] = remote_config
self.save_config()
# change variable to match remote
logger.info("local config was different and was updated")
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
else:
if priority == "local" or priority == "client":
logger.info("config-preference set to local/client, loading local config information")
# no change to config
elif priority == "remote" or priority == "database":
logger.info("config-preference set to remote/database, loading database config information")
self.config["variable"] = get_database_config(client)
# change variable to match remote without updating local version
else:
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")

View File

@@ -1,102 +1,118 @@
 import requests
-import pull
+import pymongo
 import pandas as pd
+import time

 def pull_new_tba_matches(apikey, competition, cutoff):
 	api_key= apikey
-	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
+	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth_Key":api_key})
 	out = []
 	for i in x.json():
 		if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
 			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
 	return out

-def get_team_match_data(client, competition, team_num):
+def get_team_match_data(apikey, competition, team_num):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_scouting
 	mdata = db.matchdata
 	out = {}
-	for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
+	for i in mdata.find({"competition" : competition, "team_scouted": team_num}):
 		out[i['match']] = i['data']
 	return pd.DataFrame(out)

-def get_team_pit_data(client, competition, team_num):
+def get_team_pit_data(apikey, competition, team_num):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_scouting
 	mdata = db.pitdata
 	out = {}
-	return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
+	return mdata.find_one({"competition" : competition, "team_scouted": team_num})["data"]

-def get_team_metrics_data(client, competition, team_num):
+def get_team_metrics_data(apikey, competition, team_num):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_processing
 	mdata = db.team_metrics
 	return mdata.find_one({"competition" : competition, "team": team_num})

-def get_match_data_formatted(client, competition):
-	teams_at_comp = pull.get_teams_at_competition(competition)
-	out = {}
-	for team in teams_at_comp:
-		try:
-			out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
-		except:
-			pass
-	return out
-
-def get_metrics_data_formatted(client, competition):
-	teams_at_comp = pull.get_teams_at_competition(competition)
-	out = {}
-	for team in teams_at_comp:
-		try:
-			out[int(team)] = get_team_metrics_data(client, competition, int(team))
-		except:
-			pass
-	return out
-
-def get_pit_data_formatted(client, competition):
-	x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
-	x = x.json()
-	x = x['data']
-	x = x.keys()
-	out = {}
-	for i in x:
-		try:
-			out[int(i)] = get_team_pit_data(client, competition, int(i))
-		except:
-			pass
-	return out
-
-def get_pit_variable_data(client, competition):
+def get_match_data_formatted(apikey, competition):
+	client = pymongo.MongoClient(apikey)
+	db = client.data_scouting
+	mdata = db.teamlist
+	x=mdata.find_one({"competition":competition})
+	out = {}
+	for i in x:
+		try:
+			out[int(i)] = unkeyify_2l(get_team_match_data(apikey, competition, int(i)).transpose().to_dict())
+		except:
+			pass
+	return out
+
+def get_metrics_data_formatted(apikey, competition):
+	client = pymongo.MongoClient(apikey)
+	db = client.data_scouting
+	mdata = db.teamlist
+	x=mdata.find_one({"competition":competition})
+	out = {}
+	for i in x:
+		try:
+			out[int(i)] = d.get_team_metrics_data(apikey, competition, int(i))
+		except:
+			pass
+	return out
+
+def get_pit_data_formatted(apikey, competition):
+	client = pymongo.MongoClient(apikey)
+	db = client.data_scouting
+	mdata = db.teamlist
+	x=mdata.find_one({"competition":competition})
+	out = {}
+	for i in x:
+		try:
+			out[int(i)] = get_team_pit_data(apikey, competition, int(i))
+		except:
+			pass
+	return out
+
+def get_pit_variable_data(apikey, competition):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_processing
 	mdata = db.team_pit
 	out = {}
 	return mdata.find()

-def get_pit_variable_formatted(client, competition):
-	temp = get_pit_variable_data(client, competition)
+def get_pit_variable_formatted(apikey, competition):
+	temp = get_pit_variable_data(apikey, competition)
 	out = {}
 	for i in temp:
 		out[i["variable"]] = i["data"]
 	return out

-def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
+def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
+	client = pymongo.MongoClient(apikey)
 	db = client[dbname]
 	mdata = db[colname]
 	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)

-def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
+def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
+	client = pymongo.MongoClient(apikey)
 	db = client[dbname]
 	mdata = db[colname]
 	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)

-def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
+def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
+	client = pymongo.MongoClient(apikey)
 	db = client[dbname]
 	mdata = db[colname]
 	mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)

-def get_analysis_flags(client, flag):
+def get_analysis_flags(apikey, flag):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_processing
 	mdata = db.flags
 	return mdata.find_one({flag:{"$exists":True}})

-def set_analysis_flags(client, flag, data):
+def set_analysis_flags(apikey, flag, data):
+	client = pymongo.MongoClient(apikey)
 	db = client.data_processing
 	mdata = db.flags
 	return mdata.replace_one({flag:{"$exists":True}}, data, True)

@@ -110,90 +126,4 @@ def unkeyify_2l(layered_dict):
 			add.append([j,layered_dict[i][j]])
 		add.sort(key = lambda x: x[0])
 		out[i] = list(map(lambda x: x[1], add))
 	return out
def get_previous_time(client):
previous_time = get_analysis_flags(client, "latest_update")
if previous_time == None:
set_analysis_flags(client, "latest_update", 0)
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(client, current_time):
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
def get_database_config(client):
remote_config = get_analysis_flags(client, "config")
return remote_config["config"] if remote_config != None else None
def set_database_config(client, config):
set_analysis_flags(client, "config", {"config": config})
def load_match(client, competition):
return get_match_data_formatted(client, competition)
def load_metric(client, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = get_team_metrics_data(client, competition, team)
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def load_pit(client, competition):
return get_pit_data_formatted(client, competition)
def push_match(client, competition, results):
for team in results:
push_team_tests_data(client, competition, team, results[team])
def push_metric(client, competition, metric):
for team in metric:
push_team_metrics_data(client, competition, team, metric[team])
def push_pit(client, competition, pit):
for variable in pit:
push_team_pit_data(client, competition, variable, pit[variable])
def check_new_database_matches(client, competition):
return True
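The net effect of this file's diff is a switch back from a single shared PyMongo client to per-call clients created from the raw connection string. A sketch of the two calling conventions side by side (only one exists at a time, depending on which side of the diff is checked out; the connection string and competition code are placeholders):

# Sketch of the two data.py calling conventions visible in this diff.
import pymongo
import data as d

apikey = "mongodb+srv://user:password@cluster.example.net"   # placeholder connection string
competition = "2020ilch"

# Right-hand (restored) side: each helper receives the key and opens its own MongoClient.
pit = d.get_pit_data_formatted(apikey, competition)

# Left-hand (removed) side: one client is created up front and passed to every helper,
# which the 0.9.3 changelog later in this diff credits with cutting load time from ~120 s to ~14 s.
client = pymongo.MongoClient(apikey)
matches = d.get_match_data_formatted(client, competition)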

View File

@@ -1,141 +0,0 @@
# contains deprecated functions, not to be used unless nessasary!
import json
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1
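pull.py below still imports this deprecated load_config at module import time, so its C-style return-code contract matters: 0 means the file loaded, 1 means a sample config was written in its place. A minimal usage sketch:

# Sketch: the deprecated dep.load_config contract (0 = loaded, 1 = sample file written).
from dep import load_config

config = {}
if load_config("config.json", config) == 1:
	print("config.json was missing; a sample was written, fill in the keys and rerun")
else:
	print("loaded config sections:", sorted(config.keys()))   # e.g. ['persistent', 'variable']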

src/design.kv (new file, 151 lines)
View File

@@ -0,0 +1,151 @@
<Launch>:
orientation: "vertical"
NavigationLayout:
ScreenManager:
id: screen_manager
HomeScreen:
name: "Home"
BoxLayout:
orientation: "vertical"
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
GridLayout:
cols: 1
padding: 15, 15
spacing: 20, 20
MDTextFieldRect:
hint_text: "Console Log"
# size_hint: .8, None
# align: 'center'
# Widget:
SettingsScreen:
name: "Settings"
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
Widget:
InfoScreen:
name: "Info"
BoxLayout:
orientation: 'vertical'
MDToolbar:
title: screen_manager.current
elevation: 10
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
# GridLayout:
# cols: 2
# padding: 15, 15
# spacing: 20, 20
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "DB Key:"
halign: 'center'
MDTextField:
hint_text: "placeholder"
pos_hint: {"center_y": .5}
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "TBA Key:"
halign: 'center'
MDTextField:
hint_text: "placeholder"
pos_hint: {"center_y": .5}
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "CPU Use:"
halign: 'center'
MDLabel:
text: "placeholder"
halign: 'center'
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "Network:"
halign: 'center'
MDLabel:
text: "placeholder"
halign: 'center'
Widget:
BoxLayout:
orientation: "horizontal"
MDLabel:
text: "Progress"
halign: 'center'
MDProgressBar:
id: progress
value: 50
StatsScreen:
name: "Stats"
MDCheckbox:
size_hint: None, None
size: "48dp", "48dp"
pos_hint: {'center_x': .5, 'center_y': .5}
on_active: Screen.test()
#Navigation Drawer -------------------------
MDNavigationDrawer:
id: nav_drawer
BoxLayout:
orientation: "vertical"
padding: "8dp"
spacing: "8dp"
MDLabel:
text: "Titan Scouting"
font_style: "Button"
size_hint_y: None
height: self.texture_size[1]
MDLabel:
text: "Data Analysis"
font_style: "Caption"
size_hint_y: None
height: self.texture_size[1]
ScrollView:
MDList:
OneLineAvatarListItem:
text: "Home"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "left"
screen_manager.current = "Home"
IconLeftWidget:
icon: "home"
OneLineAvatarListItem:
text: "Settings"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Settings"
IconLeftWidget:
icon: "cog"
OneLineAvatarListItem:
text: "Info"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Info"
IconLeftWidget:
icon: "cog"
OneLineAvatarListItem:
text: "Stats"
on_press:
# nav_drawer.set_state("close")
# screen_manager.transition.direction = "right"
# screen_manager.fade
screen_manager.current = "Stats"
IconLeftWidget:
icon: "cog"

View File

@@ -1,7 +0,0 @@
class APIError(Exception):
def __init__(self, str):
super().__init__(str)
class ConfigurationError (Exception):
def __init__(self, str):
super().__init__(str)

View File

@@ -1,91 +0,0 @@
from logging import Logger as L
import datetime
import platform
import json
class Logger(L):
file = None
levels = {
0: "",
10:"[DEBUG] ",
20:"[INFO] ",
30:"[WARNING] ",
40:"[ERROR] ",
50:"[CRITICAL]",
}
targets = []
def __init__(self, verbose, profile, debug, file = None):
super().__init__("tra_logger")
self.file = file
if file != None:
self.targets.append(self._send_file)
if profile:
self.targets.append(self._send_null)
elif verbose:
self.targets.append(self._send_scli)
elif debug:
self.targets.append(self._send_scli)
else:
self.targets.append(self._send_null)
def _send_null(self, msg):
pass
def _send_scli(self, msg):
print(msg)
def _send_file(self, msg):
f = open(self.file, 'a')
f.write(msg + "\n")
f.close()
def get_time_formatted(self):
return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")
def log(self, level, msg):
for t in self.targets:
t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)
def debug(self, msg):
self.log(10, msg)
def info(self, msg):
self.log(20, msg)
def warning(self, msg):
self.log(30, msg)
def error(self, msg):
self.log(40, msg)
def critical(self, msg):
self.log(50, msg)
def splash(self, version):
def hrule():
self.log(0, "#"+38*"-"+"#")
def box(s):
temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
self.log(0, temp)
hrule()
box(" superscript version: " + version)
box(" os: " + platform.system())
box(" python: " + platform.python_version())
hrule()
def save_module_to_file(self, module, data, results):
f = open(module + ".log", "w")
json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
f.close()
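The left-hand superscript.py constructs this Logger as Logger(verbose, profile, debug, file = logfile) and then calls splash() plus the level methods. A condensed usage sketch of the class removed above:

# Sketch: driving the Logger the way the daemonized superscript did.
from interface import Logger

logger = Logger(verbose=True, profile=False, debug=False)   # verbose: messages go to stdout
logger.splash("1.0.0")                                      # boxed banner with version, os, python
logger.info("current time: 0")                              # timestamped, prefixed with [INFO]
logger.error("encountered an exception while running")      # [ERROR] level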

src/main.py (new file, 58 lines)
View File

@@ -0,0 +1,58 @@
from kivy.lang import Builder
from kivymd.uix.screen import Screen
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivymd.uix.menu import MDDropdownMenu, MDMenuItem
from kivymd.app import MDApp
# import superscript as ss
# from tra_analysis import analysis as an
import data as d
from collections import defaultdict
import json
import math
import numpy as np
import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
import time
import warnings
# global exec_threads
# Screens
class HomeScreen(Screen):
pass
class SettingsScreen(Screen):
pass
class InfoScreen(Screen):
pass
class StatsScreen(Screen):
pass
class MyApp(MDApp):
def build(self):
self.theme_cls.primary_palette = "Red"
return Builder.load_file("design.kv")
def test():
print("test")
if __name__ == "__main__":
MyApp().run()
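main.py is only the Python half of the GUI; the widget tree lives in design.kv above and is pulled in by Builder.load_file("design.kv") relative to the working directory. The same MDApp pattern in a self-contained form, for readers unfamiliar with KivyMD (a hypothetical demo using an inline kv string instead of design.kv so it runs outside the repo):

# demo_app.py - hypothetical minimal version of the pattern main.py follows.
from kivy.lang import Builder
from kivymd.app import MDApp

KV = """
MDLabel:
    text: "Titan Scouting - Data Analysis"
    halign: "center"
"""

class DemoApp(MDApp):
	def build(self):
		self.theme_cls.primary_palette = "Red"   # same palette main.py selects
		return Builder.load_string(KV)

if __name__ == "__main__":
	DemoApp().run()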

View File

@@ -1,321 +0,0 @@
import abc
import data as d
import signal
import numpy as np
from tra_analysis import Analysis as an
class Module(metaclass = abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__init__') and
callable(subclass.__init__) and
hasattr(subclass, 'validate_config') and
callable(subclass.validate_config) and
hasattr(subclass, 'run') and
callable(subclass.run)
)
@abc.abstractmethod
def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def validate_config(self, *args, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def run(self, *args, **kwargs):
raise NotImplementedError
class Match (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.load_match(self.apikey, self.competition)
def _simplestats(self, data_test):
signal.signal(signal.SIGINT, signal.SIG_IGN)
data = np.array(data_test[3])
data = data[np.isfinite(data)]
ranges = list(range(len(data)))
test = data_test[2]
if test == "basic_stats":
return an.basic_stats(data)
if test == "historical_analysis":
return an.histo_analysis([ranges, data])
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def _process_data(self):
tests = self.config["tests"]
data = self.data
input_vector = []
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
input_vector.append((team, variable, test, data[team][variable]))
self.data = input_vector
self.results = []
for test_var_data in self.data:
self.results.append(self._simplestats(test_var_data))
def _push_results(self):
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
result_filtered = self.results
input_vector = self.data
return_vector = AutoVivification()
i = 0
for result in result_filtered:
filtered = input_vector[i][2]
try:
short = short_mapping[filtered]
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
except KeyError: # not in mapping
return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result
i += 1
self.results = return_vector
d.push_match(self.apikey, self.competition, self.results)
class Metric (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
def _process_data(self):
elo_N = self.config["tests"]["elo"]["N"]
elo_K = self.config["tests"]["elo"]["K"]
matches = self.data
red = {}
blu = {}
for match in matches:
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
d.push_metric(self.apikey, self.competition, temp_vector)
def _push_results(self):
pass
class Pit (Module):
config = None
apikey = None
tbakey = None
timestamp = None
competition = None
data = None
results = None
def __init__(self, config, apikey, tbakey, timestamp, competition):
self.config = config
self.apikey = apikey
self.tbakey = tbakey
self.timestamp = timestamp
self.competition = competition
def validate_config(self):
return True, ""
def run(self):
self._load_data()
self._process_data()
self._push_results()
def _load_data(self):
self.data = d.load_pit(self.apikey, self.competition)
def _process_data(self):
tests = self.config["tests"]
return_vector = {}
for team in self.data:
for variable in self.data[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(self.data[team][variable])
self.results = return_vector
def _push_results(self):
d.push_pit(self.apikey, self.competition, self.results)
class Rating (Module):
pass
class Heatmap (Module):
pass
class Sentiment (Module):
pass
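The left-hand superscript.py drives these classes through a name-to-class map ({"match": Match, "metric": Metric, "pit": Pit}), constructing each module with the shared database client, TBA key, previous timestamp and competition, then calling validate_config() and run(). A condensed sketch of that dispatch, with those values passed in from outside:

# Sketch of the module dispatch used by the left-hand superscript.py.
from module import Match, Metric, Pit

MODULES = {"match": Match, "metric": Metric, "pit": Pit}

def run_modules(config_modules, client, tbakey, previous_time, competition):
	for name, module_config in config_modules.items():
		if name not in MODULES:
			continue
		module = MODULES[name](module_config, client, tbakey, previous_time, competition)
		valid, message = module.validate_config()   # each subclass above currently returns (True, "")
		if valid:
			module.run()                            # _load_data -> _process_data -> _push_results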

View File

@@ -1,63 +0,0 @@
import requests
from exceptions import APIError
from dep import load_config
url = "https://titanscouting.epochml.org"
config_tra = {}
load_config("config.json", config_tra)
trakey = config_tra['persistent']['key']['tra']
def get_team_competition():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json)
def get_team():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json)
def get_team_match_data(competition, team_num):
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json)
def get_teams_at_competition(competition):
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json)
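For reference, calling the removed pull.py helpers looked roughly like this. Note that pull.py reads CLIENT_ID and CLIENT_SECRET from config.json (persistent/key/tra) at import time via dep.load_config, so the config file must exist first (competition code below is a placeholder):

# Sketch: using the removed pull.py helpers against the TRA API.
import pull
from exceptions import APIError

try:
	teams = pull.get_teams_at_competition("2020ilch")   # returns a list of team-number strings
	print(len(teams), "teams listed")
except APIError as error:
	print("TRA API rejected the request:", error)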

View File

@@ -1,15 +1,18 @@
-cerberus
-dnspython
-numpy
-pyinstaller
-pylint
-pymongo
-pyparsing
-pytest
-python-daemon
-pyzmq
-requests
-scikit-learn
-scipy
-six
-tra-analysis
+requests
+pymongo
+pandas
+tra-analysis
+dnspython
+pyinstaller
+requests
+pymongo
+numpy
+scipy
+scikit-learn
+six
+pyparsing
+pandas
+kivy==2.0.0rc2

View File

@@ -3,43 +3,10 @@
 # Notes:
 # setup:
-__version__ = "1.0.0"
+__version__ = "0.8.6"
 # changelog should be viewed using print(analysis.__changelog__)
 __changelog__ = """changelog:
1.0.0:
- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
- linux superscript daemon has integrated websocket output to monitor progress/status remotely
- linux daemon now sends stderr to errorlog.log
- added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script
- reduced memory usage slightly by consolidating the unwrapped input data
- added debug option, which performs one loop of analysis and dumps results to local files
- added event and time delay options to config
- event delay pauses loop until even listener recieves an update
- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
- added options to pull config information from database (reatins option to use local config file)
- config-preference option selects between prioritizing local config and prioritizing database config
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
- divided config options between persistent ones (keys), and variable ones (everything else)
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
- module.py contains classes, each one represents a single data analysis routine
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
0.9.3:
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
0.9.2:
- removed unessasary imports from data
- minor changes to interface
0.9.1:
- fixed bugs in configuration item loading exception handling
0.9.0:
- moved printing and logging related functions to interface.py (changelog will stay in this file)
- changed function return files for load_config and save_config to standard C values (0 for success, 1 for error)
- added local variables for config location
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
 	0.8.6:
 		- added proper main function
 	0.8.5:
@@ -147,257 +114,514 @@ __author__ = (
"Jacob Levine <jlevine@imsa.edu>", "Jacob Levine <jlevine@imsa.edu>",
) )
__all__ = [
"load_config",
"save_config",
"get_previous_time",
"load_match",
"matchloop",
"load_metric",
"metricloop",
"load_pit",
"pitloop",
"push_match",
"push_metric",
"push_pit",
]
 # imports:
-import os, sys, time
-import pymongo # soon to be deprecated
-import traceback
+from tra_analysis import analysis as an
+import data as d
+from collections import defaultdict
+import json
+import math
+import numpy as np
+import os
+from os import system, name
+from pathlib import Path
+from multiprocessing import Pool
+import platform
+import sys
+import time
 import warnings
-from config import Configuration, ConfigurationError
-from data import get_previous_time, set_current_time, check_new_database_matches
-from interface import Logger
-from module import Match, Metric, Pit
-import zmq
-config_path = "config.json"
+global exec_threads
def main(logger, verbose, profile, debug, socket_send = None): def main():
def close_all(): global exec_threads
if "client" in locals():
client.close() sys.stderr = open("errorlog.txt", "w")
warnings.filterwarnings("ignore") warnings.filterwarnings("ignore")
logger.splash(__version__) splash()
modules = {"match": Match, "metric": Metric, "pit": Pit} while (True):
while True:
try: try:
loop_start = time.time() current_time = time.time()
print("[OK] time: " + str(current_time))
logger.info("current time: " + str(loop_start)) config = load_config("config.json")
socket_send("current time: " + str(loop_start)) competition = config["competition"]
match_tests = config["statistics"]["match"]
pit_tests = config["statistics"]["pit"]
metrics_tests = config["statistics"]["metric"]
print("[OK] configs loaded")
config = Configuration(config_path) print("[OK] starting threads")
cfg_max_threads = config["max-threads"]
logger.info("found and loaded config at <" + config_path + ">") sys_max_threads = os.cpu_count()
socket_send("found and loaded config at <" + config_path + ">") if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
apikey, tbakey = config.database, config.tba elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
logger.info("found and loaded database and tba keys") elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
socket_send("found and loaded database and tba keys") alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
client = pymongo.MongoClient(apikey) alloc_processes = sys_max_threads
logger.info("established connection to database")
socket_send("established connection to database")
previous_time = get_previous_time(client)
logger.info("analysis backtimed to: " + str(previous_time))
socket_send("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition
for m in config_modules:
if m in modules:
start = time.time()
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
valid = current_module.validate_config()
if not valid:
continue
current_module.run()
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
if debug:
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
set_current_time(client, loop_start)
close_all()
logger.info("closed threads and database client")
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
socket_send("closed threads and database client")
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile:
return 0
if debug:
return 0
event_delay = config["variable"]["event-delay"]
if event_delay:
logger.info("loop delayed until database returns new matches")
socket_send("loop delayed until database returns new matches")
new_match = False
while not new_match:
time.sleep(1)
new_match = check_new_database_matches(client, competition)
logger.info("database returned new matches")
socket_send("database returned new matches")
else: else:
loop_delay = float(config["variable"]["loop-delay"]) print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
remaining_time = loop_delay - (time.time() - loop_start) exit()
if remaining_time > 0: exec_threads = Pool(processes = alloc_processes)
logger.info("loop delayed by " + str(remaining_time) + " seconds") print("[OK] " + str(alloc_processes) + " threads started")
socket_send("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time)
apikey = config["key"]["database"]
tbakey = config["key"]["tba"]
print("[OK] loaded keys")
previous_time = get_previous_time(apikey)
print("[OK] analysis backtimed to: " + str(previous_time))
print("[OK] loading data")
start = time.time()
match_data = load_match(apikey, competition)
pit_data = load_pit(apikey, competition)
print("[OK] loaded data in " + str(time.time() - start) + " seconds")
print("[OK] running match stats")
start = time.time()
matchloop(apikey, competition, match_data, match_tests)
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
print("[OK] running team metrics")
start = time.time()
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
print("[OK] running pit analysis")
start = time.time()
pitloop(apikey, competition, pit_data, pit_tests)
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
set_current_time(apikey, current_time)
print("[OK] finished all tests, looping")
print_hrule()
except KeyboardInterrupt: except KeyboardInterrupt:
close_all() print("\n[OK] caught KeyboardInterrupt, killing processes")
logger.info("detected KeyboardInterrupt, exiting") exec_threads.terminate()
socket_send("detected KeyboardInterrupt, exiting") print("[OK] processes killed, exiting")
return 0 exit()
except ConfigurationError as e: else:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered a configuration error: " + str(e))
logger.error(str_e)
socket_send("encountered a configuration error: " + str(e))
socket_send(str_e)
close_all()
return 1
except Exception as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered an exception while running")
logger.error(str_e)
socket_send("encountered an exception while running")
socket_send(str_e)
close_all()
return 1
def start(pid_path, verbose, profile, debug):
if profile:
def send(msg):
pass pass
logger = Logger(verbose, profile, debug) #clear()
import cProfile, pstats, io def clear():
profile = cProfile.Profile()
profile.enable() # for windows
exit_code = main(logger, verbose, profile, debug, socket_send = send) if name == 'nt':
profile.disable() _ = system('cls')
f = open("profile.txt", 'w+')
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime') # for mac and linux(here, os.name is 'posix')
ps.print_stats() else:
sys.exit(exit_code) _ = system('clear')
elif verbose: def print_hrule():
def send(msg): print("#"+38*"-"+"#")
pass
logger = Logger(verbose, profile, debug) def print_box(s):
exit_code = main(logger, verbose, profile, debug, socket_send = send) temp = "|"
sys.exit(exit_code) temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)
elif debug: def splash():
def send(msg): print_hrule()
pass print_box(" superscript version: " + __version__)
print_box(" os: " + platform.system())
print_box(" python: " + platform.python_version())
print_hrule()
logger = Logger(verbose, profile, debug) def load_config(file):
exit_code = main(logger, verbose, profile, debug, socket_send = send) config_vector = {}
sys.exit(exit_code)
try:
f = open(file)
except:
print("[ERROR] could not locate config.json, generating blank config.json and exiting")
f = open(file, "w")
f.write(sample_json)
exit()
config_vector = json.load(f)
return config_vector
def save_config(file, config_vector):
with open(file) as f:
json.dump(config_vector, f)
def get_previous_time(apikey):
previous_time = d.get_analysis_flags(apikey, "latest_update")
if previous_time == None:
d.set_analysis_flags(apikey, "latest_update", 0)
previous_time = 0
else: else:
logfile = "logfile.log" previous_time = previous_time["latest_update"]
f = open(logfile, 'w+') return previous_time
f.close()
e = open('errorlog.log', 'w+') def set_current_time(apikey, current_time):
with daemon.DaemonContext(
working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = e
):
context = zmq.Context() d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678")
socket.send(b'status')
def send(msg): def load_match(apikey, competition):
socket.send(bytes("status: " + msg, "utf-8"))
logger = Logger(verbose, profile, debug, file = logfile) return d.get_match_data_formatted(apikey, competition)
exit_code = main(logger, verbose, profile, debug, socket_send = send) def simplestats(data_test):
socket.close() data = np.array(data_test[0])
f.close() data = data[np.isfinite(data)]
ranges = list(range(len(data)))
sys.exit(exit_code)
def stop(pid_path): test = data_test[1]
try:
pf = open(pid_path, 'r') if test == "basic_stats":
pid = int(pf.read().strip()) return an.basic_stats(data)
pf.close()
except IOError: if test == "historical_analysis":
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n") return an.histo_analysis([ranges, data])
return
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
global exec_threads
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
return_vector = {}
team_filtered = []
variable_filtered = []
variable_data = []
test_filtered = []
result_filtered = []
return_vector = AutoVivification()
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
team_filtered.append(team)
variable_filtered.append(variable)
variable_data.append((data[team][variable], test))
test_filtered.append(test)
result_filtered = exec_threads.map(simplestats, variable_data)
i = 0
result_filtered = list(result_filtered)
for result in result_filtered:
filtered = test_filtered[i]
try:
short = short_mapping[filtered]
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
except KeyError: # not in mapping
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
i += 1
push_match(apikey, competition, return_vector)
def load_metric(apikey, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = d.get_team_metrics_data(apikey, competition, team)
if d.get_team_metrics_data(apikey, competition, team) == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.01)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pid_path):
os.remove(pid_path)
else: else:
traceback.print_exc(file = sys.stderr)
sys.exit(1)
def restart(pid_path): metrics = db_data["metrics"]
stop(pid_path)
start(pid_path, False, False, False) elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
elo_N = metrics["elo"]["N"]
elo_K = metrics["elo"]["K"]
matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
red = {}
blu = {}
for match in matches:
red = load_metric(apikey, competition, match, "red", metrics)
blu = load_metric(apikey, competition, match, "blue", metrics)
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
push_metric(apikey, competition, temp_vector)
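# worked illustration of the alliance-delta logic in metricloop (hypothetical numbers, assuming the
# library's standard Elo update new = old + K * (observed - expected) with expected = 1 / (1 + 10 ** ((opp - own) / N))):
# with elo_N = 400, elo_K = 24, red_elo = blu_elo = 1500 and a red win, expected = 0.5, so
# red_elo_delta = 24 * (1 - 0.5) = +12 and blu_elo_delta = -12; every team on an alliance then
# receives the same delta, because each alliance is rated by the average of its members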
def load_pit(apikey, competition):
return d.get_pit_data_formatted(apikey, competition)
def pitloop(apikey, competition, pit, tests):
return_vector = {}
for team in pit:
for variable in pit[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(pit[team][variable])
push_pit(apikey, competition, return_vector)
def push_match(apikey, competition, results):
for team in results:
d.push_team_tests_data(apikey, competition, team, results[team])
def push_metric(apikey, competition, metric):
for team in metric:
d.push_team_metrics_data(apikey, competition, team, metric[team])
def push_pit(apikey, competition, pit):
for variable in pit:
d.push_team_pit_data(apikey, competition, variable, pit[variable])
def get_team_metrics(apikey, tbakey, competition):
metrics = d.get_metrics_data_formatted(apikey, competition)
elo = {}
gl2 = {}
for team in metrics:
elo[team] = metrics[team]["metrics"]["elo"]["score"]
gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
elo_ranked = []
for team in elo:
elo_ranked.append({"team": str(team), "elo": str(elo[team])})
gl2_ranked = []
for team in gl2:
gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
sample_json = """{
"max-threads": 0.5,
"team": "",
"competition": "2020ilch",
"key":{
"database":"",
"tba":""
},
"statistics":{
"match":{
"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
},
"metric":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
},
"pit":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}"""
if __name__ == "__main__":
	if sys.platform.startswith('win'):
		start(None, verbose = True)
else:
import daemon
from daemon import pidfile
from signal import SIGTERM
pid_path = "tra-daemon.pid"
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
start(pid_path, False, False, False)
elif 'stop' == sys.argv[1]:
stop(pid_path)
elif 'restart' == sys.argv[1]:
restart(pid_path)
elif 'verbose' == sys.argv[1]:
start(None, True, False, False)
elif 'profile' == sys.argv[1]:
start(None, False, True, False)
elif 'debug' == sys.argv[1]:
start(None, False, False, True)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
src/superscript.spec Normal file
View File

@@ -0,0 +1,37 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['superscript.py'],
pathex=['/workspaces/tra-data-analysis/src'],
binaries=[],
datas=[],
hiddenimports=[
"dnspython",
"sklearn.utils._weight_vector",
"requests",
],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[('W ignore', None, 'OPTION')],
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
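A spec like the one above is normally consumed directly by PyInstaller, for example by running "pyinstaller superscript.spec" from the src directory (assuming PyInstaller is installed in the build environment). The [('W ignore', None, 'OPTION')] entry passes -W ignore to the bundled interpreter, so Python warnings are suppressed in the packaged executable.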
View File
@@ -1,14 +0,0 @@
import signal
import zmq
signal.signal(signal.SIGINT, signal.SIG_DFL)
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')
while True:
message = socket.recv_multipart()
print(f'Received: {message}')
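For context, the deleted listener above expects a matching publisher bound on port 5678. A minimal, hypothetical counterpart (not part of the repository, shown only to illustrate the PUB/SUB wiring) could look like:

import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind('tcp://*:5678')
# first frame is the topic the subscriber filters on, second frame is the payload
socket.send_multipart([b'status', b'superscript is alive'])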