Mirror of https://github.com/titanscouting/tra-superscript.git
Synced 2025-09-26 07:10:18 +00:00

Compare commits: snyk-fix-4...gui (127 commits)
Commits (SHA1):
9c152fb109, 491508a400, b0ffdbab9a, c4bb633e08, c46e74036b, c08d3d3ae7, 6cfe4e3f33, 40673169b2,
7d08194636, a940f3ffeb, db4b020851, 18d4438f7f, c77dd1ea5f, 74542c1d8f, b4c9ef22b6, af74a69579,
ac66545226, 4b5db1aba8, b9610b7b46, 14ed3cc507, c90a35ff51, 60e88a5987, 9c2946718f, bd0c0d99f9,
9ddf61621e, 0af93238be, 6c8e738420, 0eccc32096, f8f8543ea2, a1fb295a1a, 4ded02b34f, 88282aa18b,
66bf3a3a3e, 375b550153, 79dd2c3479, d2ced284ea, 07965cba4b, 36081e1239, 49ee02d6fa, 08f5ba987a,
dafd66447f, 6bcb3cbff4, 6cd092da37, 8540642bef, 5d0fbc06c6, a4a13c7cb5, 26d9d962c0, e20a212bd9,
1151541976, 6f28e507a1, 1e6bc1926a, d89f997117, 31423d04d8, 44e9711b2d, f24d5163d7, 06630b0dd4,
32ae4fd636, 96ebb82085, 09b8cca884, fb1033e92c, 16882a0a75, 2b8ecc5bee, 8112effbce, 56d3a0adcd,
2444963af9, 8000a7314d, 3e212e4502, 66e00987c4, 2f90e7d11a, f0ef4fea5d, 9be9008ae1, 097fd2836b,
052788afb9, e4eb824f51, 11d3db4b44, 76f78047b3, 20f2040a1a, ffead9e240, 0287b5c0e2, b6a1dfedb9,
cafb773d8b, 871b313d95, bcbb653696, b4c7365bf0, c2f35f4cb2, 2ebd2cba8a, 6819aaf143, b3ab9156db,
30641e43d8, 3a068654ed, 962061007b, 91f34a8d74, 19bca6967c, fb2ea60fea, 5d95913467, 7e800c9004,
3c7262498c, 4c65e88903, b3c26ce2cf, 089ff7ec01, b561c51d21, 40191aa6b5, 6c385b5bd3, 727398d32f,
d16ef53457, a367e7254b, 78f737d45f, 651ae0c2db, 2619f9a729, 08292d5dc8, 75dde2171c, 014570930a,
7abfb2d90a, 8f79be68d4, 5a454e0e39, e6e0351288, 70afd23f2c, 8ede63ed04, 0899d9b099, 51b4943307,
d093cc2423, f5f0c03218, 4872af581a, d21ba90557, 4b26e4c531, bc0405665c, 3273bdef5d
@@ -1,2 +1,6 @@
FROM python
WORKDIR ~/
FROM python:slim
WORKDIR /
RUN apt-get -y update; apt-get -y upgrade
RUN apt-get -y install git
COPY requirements.txt .
RUN pip install -r requirements.txt
@@ -5,24 +5,18 @@
},
"settings": {
"terminal.integrated.shell.linux": "/bin/bash",
"python.pythonPath": "/usr/local/bin/python",
"python.pythonPath": "",
"python.linting.enabled": true,
"python.linting.pylintEnabled": true,
"python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8",
"python.formatting.blackPath": "/usr/local/py-utils/bin/black",
"python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf",
"python.linting.banditPath": "/usr/local/py-utils/bin/bandit",
"python.linting.flake8Path": "/usr/local/py-utils/bin/flake8",
"python.linting.mypyPath": "/usr/local/py-utils/bin/mypy",
"python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle",
"python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle",
"python.linting.pylintPath": "/usr/local/py-utils/bin/pylint",
"python.testing.pytestPath": "/usr/local/py-utils/bin/pytest"
"python.linting.pylintPath": "",
"python.testing.pytestPath": "",
"editor.tabSize": 4,
"editor.insertSpaces": false
},
"extensions": [
"mhutchie.git-graph",
"ms-python.python",
"waderyan.gitblame"
],
"postCreateCommand": "apt install vim -y ; pip install -r src/requirements.txt ; pip install pylint ; pip install tra-analysis; pip install pytest"
}
"postCreateCommand": ""
}
.devcontainer/requirements.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
cerberus
dnspython
numpy
pyinstaller
pylint
pymongo
pyparsing
pytest
python-daemon
pyzmq
requests
scikit-learn
scipy
six
tra-analysis
.gitattributes (new file, 4 lines)
@@ -0,0 +1,4 @@
# Auto detect text files and perform LF normalization
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
.github/workflows/build-cli.yml (new file, 35 lines)
@@ -0,0 +1,35 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

name: Build Superscript Linux

on:
  release:
    types: [published, edited]

jobs:
  generate:
    name: Build Linux
    runs-on: ubuntu-latest
    steps:
    - name: Checkout master
      uses: actions/checkout@master
    - name: Install Dependencies
      run: pip install -r requirements.txt
      working-directory: src/
    - name: Give Execute Permission
      run: chmod +x build-CLI.sh
      working-directory: build/
    - name: Build Binary
      run: ./build-CLI.sh
      working-directory: build/
    - name: Copy Binary to Root Dir
      run: cp superscript ..
      working-directory: dist/
    - name: Upload Release Asset
      uses: svenstaro/upload-release-action@v2
      with:
        repo_token: ${{ secrets.GITHUB_TOKEN }}
        file: superscript
        asset_name: superscript
        tag: ${{ github.ref }}
@@ -17,8 +17,6 @@ jobs:
matrix:
python-version: [3.7, 3.8]

env:
working-directory: ./data-analysis/

steps:
- uses: actions/checkout@v2
@@ -30,9 +28,7 @@
run: |
python -m pip install --upgrade pip
pip install pytest
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
working-directory: ${{ env.working-directory }}
if [ -f src/requirements.txt ]; then pip install -r src/requirements.txt; fi
- name: Test with pytest
run: |
pytest
working-directory: ${{ env.working-directory }}
pytest test/
.gitignore (14 changes)
@@ -4,6 +4,16 @@
**/.pytest_cache/
**/*.pyc

**/build/
**/*.egg-info/
**/dist/
**/config.json
**/tra_analysis/
**/temp/*

**/*.pid

**/profile.*

**/*.log
**/errorlog.txt
/dist/superscript.*
/dist/superscript
LICENSE (3 changes)
@@ -1,6 +1,7 @@
BSD 3-Clause License

Copyright (c) 2020, Titan Scouting
Copyright (c) 2021, Titan Scouting

All rights reserved.

Redistribution and use in source and binary forms, with or without
@@ -42,4 +42,5 @@ don't worry, you may have just not configured the application correctly, but wou
---

# Build Statuses



Coming soon!
build/build-CLI.bat (new file, 5 lines)
@@ -0,0 +1,5 @@
set pathtospec="../src/cli/superscript.spec"
set pathtodist="../dist/"
set pathtowork="temp/"

pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
build/build-CLI.sh (new file, 5 lines)
@@ -0,0 +1,5 @@
pathtospec="../src/cli/superscript.spec"
pathtodist="../dist/"
pathtowork="temp/"

pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
src/cli/config.py (new file, 244 lines)
@@ -0,0 +1,244 @@
import math
import json
from multiprocessing import Pool
import os
from cerberus import Validator
from exceptions import ConfigurationError

from data import set_database_config, get_database_config
from interface import stderr, stdout, INF, ERR

config_path = "config.json"

sample_json = """
{
    "persistent":{
        "key":{
            "database":"",
            "tba":"",
            "tra":{
                "CLIENT_ID":"",
                "CLIENT_SECRET":"",
                "url": ""
            }
        },
        "config-preference":"local",
        "synchronize-config":false
    },
    "variable":{
        "max-threads":0.5,
        "team":"",
        "event-delay":false,
        "loop-delay":0,
        "reportable":true,
        "teams":[

        ],
        "modules":{
            "match":{
                "tests":{
                    "balls-blocked":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-collected":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-lower-teleop":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-lower-auto":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-started":[
                        "basic_stats",
                        "historical_analyss",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-upper-teleop":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ],
                    "balls-upper-auto":[
                        "basic_stats",
                        "historical_analysis",
                        "regression_linear",
                        "regression_logarithmic",
                        "regression_exponential",
                        "regression_polynomial",
                        "regression_sigmoidal"
                    ]
                }
            },
            "metric":{
                "tests":{
                    "elo":{
                        "score":1500,
                        "N":400,
                        "K":24
                    },
                    "gl2":{
                        "score":1500,
                        "rd":250,
                        "vol":0.06
                    },
                    "ts":{
                        "mu":25,
                        "sigma":8.33
                    }
                }
            },
            "pit":{
                "tests":{
                    "wheel-mechanism":true,
                    "low-balls":true,
                    "high-balls":true,
                    "wheel-success":true,
                    "strategic-focus":true,
                    "climb-mechanism":true,
                    "attitude":true
                }
            }
        }
    }
}
"""

def parse_config_persistent(send, config):

    v = Validator(load_validation_schema(), allow_unknown = True)
    isValidated = v.validate(config)

    if not isValidated:
        raise ConfigurationError(v.errors, 101)

    apikey = config["persistent"]["key"]["database"]
    tbakey = config["persistent"]["key"]["tba"]
    preference = config["persistent"]["config-preference"]
    sync = config["persistent"]["synchronize-config"]

    return apikey, tbakey, preference, sync

def parse_config_variable(send, config):

    sys_max_threads = os.cpu_count()
    try:
        cfg_max_threads = config["variable"]["max-threads"]
    except:
        raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109)
    if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
        alloc_processes = sys_max_threads + cfg_max_threads
    elif cfg_max_threads > 0 and cfg_max_threads < 1:
        alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
    elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
        alloc_processes = cfg_max_threads
    elif cfg_max_threads == 0:
        alloc_processes = sys_max_threads
    else:
        raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, 110)
    try:
        exec_threads = Pool(processes = alloc_processes)
    except Exception as e:
        send(stderr, INF, e)
        raise ConfigurationError("unable to start threads", 200)
    send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")

    try:
        modules = config["variable"]["modules"]
    except:
        raise ConfigurationError("variable/modules field is invalid or missing", 102)

    if modules == None:
        raise ConfigurationError("variable/modules field is empty", 106)

    send(stdout, INF, "found and loaded competition, match, metrics, pit from config")

    return exec_threads, modules

def resolve_config_conflicts(send, client, config, preference, sync):

    if sync:
        if preference == "local" or preference == "client":
            send(stdout, INF, "config-preference set to local/client, loading local config information")
            remote_config = get_database_config(client)
            if remote_config != config["variable"]:
                set_database_config(client, config["variable"])
                send(stdout, INF, "database config was different and was updated")
            return config
        elif preference == "remote" or preference == "database":
            send(stdout, INF, "config-preference set to remote/database, loading remote config information")
            remote_config= get_database_config(client)
            if remote_config != config["variable"]:
                config["variable"] = remote_config
                if save_config(config_path, config):
                    raise ConfigurationError("local config was different but could not be updated", 121)
                send(stdout, INF, "local config was different and was updated")
            return config
        else:
            raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
    else:
        if preference == "local" or preference == "client":
            send(stdout, INF, "config-preference set to local/client, loading local config information")
            return config
        elif preference == "remote" or preference == "database":
            send(stdout, INF, "config-preference set to remote/database, loading database config information")
            config["variable"] = get_database_config(client)
            return config
        else:
            raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)

def load_config(path, config_vector):
    try:
        f = open(path, "r")
        config_vector.update(json.load(f))
        f.close()
        return 0
    except:
        f = open(path, "w")
        f.write(sample_json)
        f.close()
        return 1

def load_validation_schema():
    try:
        with open("validation-schema.json", "r") as f:
            return json.load(f)
    except:
        raise FileNotFoundError("Validation schema not found at validation-schema.json")

def save_config(path, config_vector):
    f = open(path, "w+")
    json.dump(config_vector, f, ensure_ascii=False, indent=4)
    f.close()
    return 0
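As a quick illustration of the max-threads semantics that parse_config_variable implements above, the sketch below uses hypothetical values and assumes an 8-core machine; it is not part of the repository.

import math

sys_max_threads = 8  # example stand-in for os.cpu_count()

# mirrors the branches in parse_config_variable
for cfg in (-2, 0.5, 4, 0):
    if -sys_max_threads < cfg < 0:
        alloc = sys_max_threads + cfg               # leave |cfg| cores free -> 6
    elif 0 < cfg < 1:
        alloc = math.floor(cfg * sys_max_threads)   # fraction of the cores -> 4
    elif 1 < cfg <= sys_max_threads:
        alloc = cfg                                 # explicit process count -> 4
    elif cfg == 0:
        alloc = sys_max_threads                     # use every core -> 8
    print(cfg, "->", alloc)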
src/cli/data.py (new file, 199 lines)
@@ -0,0 +1,199 @@
import requests
import pull
import pandas as pd

def pull_new_tba_matches(apikey, competition, cutoff):
    api_key= apikey
    x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}, verify=False)
    out = []
    for i in x.json():
        if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
            out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
    return out

def get_team_match_data(client, competition, team_num):
    db = client.data_scouting
    mdata = db.matchdata
    out = {}
    for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
        out[i['match']] = i['data']
    return pd.DataFrame(out)

def get_team_pit_data(client, competition, team_num):
    db = client.data_scouting
    mdata = db.pitdata
    out = {}
    return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]

def get_team_metrics_data(client, competition, team_num):
    db = client.data_processing
    mdata = db.team_metrics
    return mdata.find_one({"competition" : competition, "team": team_num})

def get_match_data_formatted(client, competition):
    teams_at_comp = pull.get_teams_at_competition(competition)
    out = {}
    for team in teams_at_comp:
        try:
            out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
        except:
            pass
    return out

def get_metrics_data_formatted(client, competition):
    teams_at_comp = pull.get_teams_at_competition(competition)
    out = {}
    for team in teams_at_comp:
        try:
            out[int(team)] = get_team_metrics_data(client, competition, int(team))
        except:
            pass
    return out

def get_pit_data_formatted(client, competition):
    x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
    x = x.json()
    x = x['data']
    x = x.keys()
    out = {}
    for i in x:
        try:
            out[int(i)] = get_team_pit_data(client, competition, int(i))
        except:
            pass
    return out

def get_pit_variable_data(client, competition):
    db = client.data_processing
    mdata = db.team_pit
    out = {}
    return mdata.find()

def get_pit_variable_formatted(client, competition):
    temp = get_pit_variable_data(client, competition)
    out = {}
    for i in temp:
        out[i["variable"]] = i["data"]
    return out

def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)

def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)

def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
    db = client[dbname]
    mdata = db[colname]
    mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)

def get_analysis_flags(client, flag):
    db = client.data_processing
    mdata = db.flags
    return mdata.find_one({flag:{"$exists":True}})

def set_analysis_flags(client, flag, data):
    db = client.data_processing
    mdata = db.flags
    return mdata.replace_one({flag:{"$exists":True}}, data, True)

def unkeyify_2l(layered_dict):
    out = {}
    for i in layered_dict.keys():
        add = []
        sortkey = []
        for j in layered_dict[i].keys():
            add.append([j,layered_dict[i][j]])
        add.sort(key = lambda x: x[0])
        out[i] = list(map(lambda x: x[1], add))
    return out

def get_previous_time(client):

    previous_time = get_analysis_flags(client, "latest_update")

    if previous_time == None:

        set_analysis_flags(client, "latest_update", 0)
        previous_time = 0

    else:

        previous_time = previous_time["latest_update"]

    return previous_time

def set_current_time(client, current_time):

    set_analysis_flags(client, "latest_update", {"latest_update":current_time})

def get_database_config(client):

    remote_config = get_analysis_flags(client, "config")
    return remote_config["config"] if remote_config != None else None

def set_database_config(client, config):

    set_analysis_flags(client, "config", {"config": config})

def load_match(client, competition):

    return get_match_data_formatted(client, competition)

def load_metric(client, competition, match, group_name, metrics):

    group = {}

    for team in match[group_name]:

        db_data = get_team_metrics_data(client, competition, team)

        if db_data == None:

            elo = {"score": metrics["elo"]["score"]}
            gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
            ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}

            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

        else:

            metrics = db_data["metrics"]

            elo = metrics["elo"]
            gl2 = metrics["gl2"]
            ts = metrics["ts"]

            group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

    return group

def load_pit(client, competition):

    return get_pit_data_formatted(client, competition)

def push_match(client, competition, results):

    for team in results:

        push_team_tests_data(client, competition, team, results[team])

def push_metric(client, competition, metric):

    for team in metric:

        push_team_metrics_data(client, competition, team, metric[team])

def push_pit(client, competition, pit):

    for variable in pit:

        push_team_pit_data(client, competition, variable, pit[variable])

def check_new_database_matches(client, competition):

    return True
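For reference, unkeyify_2l above flattens a two-level dict into per-key lists ordered by the inner keys. A minimal, hypothetical example of its behaviour (assuming data.py is importable):

from data import unkeyify_2l

layered = {2022: {"match3": 5, "match1": 2, "match2": 4}}
# inner dicts are sorted by key ("match1", "match2", "match3"),
# then reduced to their values
print(unkeyify_2l(layered))  # -> {2022: [2, 4, 5]}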
src/cli/exceptions.py (new file, 11 lines)
@@ -0,0 +1,11 @@
class APIError(Exception):
    code = None
    def __init__(self, str, endpoint):
        super().__init__(str)
        self.endpoint = endpoint

class ConfigurationError (Exception):
    code = None
    def __init__(self, str, code):
        super().__init__(str)
        self.code = code
src/cli/interface.py (new file, 44 lines)
@@ -0,0 +1,44 @@
import sys
import time
from os import system, name
import platform

empty_delim = " "
hard_divided_delim = "|"
soft_divided_delim = "|"
l_brack = "["
r_brack = "]"

ERR = "[ERR]"
INF = "[INF]"

stdout = sys.stdout
stderr = sys.stderr

def log(target, level, message, code = 0):

    message = time.ctime() + empty_delim + str(level) + l_brack + f"{code:+05}" + r_brack + empty_delim + soft_divided_delim + empty_delim + message
    print(message, file = target)

def clear():
    if name == "nt":
        system("cls")
    else:
        system("clear")

def splash(version):

    def hrule():
        print("#"+38*"-"+"#")
    def box(s):
        temp = "|"
        temp += s
        temp += (40-len(s)-2)*" "
        temp += "|"
        print(temp)

    hrule()
    box(" superscript version: " + version)
    box(" os: " + platform.system())
    box(" python: " + platform.python_version())
    hrule()
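A short sketch of what log above emits: a ctime timestamp, the level tag, a zero-padded signed code in brackets, then the message. The timestamps below are illustrative only.

from interface import log, stdout, INF, ERR

log(stdout, INF, "established connection to database")
# e.g. Mon May  3 12:00:00 2021 [INF][+0000] | established connection to database
log(stdout, ERR, "encountered a configuration error", code = 110)
# e.g. Mon May  3 12:00:00 2021 [ERR][+0110] | encountered a configuration error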
src/cli/module.py (new file, 323 lines)
@@ -0,0 +1,323 @@
import abc
import data as d
import signal
import numpy as np
from tra_analysis import Analysis as an

class Module(metaclass = abc.ABCMeta):

    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, '__init__') and
                callable(subclass.__init__) and
                hasattr(subclass, 'validate_config') and
                callable(subclass.validate_config) and
                hasattr(subclass, 'run') and
                callable(subclass.run)
                )
    @abc.abstractmethod
    def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
        raise NotImplementedError
    @abc.abstractmethod
    def validate_config(self, *args, **kwargs):
        raise NotImplementedError
    @abc.abstractmethod
    def run(self, exec_threads, *args, **kwargs):
        raise NotImplementedError

class Match (Module):

    config = None
    apikey = None
    tbakey = None
    timestamp = None
    competition = None

    data = None
    results = None

    def __init__(self, config, apikey, tbakey, timestamp, competition):
        self.config = config
        self.apikey = apikey
        self.tbakey = tbakey
        self.timestamp = timestamp
        self.competition = competition

    def validate_config(self):
        return True, ""

    def run(self, exec_threads):
        self._load_data()
        self._process_data(exec_threads)
        self._push_results()

    def _load_data(self):
        self.data = d.load_match(self.apikey, self.competition)

    def _simplestats(self, data_test):

        signal.signal(signal.SIGINT, signal.SIG_IGN)

        data = np.array(data_test[3])
        data = data[np.isfinite(data)]
        ranges = list(range(len(data)))

        test = data_test[2]

        if test == "basic_stats":
            return an.basic_stats(data)

        if test == "historical_analysis":
            return an.histo_analysis([ranges, data])

        if test == "regression_linear":
            return an.regression(ranges, data, ['lin'])

        if test == "regression_logarithmic":
            return an.regression(ranges, data, ['log'])

        if test == "regression_exponential":
            return an.regression(ranges, data, ['exp'])

        if test == "regression_polynomial":
            return an.regression(ranges, data, ['ply'])

        if test == "regression_sigmoidal":
            return an.regression(ranges, data, ['sig'])

    def _process_data(self, exec_threads):

        tests = self.config["tests"]
        data = self.data

        input_vector = []

        for team in data:

            for variable in data[team]:

                if variable in tests:

                    for test in tests[variable]:

                        input_vector.append((team, variable, test, data[team][variable]))

        self.data = input_vector
        #self.results = list(exec_threads.map(self._simplestats, self.data))
        self.results = []
        for test_var_data in self.data:
            self.results.append(self._simplestats(test_var_data))

    def _push_results(self):

        short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}

        class AutoVivification(dict):
            def __getitem__(self, item):
                try:
                    return dict.__getitem__(self, item)
                except KeyError:
                    value = self[item] = type(self)()
                    return value

        result_filtered = self.results
        input_vector = self.data

        return_vector = AutoVivification()

        i = 0

        for result in result_filtered:

            filtered = input_vector[i][2]

            try:
                short = short_mapping[filtered]
                return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result[short]
            except KeyError: # not in mapping
                return_vector[input_vector[i][0]][input_vector[i][1]][input_vector[i][2]] = result

            i += 1

        self.results = return_vector

        d.push_match(self.apikey, self.competition, self.results)

class Metric (Module):

    config = None
    apikey = None
    tbakey = None
    timestamp = None
    competition = None

    data = None
    results = None

    def __init__(self, config, apikey, tbakey, timestamp, competition):
        self.config = config
        self.apikey = apikey
        self.tbakey = tbakey
        self.timestamp = timestamp
        self.competition = competition

    def validate_config(self):
        return True, ""

    def run(self, exec_threads):
        self._load_data()
        self._process_data(exec_threads)
        self._push_results()

    def _load_data(self):
        self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)

    def _process_data(self, exec_threads):

        elo_N = self.config["tests"]["elo"]["N"]
        elo_K = self.config["tests"]["elo"]["K"]

        matches = self.data

        red = {}
        blu = {}

        for match in matches:

            red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
            blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])

            elo_red_total = 0
            elo_blu_total = 0

            gl2_red_score_total = 0
            gl2_blu_score_total = 0

            gl2_red_rd_total = 0
            gl2_blu_rd_total = 0

            gl2_red_vol_total = 0
            gl2_blu_vol_total = 0

            for team in red:

                elo_red_total += red[team]["elo"]["score"]

                gl2_red_score_total += red[team]["gl2"]["score"]
                gl2_red_rd_total += red[team]["gl2"]["rd"]
                gl2_red_vol_total += red[team]["gl2"]["vol"]

            for team in blu:

                elo_blu_total += blu[team]["elo"]["score"]

                gl2_blu_score_total += blu[team]["gl2"]["score"]
                gl2_blu_rd_total += blu[team]["gl2"]["rd"]
                gl2_blu_vol_total += blu[team]["gl2"]["vol"]

            red_elo = {"score": elo_red_total / len(red)}
            blu_elo = {"score": elo_blu_total / len(blu)}

            red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
            blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}

            if match["winner"] == "red":

                observations = {"red": 1, "blu": 0}

            elif match["winner"] == "blue":

                observations = {"red": 0, "blu": 1}

            else:

                observations = {"red": 0.5, "blu": 0.5}

            red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
            blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]

            new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
            new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])

            red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
            blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}

            for team in red:

                red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta

                red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
                red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
                red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]

            for team in blu:

                blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta

                blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
                blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
                blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

            temp_vector = {}
            temp_vector.update(red)
            temp_vector.update(blu)

            d.push_metric(self.apikey, self.competition, temp_vector)

    def _push_results(self):
        pass

class Pit (Module):

    config = None
    apikey = None
    tbakey = None
    timestamp = None
    competition = None

    data = None
    results = None

    def __init__(self, config, apikey, tbakey, timestamp, competition):
        self.config = config
        self.apikey = apikey
        self.tbakey = tbakey
        self.timestamp = timestamp
        self.competition = competition

    def validate_config(self):
        return True, ""

    def run(self, exec_threads):
        self._load_data()
        self._process_data(exec_threads)
        self._push_results()

    def _load_data(self):
        self.data = d.load_pit(self.apikey, self.competition)

    def _process_data(self, exec_threads):
        tests = self.config["tests"]
        print(tests)
        return_vector = {}
        for team in self.data:
            for variable in self.data[team]:
                if variable in tests:
                    if not variable in return_vector:
                        return_vector[variable] = []
                    return_vector[variable].append(self.data[team][variable])

        self.results = return_vector

    def _push_results(self):
        d.push_pit(self.apikey, self.competition, self.results)

class Rating (Module):
    pass

class Heatmap (Module):
    pass

class Sentiment (Module):
    pass
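The modules above all follow the same lifecycle that superscript.py drives: construct with (config, client, tbakey, timestamp, competition), then validate_config, then run. A hedged sketch of that driver pattern, with placeholder credentials and an example competition code:

import pymongo
from module import Match, Metric, Pit

modules = {"match": Match, "metric": Metric, "pit": Pit}

client = pymongo.MongoClient("<database key>")   # placeholder connection string
tbakey = "<tba key>"                             # placeholder
config_modules = {"match": {"tests": {"balls-blocked": ["basic_stats"]}}}

for name, cfg in config_modules.items():
    if name in modules:
        m = modules[name](cfg, client, tbakey, 0, "2020ilch")  # timestamp 0, example event code
        if m.validate_config():
            # Match._process_data currently iterates serially, so no pool is required here
            m.run(exec_threads = None)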
src/cli/pull.py (new file, 64 lines)
@@ -0,0 +1,64 @@
import requests
import json
from exceptions import APIError
from config import load_config

url = "https://titanscouting.epochml.org"
config_tra = {}
load_config("config.json", config_tra)
trakey = config_tra['persistent']['key']['tra']

def get_team_competition():
    endpoint = '/api/fetchTeamCompetition'
    params = {
        "CLIENT_ID": trakey['CLIENT_ID'],
        "CLIENT_SECRET": trakey['CLIENT_SECRET']
    }
    response = requests.request("GET", url + endpoint, params=params)
    json = response.json()
    if json['success']:
        return json['competition']
    else:
        raise APIError(json, endpoint)

def get_team():
    endpoint = '/api/fetchTeamCompetition'
    params = {
        "CLIENT_ID": trakey['CLIENT_ID'],
        "CLIENT_SECRET": trakey['CLIENT_SECRET']
    }
    response = requests.request("GET", url + endpoint, params=params)
    json = response.json()
    if json['success']:
        return json['team']
    else:
        raise APIError(json, endpoint)

def get_team_match_data(competition, team_num):
    endpoint = '/api/fetchAllTeamMatchData'
    params = {
        "competition": competition,
        "teamScouted": team_num,
        "CLIENT_ID": trakey['CLIENT_ID'],
        "CLIENT_SECRET": trakey['CLIENT_SECRET']
    }
    response = requests.request("GET", url + endpoint, params=params)
    json = response.json()
    if json['success']:
        return json['data'][team_num]
    else:
        raise APIError(json, endpoint)

def get_teams_at_competition(competition):
    endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
    params = {
        "competition": competition,
        "CLIENT_ID": trakey['CLIENT_ID'],
        "CLIENT_SECRET": trakey['CLIENT_SECRET']
    }
    response = requests.request("GET", url + endpoint, params=params)
    json = response.json()
    if json['success']:
        return list(json['data'].keys())
    else:
        raise APIError(json, endpoint)
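Every helper in pull.py expects the same response envelope from the TRA API (a success flag plus a payload) and raises APIError otherwise. A minimal sketch of consuming one of them, assuming config.json carries valid TRA credentials and using an example event code:

from exceptions import APIError
import pull

try:
    teams = pull.get_teams_at_competition("2020ilch")  # example competition code
    print(len(teams), "teams scouted")
except APIError as e:
    # e.endpoint identifies which API route failed
    print("TRA API call failed at", e.endpoint)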
src/cli/superscript.py (new file, 390 lines)
@@ -0,0 +1,390 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:

__version__ = "1.0.0"

# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
    1.0.0:
        - superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
        - linux superscript daemon has integrated websocket output to monitor progress/status remotely
        - linux daemon now sends stderr to errorlog.log
        - added verbose option to linux superscript to allow for interactive output
        - moved pymongo import to superscript.py
        - added profile option to linux superscript to profile runtime of script
        - reduced memory usage slightly by consolidating the unwrapped input data
        - added debug option, which performs one loop of analysis and dumps results to local files
        - added event and time delay options to config
        - event delay pauses loop until even listener recieves an update
        - time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
        - added options to pull config information from database (reatins option to use local config file)
        - config-preference option selects between prioritizing local config and prioritizing database config
        - synchronize-config option selects whether to update the non prioritized config with the prioritized one
        - divided config options between persistent ones (keys), and variable ones (everything else)
    0.9.3:
        - improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
        - passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
    0.9.2:
        - removed unessasary imports from data
        - minor changes to interface
    0.9.1:
        - fixed bugs in configuration item loading exception handling
    0.9.0:
        - moved printing and logging related functions to interface.py (changelog will stay in this file)
        - changed function return files for load_config and save_config to standard C values (0 for success, 1 for error)
        - added local variables for config location
        - moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
        - moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
    0.8.6:
        - added proper main function
    0.8.5:
        - added more gradeful KeyboardInterrupt exiting
        - redirected stderr to errorlog.txt
    0.8.4:
        - added better error message for missing config.json
        - added automatic config.json creation
        - added splash text with version and system info
    0.8.3:
        - updated matchloop with new regression format (requires tra_analysis 3.x)
    0.8.2:
        - readded while true to main function
        - added more thread config options
    0.8.1:
        - optimized matchloop further by bypassing GIL
    0.8.0:
        - added multithreading to matchloop
        - tweaked user log
    0.7.0:
        - finished implementing main function
    0.6.2:
        - integrated get_team_rankings.py as get_team_metrics() function
        - integrated visualize_pit.py as graph_pit_histogram() function
    0.6.1:
        - bug fixes with analysis.Metric() calls
        - modified metric functions to use config.json defined default values
    0.6.0:
        - removed main function
        - changed load_config function
        - added save_config function
        - added load_match function
        - renamed simpleloop to matchloop
        - moved simplestats function inside matchloop
        - renamed load_metrics to load_metric
        - renamed metricsloop to metricloop
        - split push to database functions amon push_match, push_metric, push_pit
        - moved
    0.5.2:
        - made changes due to refactoring of analysis
    0.5.1:
        - text fixes
        - removed matplotlib requirement
    0.5.0:
        - improved user interface
    0.4.2:
        - removed unessasary code
    0.4.1:
        - fixed bug where X range for regression was determined before sanitization
        - better sanitized data
    0.4.0:
        - fixed spelling issue in __changelog__
        - addressed nan bug in regression
        - fixed errors on line 335 with metrics calling incorrect key "glicko2"
        - fixed errors in metrics computing
    0.3.0:
        - added analysis to pit data
    0.2.1:
        - minor stability patches
        - implemented db syncing for timestamps
        - fixed bugs
    0.2.0:
        - finalized testing and small fixes
    0.1.4:
        - finished metrics implement, trueskill is bugged
    0.1.3:
        - working
    0.1.2:
        - started implement of metrics
    0.1.1:
        - cleaned up imports
    0.1.0:
        - tested working, can push to database
    0.0.9:
        - tested working
        - prints out stats for the time being, will push to database later
    0.0.8:
        - added data import
        - removed tba import
        - finished main method
    0.0.7:
        - added load_config
        - optimized simpleloop for readibility
        - added __all__ entries
        - added simplestats engine
        - pending testing
    0.0.6:
        - fixes
    0.0.5:
        - imported pickle
        - created custom database object
    0.0.4:
        - fixed simpleloop to actually return a vector
    0.0.3:
        - added metricsloop which is unfinished
    0.0.2:
        - added simpleloop which is untested until data is provided
    0.0.1:
        - created script
        - added analysis, numba, numpy imports
"""

__author__ = (
    "Arthur Lu <learthurgo@gmail.com>",
    "Jacob Levine <jlevine@imsa.edu>",
)

__all__ = [
    "load_config",
    "save_config",
]

# imports:

import json
from multiprocessing import freeze_support
import os
import pymongo
import sys
import time
import traceback
import warnings
import zmq
import pull
from config import parse_config_persistent, parse_config_variable, resolve_config_conflicts, load_config, save_config, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches
from interface import splash, log, ERR, INF, stdout, stderr
from module import Match, Metric, Pit

config_path = "config.json"

def main(send, verbose = False, profile = False, debug = False):

    def close_all():
        if "exec_threads" in locals():
            exec_threads.terminate()
            exec_threads.join()
            exec_threads.close()
        if "client" in locals():
            client.close()
        if "f" in locals():
            f.close()

    warnings.filterwarnings("ignore")
    exit_code = 0

    if verbose:
        splash(__version__)

    modules = {"match": Match, "metric": Metric, "pit": Pit}

    while True:

        try:

            loop_start = time.time()

            send(stdout, INF, "current time: " + str(loop_start))

            config = {}

            if load_config(config_path, config):
                raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110)

            send(stdout, INF, "found and loaded config at <" + config_path + ">")

            apikey, tbakey, preference, sync = parse_config_persistent(send, config)

            send(stdout, INF, "found and loaded database and tba keys")

            client = pymongo.MongoClient(apikey)

            send(stdout, INF, "established connection to database")
            previous_time = get_previous_time(client)
            send(stdout, INF, "analysis backtimed to: " + str(previous_time))

            config = resolve_config_conflicts(send, client, config, preference, sync)

            exec_threads, config_modules = parse_config_variable(send, config)
            if 'competition' in config['variable']:
                competition = config['variable']['competition']
            else:
                competition = pull.get_team_competition()
            for m in config_modules:
                if m in modules:
                    start = time.time()
                    current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
                    valid = current_module.validate_config()
                    if not valid:
                        continue
                    current_module.run(exec_threads)
                    send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds")
                    if debug:
                        f = open(m + ".log", "w+")
                        json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
                        f.close()

            set_current_time(client, loop_start)
            close_all()

            send(stdout, INF, "closed threads and database client")
            send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")

            if profile:
                exit_code = 0
                break

            event_delay = config["variable"]["event-delay"]
            if event_delay:
                send(stdout, INF, "loop delayed until database returns new matches")
                new_match = False
                while not new_match:
                    time.sleep(1)
                    new_match = check_new_database_matches(client, competition)
                send(stdout, INF, "database returned new matches")
            else:
                loop_delay = float(config["variable"]["loop-delay"])
                remaining_time = loop_delay - (time.time() - loop_start)
                if remaining_time > 0:
                    send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds")
                    time.sleep(remaining_time)

        except KeyboardInterrupt:
            send(stdout, INF, "detected KeyboardInterrupt, killing threads")
            close_all()
            send(stdout, INF, "terminated threads, exiting")
            break

        except ConfigurationError as e:
            send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code)
            traceback.print_exc(file = stderr)
            exit_code = 1
            close_all()
            break

        except Exception as e:
            send(stderr, ERR, "encountered an exception while running", code = 1)
            traceback.print_exc(file = stderr)
            exit_code = 1
            close_all()
            break

    return exit_code

def start(pid_path, verbose = False, profile = False, debug = False):

    if profile:

        def send(target, level, message, code = 0):
            pass

        import cProfile, pstats, io
        profile = cProfile.Profile()
        profile.enable()
        exit_code = main(send, profile = True)
        profile.disable()
        f = open("profile.txt", 'w+')
        ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
        ps.print_stats()
        sys.exit(exit_code)

    elif verbose:

        exit_code = main(log, verbose = verbose)
        sys.exit(exit_code)

    elif debug:

        exit_code = main(log, verbose = True, profile = True, debug = debug)
        sys.exit(exit_code)

    else:

        f = open('errorlog.log', 'w+')
        with daemon.DaemonContext(
            working_directory = os.getcwd(),
            pidfile = pidfile.TimeoutPIDLockFile(pid_path),
            stderr = f
        ):

            context = zmq.Context()
            socket = context.socket(zmq.PUB)
            socket.bind("tcp://*:5678")

            socket.send(b'status')

            def send(target, level, message, code = 0):
                socket.send(bytes("status: " + message, 'utf-8'))

            exit_code = main(send)
            socket.close()
            f.close()
            sys.exit(exit_code)

def stop(pid_path):
    try:
        pf = open(pid_path, 'r')
        pid = int(pf.read().strip())
        pf.close()
    except IOError:
        sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
        return

    try:
        while True:
            os.kill(pid, SIGTERM)
            time.sleep(0.01)
    except OSError as err:
        err = str(err)
        if err.find("No such process") > 0:
            if os.path.exists(pid_path):
                os.remove(pid_path)
        else:
            traceback.print_exc(file = stderr)
            sys.exit(1)

def restart(pid_path):
    stop(pid_path)
    start(pid_path)

if __name__ == "__main__":

    if sys.platform.startswith("win"):
        freeze_support()
        start(None, verbose = True)

    else:
        import daemon
        from daemon import pidfile
        from signal import SIGTERM
        pid_path = "tra-daemon.pid"
        if len(sys.argv) == 2:
            if 'start' == sys.argv[1]:
                start(pid_path)
            elif 'stop' == sys.argv[1]:
                stop(pid_path)
            elif 'restart' == sys.argv[1]:
                restart(pid_path)
            elif 'verbose' == sys.argv[1]:
                start(None, verbose = True)
            elif 'profile' == sys.argv[1]:
                start(None, profile=True)
            elif 'debug' == sys.argv[1]:
                start(None, debug = True)
            else:
                print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
                sys.exit(2)
            sys.exit(0)
        else:
            print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
            sys.exit(2)
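When run as a daemon, superscript.py above publishes its status on a ZeroMQ PUB socket bound to tcp://*:5678. A small, hypothetical monitoring client that subscribes to that stream could look like this:

import zmq

context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://localhost:5678")       # the daemon binds tcp://*:5678
socket.setsockopt_string(zmq.SUBSCRIBE, "")  # receive every published message

while True:
    # messages are plain bytes of the form b"status: <message>"
    print(socket.recv().decode("utf-8"))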
src/cli/superscript.spec (new file, 39 lines)
@@ -0,0 +1,39 @@
# -*- mode: python ; coding: utf-8 -*-

block_cipher = None

a = Analysis(['superscript.py'],
             pathex=['/workspaces/tra-data-analysis/src'],
             binaries=[],
             datas=[],
             hiddenimports=[
                "dnspython",
                "sklearn.utils._weight_vector",
                "requests",
             ],
             hookspath=[],
             runtime_hooks=[],
             excludes=[
                "matplotlib",
                "pandas"
             ],
             win_no_prefer_redirects=False,
             win_private_assemblies=False,
             cipher=block_cipher,
             noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
          cipher=block_cipher)
exe = EXE(pyz,
          a.scripts,
          a.binaries,
          a.zipfiles,
          a.datas,
          [('W ignore', None, 'OPTION')],
          name='superscript',
          debug=False,
          bootloader_ignore_signals=False,
          strip=False,
          upx=True,
          upx_exclude=[],
          runtime_tmpdir=None,
          console=True )
src/cli/validation-schema.json (new file, 27 lines)
@@ -0,0 +1,27 @@
{
    "persistent": {
        "type": "dict",
        "require_all": true,
        "schema": {
            "key": {
                "type": "dict",
                "require_all":true,
                "schema": {
                    "database": {"type":"string"},
                    "tba": {"type": "string"},
                    "tra": {
                        "type": "dict",
                        "require_all": true,
                        "schema": {
                            "CLIENT_ID": {"type": "string"},
                            "CLIENT_SECRET": {"type": "string"},
                            "url": {"type": "string"}
                        }
                    }
                }
            },
            "config-preference": {"type": "string", "required": true},
            "synchronize-config": {"type": "boolean", "required": true}
        }
    }
}
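The schema above is consumed by parse_config_persistent through Cerberus; a minimal sketch of the same validation step in isolation, assuming config.json and validation-schema.json sit in the working directory:

import json
from cerberus import Validator

with open("validation-schema.json") as f:
    schema = json.load(f)

# allow_unknown mirrors config.py: the "variable" section is not constrained by the schema
v = Validator(schema, allow_unknown = True)

with open("config.json") as f:
    config = json.load(f)

if not v.validate(config):
    print(v.errors)  # e.g. missing persistent/key/tra fields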
src/config.json (deleted, 101 lines)
@@ -1,101 +0,0 @@
{
    "max-threads": 0.5,
    "team": "",
    "competition": "",
    "key": {
        "database": "",
        "tba": ""
    },
    "statistics": {
        "match": {
            "balls-blocked": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-collected": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-lower-teleop": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-lower-auto": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-started": [
                "basic_stats",
                "historical_analyss",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-upper-teleop": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ],
            "balls-upper-auto": [
                "basic_stats",
                "historical_analysis",
                "regression_linear",
                "regression_logarithmic",
                "regression_exponential",
                "regression_polynomial",
                "regression_sigmoidal"
            ]
        },
        "metric": {
            "elo": {
                "score": 1500,
                "N": 400,
                "K": 24
            },
            "gl2": {
                "score": 1500,
                "rd": 250,
                "vol": 0.06
            },
            "ts": {
                "mu": 25,
                "sigma": 8.33
            }
        },
        "pit": {
            "wheel-mechanism": true,
            "low-balls": true,
            "high-balls": true,
            "wheel-success": true,
            "strategic-focus": true,
            "climb-mechanism": true,
            "attitude": true
        }
    }
}
@@ -3,10 +3,21 @@
|
||||
# Notes:
|
||||
# setup:
|
||||
|
||||
__version__ = "0.8.2"
|
||||
__version__ = "0.8.6"
|
||||
|
||||
# changelog should be viewed using print(analysis.__changelog__)
|
||||
__changelog__ = """changelog:
|
||||
0.8.6:
|
||||
- added proper main function
|
||||
0.8.5:
|
||||
- added more gradeful KeyboardInterrupt exiting
|
||||
- redirected stderr to errorlog.txt
|
||||
0.8.4:
|
||||
- added better error message for missing config.json
|
||||
- added automatic config.json creation
|
||||
- added splash text with version and system info
|
||||
0.8.3:
|
||||
- updated matchloop with new regression format (requires tra_analysis 3.x)
|
||||
0.8.2:
|
||||
- readded while true to main function
|
||||
- added more thread config options
|
||||
@@ -130,80 +141,97 @@ import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
import platform
import sys
import time
import warnings

# global exec_threads
global exec_threads

def main():

# global exec_threads
global exec_threads

sys.stderr = open("errorlog.txt", "w")

warnings.filterwarnings("ignore")

# while (True):
splash()

current_time = time.time()
print("[OK] time: " + str(current_time))
while (True):

config = load_config("red-alliance-analysis\data-analysis\config.json")
competition = config["competition"]
match_tests = config["statistics"]["match"]
pit_tests = config["statistics"]["pit"]
metrics_tests = config["statistics"]["metric"]
print("[OK] configs loaded")
try:

print("[OK] starting threads")
cfg_max_threads = config["max-threads"]
sys_max_threads = os.cpu_count()
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
print("[Err] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
exit()
# exec_threads = Pool(processes = alloc_processes)
# print("[OK] " + str(alloc_processes) + " threads started")
current_time = time.time()
print("[OK] time: " + str(current_time))

apikey = config["key"]["database"]
tbakey = config["key"]["tba"]
print("[OK] loaded keys")
config = load_config("config.json")
competition = config["competition"]
match_tests = config["statistics"]["match"]
pit_tests = config["statistics"]["pit"]
metrics_tests = config["statistics"]["metric"]
print("[OK] configs loaded")

previous_time = get_previous_time(apikey)
print("[OK] analysis backtimed to: " + str(previous_time))
print("[OK] starting threads")
cfg_max_threads = config["max-threads"]
sys_max_threads = os.cpu_count()
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
exit()
exec_threads = Pool(processes = alloc_processes)
print("[OK] " + str(alloc_processes) + " threads started")

print("[OK] loading data")
start = time.time()
match_data = load_match(apikey, competition)
pit_data = load_pit(apikey, competition)
print("[OK] loaded data in " + str(time.time() - start) + " seconds")
apikey = config["key"]["database"]
tbakey = config["key"]["tba"]
print("[OK] loaded keys")

print("[OK] running match stats")
start = time.time()
matchloop(apikey, competition, match_data, match_tests)
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
previous_time = get_previous_time(apikey)
print("[OK] analysis backtimed to: " + str(previous_time))

print("[OK] running team metrics")
start = time.time()
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
print("[OK] loading data")
start = time.time()
match_data = load_match(apikey, competition)
pit_data = load_pit(apikey, competition)
print("[OK] loaded data in " + str(time.time() - start) + " seconds")

print("[OK] running pit analysis")
start = time.time()
pitloop(apikey, competition, pit_data, pit_tests)
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
print("[OK] running match stats")
start = time.time()
matchloop(apikey, competition, match_data, match_tests)
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")

set_current_time(apikey, current_time)
print("[OK] finished all tests, looping")
print("[OK] running team metrics")
start = time.time()
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")

# clear()
print("[OK] running pit analysis")
start = time.time()
pitloop(apikey, competition, pit_data, pit_tests)
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")

set_current_time(apikey, current_time)
print("[OK] finished all tests, looping")

print_hrule()

except KeyboardInterrupt:
print("\n[OK] caught KeyboardInterrupt, killing processes")
exec_threads.terminate()
print("[OK] processes killed, exiting")
exit()

else:
pass

#clear()

def clear():

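The max-threads handling above boils down to one rule: a negative value leaves that many cores free, a value strictly between 0 and 1 uses that fraction of the cores, 0 uses every core, and an integer between 2 and the core count is taken literally (a value of exactly 1 falls through to the error branch in both copies of the block). As a standalone sketch of that rule, illustrative only and not part of the diff:

import math
import os

def alloc_processes(cfg_max_threads):
    sys_max_threads = os.cpu_count()
    if -sys_max_threads < cfg_max_threads < 0:        # e.g. -1 keeps one core free
        return sys_max_threads + cfg_max_threads
    elif 0 < cfg_max_threads < 1:                     # e.g. 0.5 uses half the cores
        return math.floor(cfg_max_threads * sys_max_threads)
    elif 1 < cfg_max_threads <= sys_max_threads:      # explicit process count
        return cfg_max_threads
    elif cfg_max_threads == 0:                        # 0 means use every core
        return sys_max_threads
    raise ValueError("must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))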
@@ -215,11 +243,39 @@ def clear():
else:
_ = system('clear')

def print_hrule():

print("#"+38*"-"+"#")

def print_box(s):

temp = "|"
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)

def splash():

print_hrule()
print_box(" superscript version: " + __version__)
print_box(" os: " + platform.system())
print_box(" python: " + platform.python_version())
print_hrule()

def load_config(file):

config_vector = {}
with open(file) as f:
config_vector = json.load(f)

try:
f = open(file)
except:
print("[ERROR] could not locate config.json, generating blank config.json and exiting")
f = open(file, "w")
f.write(sample_json)
exit()

config_vector = json.load(f)

return config_vector

@@ -284,6 +340,8 @@ def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][

global exec_threads

short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}

class AutoVivification(dict):
def __getitem__(self, item):
try:
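The AutoVivification class is cut off by the hunk boundary; it is the usual nested-defaulting-dict recipe, which is what lets the loop below assign return_vector[team][variable][test] without building the intermediate dicts first. For reference, the standard form of that recipe (assumed here, since the full body is not shown):

class AutoVivification(dict):
    def __getitem__(self, item):
        try:
            return dict.__getitem__(self, item)
        except KeyError:
            # a missing key is filled with another AutoVivification,
            # so arbitrarily deep assignment works without setup
            value = self[item] = type(self)()
            return value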
@@ -314,14 +372,20 @@ def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][
variable_data.append((data[team][variable], test))
test_filtered.append(test)

result_filtered = map(simplestats, variable_data)
result_filtered = exec_threads.map(simplestats, variable_data)
i = 0

result_filtered = list(result_filtered)

for result in result_filtered:

return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
filtered = test_filtered[i]

try:
short = short_mapping[filtered]
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
except KeyError: # not in mapping
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
i += 1

push_match(apikey, competition, return_vector)
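The try/except above assumes each regression test's result is a dict keyed by the abbreviated names in short_mapping, while anything not in the mapping (basic_stats, historical_analysis) is stored whole. A small illustration with a made-up result shape, since simplestats itself is outside this hunk:

short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
result = {"lin": [1.2, 3.4], "log": [0.9, 2.1]}   # hypothetical simplestats output for one variable
test = "regression_linear"
value = result[short_mapping[test]] if test in short_mapping else result
print(value)   # [1.2, 3.4]; a test like "basic_stats" would keep the whole result instead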
@@ -510,27 +574,54 @@ def get_team_metrics(apikey, tbakey, competition):

return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}

def graph_pit_histogram(apikey, competition, figsize=(80,15)):
sample_json = """{
"max-threads": 0.5,
"team": "",
"competition": "2020ilch",
"key":{
"database":"",
"tba":""
},
"statistics":{
"match":{
"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
|
||||
|
||||
pit = d.get_pit_variable_formatted(apikey, competition)
|
||||
},
|
||||
"metric":{
|
||||
"elo":{
|
||||
"score":1500,
|
||||
"N":400,
|
||||
"K":24
|
||||
},
|
||||
"gl2":{
|
||||
"score":1500,
|
||||
"rd":250,
|
||||
"vol":0.06
|
||||
},
|
||||
"ts":{
|
||||
"mu":25,
|
||||
"sigma":8.33
|
||||
}
|
||||
},
|
||||
"pit":{
|
||||
"wheel-mechanism":true,
|
||||
"low-balls":true,
|
||||
"high-balls":true,
|
||||
"wheel-success":true,
|
||||
"strategic-focus":true,
|
||||
"climb-mechanism":true,
|
||||
"attitude":true
|
||||
}
|
||||
}
|
||||
}"""
|
||||
|
||||
fig, ax = plt.subplots(1, len(pit), sharey=True, figsize=figsize)
|
||||
|
||||
i = 0
|
||||
|
||||
for variable in pit:
|
||||
|
||||
ax[i].hist(pit[variable])
|
||||
ax[i].invert_xaxis()
|
||||
|
||||
ax[i].set_xlabel('')
|
||||
ax[i].set_ylabel('Frequency')
|
||||
ax[i].set_title(variable)
|
||||
|
||||
plt.yticks(np.arange(len(pit[variable])))
|
||||
|
||||
i+=1
|
||||
|
||||
plt.show()
|
||||
|
||||
main()
|
||||
if __name__ == "__main__":
|
||||
if sys.platform.startswith('win'):
|
||||
multiprocessing.freeze_support()
|
||||
main()
|
@@ -1,5 +0,0 @@
requests
pymongo
pandas
tra-analysis
kivy==2.0.0rc2
test/client.py (new file, 14 lines)
@@ -0,0 +1,14 @@
import signal
import zmq

signal.signal(signal.SIGINT, signal.SIG_DFL)

context = zmq.Context()

socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')

while True:
    message = socket.recv_multipart()
    print(f'Received: {message}')
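test/client.py is a plain ZeroMQ subscriber on tcp://localhost:5678 filtered to the 'status' topic. For local testing it needs a matching publisher on the other end; a minimal sketch of one is below, where the port, topic, and multipart framing are taken from the client and everything else (the payload text, the one-second cadence) is assumption:

import time
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind('tcp://*:5678')

while True:
    # topic frame first so the SUB side's b'status' filter matches
    socket.send_multipart([b'status', b'[OK] finished all tests, looping'])
    time.sleep(1)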