mirror of
https://github.com/titanscouting/tra-superscript.git
synced 2025-09-26 07:10:18 +00:00
Compare commits
32 Commits
gui
...
competitio
Author | SHA1 | Date | |
---|---|---|---|
|
8e6c44db65 | ||
|
11398290eb | ||
|
d847f6d6a7 | ||
|
b5c8a91fad | ||
|
8e5fa7eace | ||
|
69c6059ff8 | ||
|
fdcdadb8b2 | ||
|
cdd81295fc | ||
|
82ec2d85cc | ||
|
ac8002aaf8 | ||
|
25e4babd71 | ||
|
3fe2922e97 | ||
|
9752fd323b | ||
|
ef63c1de7e | ||
|
8908f05cbe | ||
|
143218dda3 | ||
|
def2fc9b73 | ||
|
e8a5bb75f8 | ||
|
c9dd09f5e9 | ||
|
3c6e3ac58e | ||
|
8c28c24d60 | ||
|
4836f48a34 | ||
|
9a1a45f1c9 | ||
|
d7ed695ad1 | ||
|
21d92e65b2 | ||
|
0cace3cec3 | ||
|
80b63269df | ||
|
56447603e1 | ||
|
2130182212 | ||
|
b1eff19ea4 | ||
|
b43836899d | ||
|
524a0a211d |
@@ -1,6 +1,6 @@
|
|||||||
FROM python:slim
|
FROM python:slim
|
||||||
WORKDIR /
|
WORKDIR /
|
||||||
RUN apt-get -y update; apt-get -y upgrade
|
RUN apt-get -y update; apt-get -y upgrade
|
||||||
RUN apt-get -y install git
|
RUN apt-get -y install git binutils
|
||||||
COPY requirements.txt .
|
COPY requirements.txt .
|
||||||
RUN pip install -r requirements.txt
|
RUN pip install -r requirements.txt
|
@@ -1,15 +1,15 @@
|
|||||||
{
|
{
|
||||||
"name": "TRA Analysis Development Environment",
|
"name": "TRA Analysis Development Environment",
|
||||||
"build": {
|
"build": {
|
||||||
"dockerfile": "Dockerfile",
|
"dockerfile": "Dockerfile"
|
||||||
},
|
},
|
||||||
"settings": {
|
"settings": {
|
||||||
"terminal.integrated.shell.linux": "/bin/bash",
|
"terminal.integrated.shell.linux": "/bin/bash",
|
||||||
"python.pythonPath": "",
|
"python.pythonPath": "/usr/local/bin/python",
|
||||||
"python.linting.enabled": true,
|
"python.linting.enabled": true,
|
||||||
"python.linting.pylintEnabled": true,
|
"python.linting.pylintEnabled": true,
|
||||||
"python.linting.pylintPath": "",
|
"python.linting.pylintPath": "/usr/local/bin/pylint",
|
||||||
"python.testing.pytestPath": "",
|
"python.testing.pytestPath": "/usr/local/bin/pytest",
|
||||||
"editor.tabSize": 4,
|
"editor.tabSize": 4,
|
||||||
"editor.insertSpaces": false
|
"editor.insertSpaces": false
|
||||||
},
|
},
|
||||||
@@ -19,4 +19,4 @@
|
|||||||
"waderyan.gitblame"
|
"waderyan.gitblame"
|
||||||
],
|
],
|
||||||
"postCreateCommand": ""
|
"postCreateCommand": ""
|
||||||
}
|
}
|
@@ -1,6 +1,7 @@
|
|||||||
cerberus
|
cerberus
|
||||||
dnspython
|
dnspython
|
||||||
numpy
|
numpy
|
||||||
|
pandas
|
||||||
pyinstaller
|
pyinstaller
|
||||||
pylint
|
pylint
|
||||||
pymongo
|
pymongo
|
||||||
|
38
.github/ISSUE_TEMPLATE/bug_report.md
vendored
38
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,38 +0,0 @@
|
|||||||
---
|
|
||||||
name: Bug report
|
|
||||||
about: Create a report to help us improve
|
|
||||||
title: ''
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Describe the bug**
|
|
||||||
A clear and concise description of what the bug is.
|
|
||||||
|
|
||||||
**To Reproduce**
|
|
||||||
Steps to reproduce the behavior:
|
|
||||||
1. Go to '...'
|
|
||||||
2. Click on '....'
|
|
||||||
3. Scroll down to '....'
|
|
||||||
4. See error
|
|
||||||
|
|
||||||
**Expected behavior**
|
|
||||||
A clear and concise description of what you expected to happen.
|
|
||||||
|
|
||||||
**Screenshots**
|
|
||||||
If applicable, add screenshots to help explain your problem.
|
|
||||||
|
|
||||||
**Desktop (please complete the following information):**
|
|
||||||
- OS: [e.g. iOS]
|
|
||||||
- Browser [e.g. chrome, safari]
|
|
||||||
- Version [e.g. 22]
|
|
||||||
|
|
||||||
**Smartphone (please complete the following information):**
|
|
||||||
- Device: [e.g. iPhone6]
|
|
||||||
- OS: [e.g. iOS8.1]
|
|
||||||
- Browser [e.g. stock browser, safari]
|
|
||||||
- Version [e.g. 22]
|
|
||||||
|
|
||||||
**Additional context**
|
|
||||||
Add any other context about the problem here.
|
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,20 +0,0 @@
|
|||||||
---
|
|
||||||
name: Feature request
|
|
||||||
about: Suggest an idea for this project
|
|
||||||
title: ''
|
|
||||||
labels: ''
|
|
||||||
assignees: ''
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
**Is your feature request related to a problem? Please describe.**
|
|
||||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
|
||||||
|
|
||||||
**Describe the solution you'd like**
|
|
||||||
A clear and concise description of what you want to happen.
|
|
||||||
|
|
||||||
**Describe alternatives you've considered**
|
|
||||||
A clear and concise description of any alternative solutions or features you've considered.
|
|
||||||
|
|
||||||
**Additional context**
|
|
||||||
Add any other context or screenshots about the feature request here.
|
|
35
.github/workflows/build-cli.yml
vendored
35
.github/workflows/build-cli.yml
vendored
@@ -1,35 +0,0 @@
|
|||||||
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
|
|
||||||
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
|
|
||||||
|
|
||||||
name: Build Superscript Linux
|
|
||||||
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [published, edited]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
generate:
|
|
||||||
name: Build Linux
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout master
|
|
||||||
uses: actions/checkout@master
|
|
||||||
- name: Install Dependencies
|
|
||||||
run: pip install -r requirements.txt
|
|
||||||
working-directory: src/
|
|
||||||
- name: Give Execute Permission
|
|
||||||
run: chmod +x build-CLI.sh
|
|
||||||
working-directory: build/
|
|
||||||
- name: Build Binary
|
|
||||||
run: ./build-CLI.sh
|
|
||||||
working-directory: build/
|
|
||||||
- name: Copy Binary to Root Dir
|
|
||||||
run: cp superscript ..
|
|
||||||
working-directory: dist/
|
|
||||||
- name: Upload Release Asset
|
|
||||||
uses: svenstaro/upload-release-action@v2
|
|
||||||
with:
|
|
||||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
file: superscript
|
|
||||||
asset_name: superscript
|
|
||||||
tag: ${{ github.ref }}
|
|
34
.github/workflows/superscript-unit.yml
vendored
34
.github/workflows/superscript-unit.yml
vendored
@@ -1,34 +0,0 @@
|
|||||||
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
|
|
||||||
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
|
|
||||||
|
|
||||||
name: Superscript Unit Tests
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches: [ master ]
|
|
||||||
pull_request:
|
|
||||||
branches: [ master ]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
build:
|
|
||||||
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
python-version: [3.7, 3.8]
|
|
||||||
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v2
|
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
|
||||||
uses: actions/setup-python@v2
|
|
||||||
with:
|
|
||||||
python-version: ${{ matrix.python-version }}
|
|
||||||
- name: Install dependencies
|
|
||||||
run: |
|
|
||||||
python -m pip install --upgrade pip
|
|
||||||
pip install pytest
|
|
||||||
if [ -f src/requirements.txt ]; then pip install -r src/requirements.txt; fi
|
|
||||||
- name: Test with pytest
|
|
||||||
run: |
|
|
||||||
pytest test/
|
|
6
.gitignore
vendored
6
.gitignore
vendored
@@ -15,5 +15,7 @@
|
|||||||
|
|
||||||
**/*.log
|
**/*.log
|
||||||
**/errorlog.txt
|
**/errorlog.txt
|
||||||
/dist/superscript.*
|
/dist/*
|
||||||
/dist/superscript
|
|
||||||
|
slurm-tra-superscript.out
|
||||||
|
config*.json
|
@@ -1,5 +0,0 @@
|
|||||||
set pathtospec="../src/cli/superscript.spec"
|
|
||||||
set pathtodist="../dist/"
|
|
||||||
set pathtowork="temp/"
|
|
||||||
|
|
||||||
pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
|
|
@@ -1,5 +0,0 @@
|
|||||||
pathtospec="../src/cli/superscript.spec"
|
|
||||||
pathtodist="../dist/"
|
|
||||||
pathtowork="temp/"
|
|
||||||
|
|
||||||
pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
|
|
251
competition/config.py
Normal file
251
competition/config.py
Normal file
@@ -0,0 +1,251 @@
|
|||||||
|
import json
|
||||||
|
from exceptions import ConfigurationError
|
||||||
|
from cerberus import Validator
|
||||||
|
|
||||||
|
from data import set_database_config, get_database_config
|
||||||
|
|
||||||
|
class Configuration:
|
||||||
|
|
||||||
|
path = None
|
||||||
|
config = {}
|
||||||
|
|
||||||
|
_sample_config = {
|
||||||
|
"persistent":{
|
||||||
|
"key":{
|
||||||
|
"database":"",
|
||||||
|
"tba":"",
|
||||||
|
"tra":{
|
||||||
|
"CLIENT_ID":"",
|
||||||
|
"CLIENT_SECRET":"",
|
||||||
|
"url": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"config-preference":"local",
|
||||||
|
"synchronize-config":False
|
||||||
|
},
|
||||||
|
"variable":{
|
||||||
|
"event-delay":False,
|
||||||
|
"loop-delay":0,
|
||||||
|
"competition": "2020ilch",
|
||||||
|
"modules":{
|
||||||
|
"match":{
|
||||||
|
"tests":{
|
||||||
|
"balls-blocked":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-collected":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-lower-teleop":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-lower-auto":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-started":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analyss",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-upper-teleop":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-upper-auto":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"metric":{
|
||||||
|
"tests":{
|
||||||
|
"elo":{
|
||||||
|
"score":1500,
|
||||||
|
"N":400,
|
||||||
|
"K":24
|
||||||
|
},
|
||||||
|
"gl2":{
|
||||||
|
"score":1500,
|
||||||
|
"rd":250,
|
||||||
|
"vol":0.06
|
||||||
|
},
|
||||||
|
"ts":{
|
||||||
|
"mu":25,
|
||||||
|
"sigma":8.33
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pit":{
|
||||||
|
"tests":{
|
||||||
|
"wheel-mechanism":True,
|
||||||
|
"low-balls":True,
|
||||||
|
"high-balls":True,
|
||||||
|
"wheel-success":True,
|
||||||
|
"strategic-focus":True,
|
||||||
|
"climb-mechanism":True,
|
||||||
|
"attitude":True
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_validation_schema = {
|
||||||
|
"persistent": {
|
||||||
|
"type": "dict",
|
||||||
|
"required": True,
|
||||||
|
"require_all": True,
|
||||||
|
"schema": {
|
||||||
|
"key": {
|
||||||
|
"type": "dict",
|
||||||
|
"require_all":True,
|
||||||
|
"schema": {
|
||||||
|
"database": {"type":"string"},
|
||||||
|
"tba": {"type": "string"},
|
||||||
|
"tra": {
|
||||||
|
"type": "dict",
|
||||||
|
"require_all": True,
|
||||||
|
"schema": {
|
||||||
|
"CLIENT_ID": {"type": "string"},
|
||||||
|
"CLIENT_SECRET": {"type": "string"},
|
||||||
|
"url": {"type": "string"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"config-preference": {"type": "string", "required": True},
|
||||||
|
"synchronize-config": {"type": "boolean", "required": True}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def __init__(self, path):
|
||||||
|
self.path = path
|
||||||
|
self.load_config()
|
||||||
|
self.validate_config()
|
||||||
|
|
||||||
|
def load_config(self):
|
||||||
|
try:
|
||||||
|
f = open(self.path, "r")
|
||||||
|
self.config.update(json.load(f))
|
||||||
|
f.close()
|
||||||
|
except:
|
||||||
|
self.config = self._sample_config
|
||||||
|
self.save_config()
|
||||||
|
f.close()
|
||||||
|
raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path")
|
||||||
|
|
||||||
|
def save_config(self):
|
||||||
|
f = open(self.path, "w+")
|
||||||
|
json.dump(self.config, f, ensure_ascii=False, indent=4)
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
def validate_config(self):
|
||||||
|
v = Validator(self._validation_schema, allow_unknown = True)
|
||||||
|
isValidated = v.validate(self.config)
|
||||||
|
|
||||||
|
if not isValidated:
|
||||||
|
raise ConfigurationError("config validation error: " + v.errors)
|
||||||
|
|
||||||
|
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
|
||||||
|
if name == "persistent":
|
||||||
|
return self.config["persistent"]
|
||||||
|
elif name == "key":
|
||||||
|
return self.config["persistent"]["key"]
|
||||||
|
elif name == "database":
|
||||||
|
# soon to be deprecated
|
||||||
|
return self.config["persistent"]["key"]["database"]
|
||||||
|
elif name == "tba":
|
||||||
|
return self.config["persistent"]["key"]["tba"]
|
||||||
|
elif name == "tra":
|
||||||
|
return self.config["persistent"]["key"]["tra"]
|
||||||
|
elif name == "priority":
|
||||||
|
return self.config["persistent"]["config-preference"]
|
||||||
|
elif name == "sync":
|
||||||
|
return self.config["persistent"]["synchronize-config"]
|
||||||
|
elif name == "variable":
|
||||||
|
return self.config["variable"]
|
||||||
|
elif name == "event_delay":
|
||||||
|
return self.config["variable"]["event-delay"]
|
||||||
|
elif name == "loop_delay":
|
||||||
|
return self.config["variable"]["loop-delay"]
|
||||||
|
elif name == "competition":
|
||||||
|
return self.config["variable"]["competition"]
|
||||||
|
elif name == "modules":
|
||||||
|
return self.config["variable"]["modules"]
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def __getitem__(self, key):
|
||||||
|
return self.config[key]
|
||||||
|
|
||||||
|
def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme
|
||||||
|
sync = self.sync
|
||||||
|
priority = self.priority
|
||||||
|
|
||||||
|
if sync:
|
||||||
|
if priority == "local" or priority == "client":
|
||||||
|
logger.info("config-preference set to local/client, loading local config information")
|
||||||
|
remote_config = get_database_config(client)
|
||||||
|
if remote_config != self.config["variable"]:
|
||||||
|
set_database_config(client, self.config["variable"])
|
||||||
|
logger.info("database config was different and was updated")
|
||||||
|
# no change to config
|
||||||
|
elif priority == "remote" or priority == "database":
|
||||||
|
logger.info("config-preference set to remote/database, loading remote config information")
|
||||||
|
remote_config = get_database_config(client)
|
||||||
|
if remote_config != self.config["variable"]:
|
||||||
|
self.config["variable"] = remote_config
|
||||||
|
self.save_config()
|
||||||
|
# change variable to match remote
|
||||||
|
logger.info("local config was different and was updated")
|
||||||
|
else:
|
||||||
|
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
|
||||||
|
else:
|
||||||
|
if priority == "local" or priority == "client":
|
||||||
|
logger.info("config-preference set to local/client, loading local config information")
|
||||||
|
# no change to config
|
||||||
|
elif priority == "remote" or priority == "database":
|
||||||
|
logger.info("config-preference set to remote/database, loading database config information")
|
||||||
|
self.config["variable"] = get_database_config(client)
|
||||||
|
# change variable to match remote without updating local version
|
||||||
|
else:
|
||||||
|
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
|
@@ -1,16 +1,26 @@
|
|||||||
|
from calendar import c
|
||||||
import requests
|
import requests
|
||||||
import pull
|
import pull
|
||||||
import pandas as pd
|
import pandas as pd
|
||||||
|
import json
|
||||||
|
|
||||||
def pull_new_tba_matches(apikey, competition, cutoff):
|
def pull_new_tba_matches(apikey, competition, last_match):
|
||||||
api_key= apikey
|
api_key= apikey
|
||||||
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}, verify=False)
|
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
|
||||||
|
json = x.json()
|
||||||
out = []
|
out = []
|
||||||
for i in x.json():
|
for i in json:
|
||||||
if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
|
if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match :
|
||||||
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
|
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
|
||||||
|
out.sort(key=lambda x: x['match'])
|
||||||
return out
|
return out
|
||||||
|
|
||||||
|
def pull_new_tba_matches_manual(apikey, competition, cutoff):
|
||||||
|
filename = competition+"-wins.json"
|
||||||
|
with open(filename, 'r') as f:
|
||||||
|
data = json.load(f)
|
||||||
|
return data
|
||||||
|
|
||||||
def get_team_match_data(client, competition, team_num):
|
def get_team_match_data(client, competition, team_num):
|
||||||
db = client.data_scouting
|
db = client.data_scouting
|
||||||
mdata = db.matchdata
|
mdata = db.matchdata
|
||||||
@@ -19,6 +29,12 @@ def get_team_match_data(client, competition, team_num):
|
|||||||
out[i['match']] = i['data']
|
out[i['match']] = i['data']
|
||||||
return pd.DataFrame(out)
|
return pd.DataFrame(out)
|
||||||
|
|
||||||
|
def clear_metrics(client, competition):
|
||||||
|
db = client.data_processing
|
||||||
|
data = db.team_metrics
|
||||||
|
data.delete_many({competition: competition})
|
||||||
|
return True
|
||||||
|
|
||||||
def get_team_pit_data(client, competition, team_num):
|
def get_team_pit_data(client, competition, team_num):
|
||||||
db = client.data_scouting
|
db = client.data_scouting
|
||||||
mdata = db.pitdata
|
mdata = db.pitdata
|
||||||
@@ -28,7 +44,15 @@ def get_team_pit_data(client, competition, team_num):
|
|||||||
def get_team_metrics_data(client, competition, team_num):
|
def get_team_metrics_data(client, competition, team_num):
|
||||||
db = client.data_processing
|
db = client.data_processing
|
||||||
mdata = db.team_metrics
|
mdata = db.team_metrics
|
||||||
return mdata.find_one({"competition" : competition, "team": team_num})
|
temp = mdata.find_one({"team": team_num})
|
||||||
|
if temp != None:
|
||||||
|
if competition in temp['metrics'].keys():
|
||||||
|
temp = temp['metrics'][competition]
|
||||||
|
else :
|
||||||
|
temp = None
|
||||||
|
else:
|
||||||
|
temp = None
|
||||||
|
return temp
|
||||||
|
|
||||||
def get_match_data_formatted(client, competition):
|
def get_match_data_formatted(client, competition):
|
||||||
teams_at_comp = pull.get_teams_at_competition(competition)
|
teams_at_comp = pull.get_teams_at_competition(competition)
|
||||||
@@ -51,7 +75,7 @@ def get_metrics_data_formatted(client, competition):
|
|||||||
return out
|
return out
|
||||||
|
|
||||||
def get_pit_data_formatted(client, competition):
|
def get_pit_data_formatted(client, competition):
|
||||||
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
|
x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
|
||||||
x = x.json()
|
x = x.json()
|
||||||
x = x['data']
|
x = x['data']
|
||||||
x = x.keys()
|
x = x.keys()
|
||||||
@@ -84,7 +108,7 @@ def push_team_tests_data(client, competition, team_num, data, dbname = "data_pro
|
|||||||
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
|
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
|
||||||
db = client[dbname]
|
db = client[dbname]
|
||||||
mdata = db[colname]
|
mdata = db[colname]
|
||||||
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
|
mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)
|
||||||
|
|
||||||
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
|
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
|
||||||
db = client[dbname]
|
db = client[dbname]
|
||||||
@@ -94,12 +118,12 @@ def push_team_pit_data(client, competition, variable, data, dbname = "data_proce
|
|||||||
def get_analysis_flags(client, flag):
|
def get_analysis_flags(client, flag):
|
||||||
db = client.data_processing
|
db = client.data_processing
|
||||||
mdata = db.flags
|
mdata = db.flags
|
||||||
return mdata.find_one({flag:{"$exists":True}})
|
return mdata.find_one({"_id": "2022"})
|
||||||
|
|
||||||
def set_analysis_flags(client, flag, data):
|
def set_analysis_flags(client, flag, data):
|
||||||
db = client.data_processing
|
db = client.data_processing
|
||||||
mdata = db.flags
|
mdata = db.flags
|
||||||
return mdata.replace_one({flag:{"$exists":True}}, data, True)
|
return mdata.update_one({"_id": "2022"}, {"$set": data})
|
||||||
|
|
||||||
def unkeyify_2l(layered_dict):
|
def unkeyify_2l(layered_dict):
|
||||||
out = {}
|
out = {}
|
||||||
@@ -153,22 +177,17 @@ def load_metric(client, competition, match, group_name, metrics):
|
|||||||
db_data = get_team_metrics_data(client, competition, team)
|
db_data = get_team_metrics_data(client, competition, team)
|
||||||
|
|
||||||
if db_data == None:
|
if db_data == None:
|
||||||
|
|
||||||
elo = {"score": metrics["elo"]["score"]}
|
|
||||||
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
|
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
|
||||||
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
|
|
||||||
|
|
||||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
group[team] = {"gl2": gl2}
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
|
||||||
metrics = db_data["metrics"]
|
metrics = db_data
|
||||||
|
|
||||||
elo = metrics["elo"]
|
|
||||||
gl2 = metrics["gl2"]
|
gl2 = metrics["gl2"]
|
||||||
ts = metrics["ts"]
|
|
||||||
|
|
||||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
group[team] = {"gl2": gl2}
|
||||||
|
|
||||||
return group
|
return group
|
||||||
|
|
132
competition/dep.py
Normal file
132
competition/dep.py
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
# contains deprecated functions, not to be used unless nessasary!
|
||||||
|
|
||||||
|
import json
|
||||||
|
|
||||||
|
sample_json = """
|
||||||
|
{
|
||||||
|
"persistent":{
|
||||||
|
"key":{
|
||||||
|
"database":"",
|
||||||
|
"tba":"",
|
||||||
|
"tra":{
|
||||||
|
"CLIENT_ID":"",
|
||||||
|
"CLIENT_SECRET":"",
|
||||||
|
"url": ""
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"config-preference":"local",
|
||||||
|
"synchronize-config":false
|
||||||
|
},
|
||||||
|
"variable":{
|
||||||
|
"max-threads":0.5,
|
||||||
|
"team":"",
|
||||||
|
"event-delay":false,
|
||||||
|
"loop-delay":0,
|
||||||
|
"reportable":true,
|
||||||
|
"teams":[
|
||||||
|
|
||||||
|
],
|
||||||
|
"modules":{
|
||||||
|
"match":{
|
||||||
|
"tests":{
|
||||||
|
"balls-blocked":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-collected":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-lower-teleop":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-lower-auto":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-started":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analyss",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-upper-teleop":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
],
|
||||||
|
"balls-upper-auto":[
|
||||||
|
"basic_stats",
|
||||||
|
"historical_analysis",
|
||||||
|
"regression_linear",
|
||||||
|
"regression_logarithmic",
|
||||||
|
"regression_exponential",
|
||||||
|
"regression_polynomial",
|
||||||
|
"regression_sigmoidal"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"metric":{
|
||||||
|
"tests":{
|
||||||
|
"gl2":{
|
||||||
|
"score":1500,
|
||||||
|
"rd":250,
|
||||||
|
"vol":0.06
|
||||||
|
},
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"pit":{
|
||||||
|
"tests":{
|
||||||
|
"wheel-mechanism":true,
|
||||||
|
"low-balls":true,
|
||||||
|
"high-balls":true,
|
||||||
|
"wheel-success":true,
|
||||||
|
"strategic-focus":true,
|
||||||
|
"climb-mechanism":true,
|
||||||
|
"attitude":true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
def load_config(path, config_vector):
|
||||||
|
try:
|
||||||
|
f = open(path, "r")
|
||||||
|
config_vector.update(json.load(f))
|
||||||
|
f.close()
|
||||||
|
return 0
|
||||||
|
except:
|
||||||
|
f = open(path, "w")
|
||||||
|
f.write(sample_json)
|
||||||
|
f.close()
|
||||||
|
return 1
|
7
competition/exceptions.py
Normal file
7
competition/exceptions.py
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
class APIError(Exception):
|
||||||
|
def __init__(self, str):
|
||||||
|
super().__init__(str)
|
||||||
|
|
||||||
|
class ConfigurationError (Exception):
|
||||||
|
def __init__(self, str):
|
||||||
|
super().__init__(str)
|
91
competition/interface.py
Normal file
91
competition/interface.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
from logging import Logger as L
|
||||||
|
import datetime
|
||||||
|
import platform
|
||||||
|
import json
|
||||||
|
|
||||||
|
class Logger(L):
|
||||||
|
|
||||||
|
file = None
|
||||||
|
|
||||||
|
levels = {
|
||||||
|
0: "",
|
||||||
|
10:"[DEBUG] ",
|
||||||
|
20:"[INFO] ",
|
||||||
|
30:"[WARNING] ",
|
||||||
|
40:"[ERROR] ",
|
||||||
|
50:"[CRITICAL]",
|
||||||
|
}
|
||||||
|
|
||||||
|
targets = []
|
||||||
|
|
||||||
|
def __init__(self, verbose, profile, debug, file = None):
|
||||||
|
super().__init__("tra_logger")
|
||||||
|
|
||||||
|
self.file = file
|
||||||
|
|
||||||
|
if file != None:
|
||||||
|
self.targets.append(self._send_file)
|
||||||
|
|
||||||
|
if profile:
|
||||||
|
self.targets.append(self._send_null)
|
||||||
|
elif verbose:
|
||||||
|
self.targets.append(self._send_scli)
|
||||||
|
elif debug:
|
||||||
|
self.targets.append(self._send_scli)
|
||||||
|
else:
|
||||||
|
self.targets.append(self._send_null)
|
||||||
|
|
||||||
|
def _send_null(self, msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _send_scli(self, msg):
|
||||||
|
print(msg)
|
||||||
|
|
||||||
|
def _send_file(self, msg):
|
||||||
|
f = open(self.file, 'a')
|
||||||
|
f.write(msg + "\n")
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
def get_time_formatted(self):
|
||||||
|
return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")
|
||||||
|
|
||||||
|
def log(self, level, msg):
|
||||||
|
for t in self.targets:
|
||||||
|
t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)
|
||||||
|
|
||||||
|
def debug(self, msg):
|
||||||
|
self.log(10, msg)
|
||||||
|
|
||||||
|
def info(self, msg):
|
||||||
|
self.log(20, msg)
|
||||||
|
|
||||||
|
def warning(self, msg):
|
||||||
|
self.log(30, msg)
|
||||||
|
|
||||||
|
def error(self, msg):
|
||||||
|
self.log(40, msg)
|
||||||
|
|
||||||
|
def critical(self, msg):
|
||||||
|
self.log(50, msg)
|
||||||
|
|
||||||
|
def splash(self, version):
|
||||||
|
|
||||||
|
def hrule():
|
||||||
|
self.log(0, "#"+38*"-"+"#")
|
||||||
|
def box(s):
|
||||||
|
temp = "|"
|
||||||
|
temp += s
|
||||||
|
temp += (40-len(s)-2)*" "
|
||||||
|
temp += "|"
|
||||||
|
self.log(0, temp)
|
||||||
|
|
||||||
|
hrule()
|
||||||
|
box(" superscript version: " + version)
|
||||||
|
box(" os: " + platform.system())
|
||||||
|
box(" python: " + platform.python_version())
|
||||||
|
hrule()
|
||||||
|
|
||||||
|
def save_module_to_file(self, module, data, results):
|
||||||
|
f = open(module + ".log", "w")
|
||||||
|
json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
|
||||||
|
f.close()
|
@@ -3,6 +3,7 @@ import data as d
|
|||||||
import signal
|
import signal
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from tra_analysis import Analysis as an
|
from tra_analysis import Analysis as an
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
class Module(metaclass = abc.ABCMeta):
|
class Module(metaclass = abc.ABCMeta):
|
||||||
|
|
||||||
@@ -22,7 +23,7 @@ class Module(metaclass = abc.ABCMeta):
|
|||||||
def validate_config(self, *args, **kwargs):
|
def validate_config(self, *args, **kwargs):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
@abc.abstractmethod
|
@abc.abstractmethod
|
||||||
def run(self, exec_threads, *args, **kwargs):
|
def run(self, *args, **kwargs):
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
|
|
||||||
class Match (Module):
|
class Match (Module):
|
||||||
@@ -46,9 +47,9 @@ class Match (Module):
|
|||||||
def validate_config(self):
|
def validate_config(self):
|
||||||
return True, ""
|
return True, ""
|
||||||
|
|
||||||
def run(self, exec_threads):
|
def run(self):
|
||||||
self._load_data()
|
self._load_data()
|
||||||
self._process_data(exec_threads)
|
self._process_data()
|
||||||
self._push_results()
|
self._push_results()
|
||||||
|
|
||||||
def _load_data(self):
|
def _load_data(self):
|
||||||
@@ -85,7 +86,7 @@ class Match (Module):
|
|||||||
if test == "regression_sigmoidal":
|
if test == "regression_sigmoidal":
|
||||||
return an.regression(ranges, data, ['sig'])
|
return an.regression(ranges, data, ['sig'])
|
||||||
|
|
||||||
def _process_data(self, exec_threads):
|
def _process_data(self):
|
||||||
|
|
||||||
tests = self.config["tests"]
|
tests = self.config["tests"]
|
||||||
data = self.data
|
data = self.data
|
||||||
@@ -103,7 +104,6 @@ class Match (Module):
|
|||||||
input_vector.append((team, variable, test, data[team][variable]))
|
input_vector.append((team, variable, test, data[team][variable]))
|
||||||
|
|
||||||
self.data = input_vector
|
self.data = input_vector
|
||||||
#self.results = list(exec_threads.map(self._simplestats, self.data))
|
|
||||||
self.results = []
|
self.results = []
|
||||||
for test_var_data in self.data:
|
for test_var_data in self.data:
|
||||||
self.results.append(self._simplestats(test_var_data))
|
self.results.append(self._simplestats(test_var_data))
|
||||||
@@ -164,31 +164,27 @@ class Metric (Module):
|
|||||||
def validate_config(self):
|
def validate_config(self):
|
||||||
return True, ""
|
return True, ""
|
||||||
|
|
||||||
def run(self, exec_threads):
|
def run(self):
|
||||||
self._load_data()
|
self._load_data()
|
||||||
self._process_data(exec_threads)
|
self._process_data()
|
||||||
self._push_results()
|
self._push_results()
|
||||||
|
|
||||||
def _load_data(self):
|
def _load_data(self):
|
||||||
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
|
self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match']
|
||||||
|
print("Previous last match", self.last_match)
|
||||||
|
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match)
|
||||||
|
|
||||||
def _process_data(self, exec_threads):
|
def _process_data(self):
|
||||||
|
|
||||||
elo_N = self.config["tests"]["elo"]["N"]
|
|
||||||
elo_K = self.config["tests"]["elo"]["K"]
|
|
||||||
|
|
||||||
|
self.results = {}
|
||||||
|
self.match = self.last_match
|
||||||
matches = self.data
|
matches = self.data
|
||||||
|
|
||||||
red = {}
|
red = {}
|
||||||
blu = {}
|
blu = {}
|
||||||
|
for match in tqdm(matches, desc="Metrics"): # grab matches and loop through each one
|
||||||
for match in matches:
|
self.match = max(self.match, int(match['match']))
|
||||||
|
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red
|
||||||
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
|
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue
|
||||||
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
|
|
||||||
|
|
||||||
elo_red_total = 0
|
|
||||||
elo_blu_total = 0
|
|
||||||
|
|
||||||
gl2_red_score_total = 0
|
gl2_red_score_total = 0
|
||||||
gl2_blu_score_total = 0
|
gl2_blu_score_total = 0
|
||||||
@@ -199,72 +195,63 @@ class Metric (Module):
|
|||||||
gl2_red_vol_total = 0
|
gl2_red_vol_total = 0
|
||||||
gl2_blu_vol_total = 0
|
gl2_blu_vol_total = 0
|
||||||
|
|
||||||
for team in red:
|
for team in red: # for each team in red, add up gl2 score components
|
||||||
|
|
||||||
elo_red_total += red[team]["elo"]["score"]
|
|
||||||
|
|
||||||
gl2_red_score_total += red[team]["gl2"]["score"]
|
gl2_red_score_total += red[team]["gl2"]["score"]
|
||||||
gl2_red_rd_total += red[team]["gl2"]["rd"]
|
gl2_red_rd_total += red[team]["gl2"]["rd"]
|
||||||
gl2_red_vol_total += red[team]["gl2"]["vol"]
|
gl2_red_vol_total += red[team]["gl2"]["vol"]
|
||||||
|
|
||||||
for team in blu:
|
for team in blu: # for each team in blue, add up gl2 score components
|
||||||
|
|
||||||
elo_blu_total += blu[team]["elo"]["score"]
|
|
||||||
|
|
||||||
gl2_blu_score_total += blu[team]["gl2"]["score"]
|
gl2_blu_score_total += blu[team]["gl2"]["score"]
|
||||||
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
|
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
|
||||||
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
|
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
|
||||||
|
|
||||||
red_elo = {"score": elo_red_total / len(red)}
|
|
||||||
blu_elo = {"score": elo_blu_total / len(blu)}
|
|
||||||
|
|
||||||
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
|
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3
|
||||||
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
|
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the scores by dividing by 3
|
||||||
|
|
||||||
|
|
||||||
if match["winner"] == "red":
|
if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}
|
||||||
|
|
||||||
observations = {"red": 1, "blu": 0}
|
observations = {"red": 1, "blu": 0}
|
||||||
|
|
||||||
elif match["winner"] == "blue":
|
elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}
|
||||||
|
|
||||||
observations = {"red": 0, "blu": 1}
|
observations = {"red": 0, "blu": 1}
|
||||||
|
|
||||||
else:
|
else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}
|
||||||
|
|
||||||
observations = {"red": 0.5, "blu": 0.5}
|
observations = {"red": 0.5, "blu": 0.5}
|
||||||
|
|
||||||
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
|
|
||||||
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
|
|
||||||
|
|
||||||
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
|
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
|
||||||
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
|
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue
|
||||||
|
|
||||||
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
|
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
|
||||||
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
|
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue
|
||||||
|
|
||||||
for team in red:
|
for team in red: # for each team on red, add the previous score with the delta to find the new score
|
||||||
|
|
||||||
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
|
|
||||||
|
|
||||||
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
|
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
|
||||||
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
|
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
|
||||||
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
|
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
|
||||||
|
|
||||||
for team in blu:
|
for team in blu: # for each team on blue, add the previous score with the delta to find the new score
|
||||||
|
|
||||||
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
|
|
||||||
|
|
||||||
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
|
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
|
||||||
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
|
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
|
||||||
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
|
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
|
||||||
|
|
||||||
temp_vector = {}
|
temp_vector = {}
|
||||||
temp_vector.update(red)
|
temp_vector.update(red) # update the team's score with the temporay vector
|
||||||
temp_vector.update(blu)
|
temp_vector.update(blu)
|
||||||
|
|
||||||
d.push_metric(self.apikey, self.competition, temp_vector)
|
self.results[match['match']] = temp_vector
|
||||||
|
|
||||||
|
d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
|
||||||
|
print("New last match", self.match)
|
||||||
|
d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})
|
||||||
def _push_results(self):
|
def _push_results(self):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@@ -289,17 +276,16 @@ class Pit (Module):
|
|||||||
def validate_config(self):
|
def validate_config(self):
|
||||||
return True, ""
|
return True, ""
|
||||||
|
|
||||||
def run(self, exec_threads):
|
def run(self):
|
||||||
self._load_data()
|
self._load_data()
|
||||||
self._process_data(exec_threads)
|
self._process_data()
|
||||||
self._push_results()
|
self._push_results()
|
||||||
|
|
||||||
def _load_data(self):
|
def _load_data(self):
|
||||||
self.data = d.load_pit(self.apikey, self.competition)
|
self.data = d.load_pit(self.apikey, self.competition)
|
||||||
|
|
||||||
def _process_data(self, exec_threads):
|
def _process_data(self):
|
||||||
tests = self.config["tests"]
|
tests = self.config["tests"]
|
||||||
print(tests)
|
|
||||||
return_vector = {}
|
return_vector = {}
|
||||||
for team in self.data:
|
for team in self.data:
|
||||||
for variable in self.data[team]:
|
for variable in self.data[team]:
|
@@ -1,9 +1,8 @@
|
|||||||
import requests
|
import requests
|
||||||
import json
|
|
||||||
from exceptions import APIError
|
from exceptions import APIError
|
||||||
from config import load_config
|
from dep import load_config
|
||||||
|
|
||||||
url = "https://titanscouting.epochml.org"
|
url = "https://scouting.titanrobotics2022.com"
|
||||||
config_tra = {}
|
config_tra = {}
|
||||||
load_config("config.json", config_tra)
|
load_config("config.json", config_tra)
|
||||||
trakey = config_tra['persistent']['key']['tra']
|
trakey = config_tra['persistent']['key']['tra']
|
||||||
@@ -19,7 +18,7 @@ def get_team_competition():
|
|||||||
if json['success']:
|
if json['success']:
|
||||||
return json['competition']
|
return json['competition']
|
||||||
else:
|
else:
|
||||||
raise APIError(json, endpoint)
|
raise APIError(json)
|
||||||
|
|
||||||
def get_team():
|
def get_team():
|
||||||
endpoint = '/api/fetchTeamCompetition'
|
endpoint = '/api/fetchTeamCompetition'
|
||||||
@@ -32,7 +31,7 @@ def get_team():
|
|||||||
if json['success']:
|
if json['success']:
|
||||||
return json['team']
|
return json['team']
|
||||||
else:
|
else:
|
||||||
raise APIError(json, endpoint)
|
raise APIError(json)
|
||||||
|
|
||||||
def get_team_match_data(competition, team_num):
|
def get_team_match_data(competition, team_num):
|
||||||
endpoint = '/api/fetchAllTeamMatchData'
|
endpoint = '/api/fetchAllTeamMatchData'
|
||||||
@@ -47,7 +46,7 @@ def get_team_match_data(competition, team_num):
|
|||||||
if json['success']:
|
if json['success']:
|
||||||
return json['data'][team_num]
|
return json['data'][team_num]
|
||||||
else:
|
else:
|
||||||
raise APIError(json, endpoint)
|
raise APIError(json)
|
||||||
|
|
||||||
def get_teams_at_competition(competition):
|
def get_teams_at_competition(competition):
|
||||||
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
|
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
|
||||||
@@ -61,4 +60,4 @@ def get_teams_at_competition(competition):
|
|||||||
if json['success']:
|
if json['success']:
|
||||||
return list(json['data'].keys())
|
return list(json['data'].keys())
|
||||||
else:
|
else:
|
||||||
raise APIError(json, endpoint)
|
raise APIError(json)
|
15
competition/requirements.txt
Normal file
15
competition/requirements.txt
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
cerberus
|
||||||
|
dnspython
|
||||||
|
numpy
|
||||||
|
pandas
|
||||||
|
pyinstaller
|
||||||
|
pylint
|
||||||
|
pymongo
|
||||||
|
pyparsing
|
||||||
|
python-daemon
|
||||||
|
pyzmq
|
||||||
|
requests
|
||||||
|
scikit-learn
|
||||||
|
scipy
|
||||||
|
six
|
||||||
|
tra-analysis
|
@@ -23,6 +23,9 @@ __changelog__ = """changelog:
|
|||||||
- config-preference option selects between prioritizing local config and prioritizing database config
|
- config-preference option selects between prioritizing local config and prioritizing database config
|
||||||
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
|
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
|
||||||
- divided config options between persistent ones (keys), and variable ones (everything else)
|
- divided config options between persistent ones (keys), and variable ones (everything else)
|
||||||
|
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
|
||||||
|
- module.py contains classes, each one represents a single data analysis routine
|
||||||
|
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
|
||||||
0.9.3:
|
0.9.3:
|
||||||
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
|
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
|
||||||
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
|
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
|
||||||
@@ -144,47 +147,29 @@ __author__ = (
|
|||||||
"Jacob Levine <jlevine@imsa.edu>",
|
"Jacob Levine <jlevine@imsa.edu>",
|
||||||
)
|
)
|
||||||
|
|
||||||
__all__ = [
|
|
||||||
"load_config",
|
|
||||||
"save_config",
|
|
||||||
]
|
|
||||||
|
|
||||||
# imports:
|
# imports:
|
||||||
|
|
||||||
import json
|
import os, sys, time
|
||||||
from multiprocessing import freeze_support
|
import pymongo # soon to be deprecated
|
||||||
import os
|
|
||||||
import pymongo
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import traceback
|
import traceback
|
||||||
import warnings
|
import warnings
|
||||||
import zmq
|
from config import Configuration, ConfigurationError
|
||||||
import pull
|
from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
|
||||||
from config import parse_config_persistent, parse_config_variable, resolve_config_conflicts, load_config, save_config, ConfigurationError
|
from interface import Logger
|
||||||
from data import get_previous_time, set_current_time, check_new_database_matches
|
|
||||||
from interface import splash, log, ERR, INF, stdout, stderr
|
|
||||||
from module import Match, Metric, Pit
|
from module import Match, Metric, Pit
|
||||||
|
import zmq
|
||||||
|
|
||||||
config_path = "config.json"
|
config_path = "config.json"
|
||||||
|
|
||||||
def main(send, verbose = False, profile = False, debug = False):
|
def main(logger, verbose, profile, debug, socket_send = None):
|
||||||
|
|
||||||
def close_all():
|
def close_all():
|
||||||
if "exec_threads" in locals():
|
|
||||||
exec_threads.terminate()
|
|
||||||
exec_threads.join()
|
|
||||||
exec_threads.close()
|
|
||||||
if "client" in locals():
|
if "client" in locals():
|
||||||
client.close()
|
client.close()
|
||||||
if "f" in locals():
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
warnings.filterwarnings("ignore")
|
warnings.filterwarnings("ignore")
|
||||||
exit_code = 0
|
|
||||||
|
|
||||||
if verbose:
|
logger.splash(__version__)
|
||||||
splash(__version__)
|
|
||||||
|
|
||||||
modules = {"match": Match, "metric": Metric, "pit": Pit}
|
modules = {"match": Match, "metric": Metric, "pit": Pit}
|
||||||
|
|
||||||
@@ -194,32 +179,32 @@ def main(send, verbose = False, profile = False, debug = False):
|
|||||||
|
|
||||||
loop_start = time.time()
|
loop_start = time.time()
|
||||||
|
|
||||||
send(stdout, INF, "current time: " + str(loop_start))
|
logger.info("current time: " + str(loop_start))
|
||||||
|
socket_send("current time: " + str(loop_start))
|
||||||
|
|
||||||
config = {}
|
config = Configuration(config_path)
|
||||||
|
|
||||||
if load_config(config_path, config):
|
logger.info("found and loaded config at <" + config_path + ">")
|
||||||
raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110)
|
socket_send("found and loaded config at <" + config_path + ">")
|
||||||
|
|
||||||
send(stdout, INF, "found and loaded config at <" + config_path + ">")
|
apikey, tbakey = config.database, config.tba
|
||||||
|
|
||||||
apikey, tbakey, preference, sync = parse_config_persistent(send, config)
|
logger.info("found and loaded database and tba keys")
|
||||||
|
socket_send("found and loaded database and tba keys")
|
||||||
send(stdout, INF, "found and loaded database and tba keys")
|
|
||||||
|
|
||||||
client = pymongo.MongoClient(apikey)
|
client = pymongo.MongoClient(apikey)
|
||||||
|
|
||||||
send(stdout, INF, "established connection to database")
|
logger.info("established connection to database")
|
||||||
|
socket_send("established connection to database")
|
||||||
|
|
||||||
previous_time = get_previous_time(client)
|
previous_time = get_previous_time(client)
|
||||||
send(stdout, INF, "analysis backtimed to: " + str(previous_time))
|
|
||||||
|
|
||||||
config = resolve_config_conflicts(send, client, config, preference, sync)
|
logger.info("analysis backtimed to: " + str(previous_time))
|
||||||
|
socket_send("analysis backtimed to: " + str(previous_time))
|
||||||
|
|
||||||
exec_threads, config_modules = parse_config_variable(send, config)
|
config.resolve_config_conflicts(logger, client)
|
||||||
if 'competition' in config['variable']:
|
|
||||||
competition = config['variable']['competition']
|
config_modules, competition = config.modules, config.competition
|
||||||
else:
|
|
||||||
competition = pull.get_team_competition()
|
|
||||||
for m in config_modules:
|
for m in config_modules:
|
||||||
if m in modules:
|
if m in modules:
|
||||||
start = time.time()
|
start = time.time()
|
||||||
@@ -227,71 +212,81 @@ def main(send, verbose = False, profile = False, debug = False):
|
|||||||
valid = current_module.validate_config()
|
valid = current_module.validate_config()
|
||||||
if not valid:
|
if not valid:
|
||||||
continue
|
continue
|
||||||
current_module.run(exec_threads)
|
current_module.run()
|
||||||
send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds")
|
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
|
||||||
|
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
|
||||||
if debug:
|
if debug:
|
||||||
f = open(m + ".log", "w+")
|
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
|
||||||
json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
set_current_time(client, loop_start)
|
set_current_time(client, loop_start)
|
||||||
close_all()
|
close_all()
|
||||||
|
|
||||||
send(stdout, INF, "closed threads and database client")
|
logger.info("closed threads and database client")
|
||||||
send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
||||||
|
socket_send("closed threads and database client")
|
||||||
|
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
||||||
|
|
||||||
if profile:
|
if profile:
|
||||||
exit_code = 0
|
return 0
|
||||||
break
|
|
||||||
|
if debug:
|
||||||
|
return 0
|
||||||
|
|
||||||
event_delay = config["variable"]["event-delay"]
|
event_delay = config["variable"]["event-delay"]
|
||||||
if event_delay:
|
if event_delay:
|
||||||
send(stdout, INF, "loop delayed until database returns new matches")
|
logger.info("loop delayed until database returns new matches")
|
||||||
|
socket_send("loop delayed until database returns new matches")
|
||||||
new_match = False
|
new_match = False
|
||||||
while not new_match:
|
while not new_match:
|
||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
new_match = check_new_database_matches(client, competition)
|
new_match = check_new_database_matches(client, competition)
|
||||||
send(stdout, INF, "database returned new matches")
|
logger.info("database returned new matches")
|
||||||
|
socket_send("database returned new matches")
|
||||||
else:
|
else:
|
||||||
loop_delay = float(config["variable"]["loop-delay"])
|
loop_delay = float(config["variable"]["loop-delay"])
|
||||||
remaining_time = loop_delay - (time.time() - loop_start)
|
remaining_time = loop_delay - (time.time() - loop_start)
|
||||||
if remaining_time > 0:
|
if remaining_time > 0:
|
||||||
send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds")
|
logger.info("loop delayed by " + str(remaining_time) + " seconds")
|
||||||
|
socket_send("loop delayed by " + str(remaining_time) + " seconds")
|
||||||
time.sleep(remaining_time)
|
time.sleep(remaining_time)
|
||||||
|
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
send(stdout, INF, "detected KeyboardInterrupt, killing threads")
|
|
||||||
close_all()
|
close_all()
|
||||||
send(stdout, INF, "terminated threads, exiting")
|
logger.info("detected KeyboardInterrupt, exiting")
|
||||||
break
|
socket_send("detected KeyboardInterrupt, exiting")
|
||||||
|
return 0
|
||||||
|
|
||||||
except ConfigurationError as e:
|
except ConfigurationError as e:
|
||||||
send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code)
|
str_e = "".join(traceback.format_exception(e))
|
||||||
traceback.print_exc(file = stderr)
|
logger.error("encountered a configuration error: " + str(e))
|
||||||
exit_code = 1
|
logger.error(str_e)
|
||||||
|
socket_send("encountered a configuration error: " + str(e))
|
||||||
|
socket_send(str_e)
|
||||||
close_all()
|
close_all()
|
||||||
break
|
return 1
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
send(stderr, ERR, "encountered an exception while running", code = 1)
|
str_e = "".join(traceback.format_exception(e))
|
||||||
traceback.print_exc(file = stderr)
|
logger.error("encountered an exception while running")
|
||||||
exit_code = 1
|
logger.error(str_e)
|
||||||
|
socket_send("encountered an exception while running")
|
||||||
|
socket_send(str_e)
|
||||||
close_all()
|
close_all()
|
||||||
break
|
return 1
|
||||||
|
|
||||||
return exit_code
|
def start(pid_path, verbose, profile, debug):
|
||||||
|
|
||||||
def start(pid_path, verbose = False, profile = False, debug = False):
|
|
||||||
|
|
||||||
if profile:
|
if profile:
|
||||||
|
|
||||||
def send(target, level, message, code = 0):
|
def send(msg):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
logger = Logger(verbose, profile, debug)
|
||||||
|
|
||||||
import cProfile, pstats, io
|
import cProfile, pstats, io
|
||||||
profile = cProfile.Profile()
|
profile = cProfile.Profile()
|
||||||
profile.enable()
|
profile.enable()
|
||||||
exit_code = main(send, profile = True)
|
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||||
profile.disable()
|
profile.disable()
|
||||||
f = open("profile.txt", 'w+')
|
f = open("profile.txt", 'w+')
|
||||||
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
|
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
|
||||||
@@ -300,35 +295,53 @@ def start(pid_path, verbose = False, profile = False, debug = False):
|
|||||||
|
|
||||||
elif verbose:
|
elif verbose:
|
||||||
|
|
||||||
exit_code = main(log, verbose = verbose)
|
def send(msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
logger = Logger(verbose, profile, debug)
|
||||||
|
|
||||||
|
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||||
sys.exit(exit_code)
|
sys.exit(exit_code)
|
||||||
|
|
||||||
elif debug:
|
elif debug:
|
||||||
|
|
||||||
exit_code = main(log, verbose = True, profile = True, debug = debug)
|
def send(msg):
|
||||||
|
pass
|
||||||
|
|
||||||
|
logger = Logger(verbose, profile, debug)
|
||||||
|
|
||||||
|
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||||
sys.exit(exit_code)
|
sys.exit(exit_code)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
|
|
||||||
f = open('errorlog.log', 'w+')
|
logfile = "logfile.log"
|
||||||
|
|
||||||
|
f = open(logfile, 'w+')
|
||||||
|
f.close()
|
||||||
|
|
||||||
|
e = open('errorlog.log', 'w+')
|
||||||
with daemon.DaemonContext(
|
with daemon.DaemonContext(
|
||||||
working_directory = os.getcwd(),
|
working_directory = os.getcwd(),
|
||||||
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
|
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
|
||||||
stderr = f
|
stderr = e
|
||||||
):
|
):
|
||||||
|
|
||||||
context = zmq.Context()
|
context = zmq.Context()
|
||||||
socket = context.socket(zmq.PUB)
|
socket = context.socket(zmq.PUB)
|
||||||
socket.bind("tcp://*:5678")
|
socket.bind("tcp://*:5678")
|
||||||
|
|
||||||
socket.send(b'status')
|
socket.send(b'status')
|
||||||
|
|
||||||
def send(target, level, message, code = 0):
|
def send(msg):
|
||||||
socket.send(bytes("status: " + message, 'utf-8'))
|
socket.send(bytes("status: " + msg, "utf-8"))
|
||||||
|
|
||||||
|
logger = Logger(verbose, profile, debug, file = logfile)
|
||||||
|
|
||||||
|
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||||
|
|
||||||
exit_code = main(send)
|
|
||||||
socket.close()
|
socket.close()
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
sys.exit(exit_code)
|
sys.exit(exit_code)
|
||||||
|
|
||||||
def stop(pid_path):
|
def stop(pid_path):
|
||||||
@@ -350,17 +363,16 @@ def stop(pid_path):
|
|||||||
if os.path.exists(pid_path):
|
if os.path.exists(pid_path):
|
||||||
os.remove(pid_path)
|
os.remove(pid_path)
|
||||||
else:
|
else:
|
||||||
traceback.print_exc(file = stderr)
|
traceback.print_exc(file = sys.stderr)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
def restart(pid_path):
|
def restart(pid_path):
|
||||||
stop(pid_path)
|
stop(pid_path)
|
||||||
start(pid_path)
|
start(pid_path, False, False, False)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
|
|
||||||
if sys.platform.startswith("win"):
|
if sys.platform.startswith("win"):
|
||||||
freeze_support()
|
|
||||||
start(None, verbose = True)
|
start(None, verbose = True)
|
||||||
|
|
||||||
else:
|
else:
|
||||||
@@ -370,17 +382,17 @@ if __name__ == "__main__":
|
|||||||
pid_path = "tra-daemon.pid"
|
pid_path = "tra-daemon.pid"
|
||||||
if len(sys.argv) == 2:
|
if len(sys.argv) == 2:
|
||||||
if 'start' == sys.argv[1]:
|
if 'start' == sys.argv[1]:
|
||||||
start(pid_path)
|
start(pid_path, False, False, False)
|
||||||
elif 'stop' == sys.argv[1]:
|
elif 'stop' == sys.argv[1]:
|
||||||
stop(pid_path)
|
stop(pid_path)
|
||||||
elif 'restart' == sys.argv[1]:
|
elif 'restart' == sys.argv[1]:
|
||||||
restart(pid_path)
|
restart(pid_path)
|
||||||
elif 'verbose' == sys.argv[1]:
|
elif 'verbose' == sys.argv[1]:
|
||||||
start(None, verbose = True)
|
start(None, True, False, False)
|
||||||
elif 'profile' == sys.argv[1]:
|
elif 'profile' == sys.argv[1]:
|
||||||
start(None, profile=True)
|
start(None, False, True, False)
|
||||||
elif 'debug' == sys.argv[1]:
|
elif 'debug' == sys.argv[1]:
|
||||||
start(None, debug = True)
|
start(None, False, False, True)
|
||||||
else:
|
else:
|
||||||
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
|
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
|
||||||
sys.exit(2)
|
sys.exit(2)
|
@@ -1,244 +0,0 @@
|
|||||||
import math
|
|
||||||
import json
|
|
||||||
from multiprocessing import Pool
|
|
||||||
import os
|
|
||||||
from cerberus import Validator
|
|
||||||
from exceptions import ConfigurationError
|
|
||||||
|
|
||||||
from data import set_database_config, get_database_config
|
|
||||||
from interface import stderr, stdout, INF, ERR
|
|
||||||
|
|
||||||
config_path = "config.json"
|
|
||||||
|
|
||||||
sample_json = """
|
|
||||||
{
|
|
||||||
"persistent":{
|
|
||||||
"key":{
|
|
||||||
"database":"",
|
|
||||||
"tba":"",
|
|
||||||
"tra":{
|
|
||||||
"CLIENT_ID":"",
|
|
||||||
"CLIENT_SECRET":"",
|
|
||||||
"url": ""
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"config-preference":"local",
|
|
||||||
"synchronize-config":false
|
|
||||||
},
|
|
||||||
"variable":{
|
|
||||||
"max-threads":0.5,
|
|
||||||
"team":"",
|
|
||||||
"event-delay":false,
|
|
||||||
"loop-delay":0,
|
|
||||||
"reportable":true,
|
|
||||||
"teams":[
|
|
||||||
|
|
||||||
],
|
|
||||||
"modules":{
|
|
||||||
"match":{
|
|
||||||
"tests":{
|
|
||||||
"balls-blocked":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-collected":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-lower-teleop":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-lower-auto":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-started":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analyss",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-upper-teleop":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
],
|
|
||||||
"balls-upper-auto":[
|
|
||||||
"basic_stats",
|
|
||||||
"historical_analysis",
|
|
||||||
"regression_linear",
|
|
||||||
"regression_logarithmic",
|
|
||||||
"regression_exponential",
|
|
||||||
"regression_polynomial",
|
|
||||||
"regression_sigmoidal"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"metric":{
|
|
||||||
"tests":{
|
|
||||||
"elo":{
|
|
||||||
"score":1500,
|
|
||||||
"N":400,
|
|
||||||
"K":24
|
|
||||||
},
|
|
||||||
"gl2":{
|
|
||||||
"score":1500,
|
|
||||||
"rd":250,
|
|
||||||
"vol":0.06
|
|
||||||
},
|
|
||||||
"ts":{
|
|
||||||
"mu":25,
|
|
||||||
"sigma":8.33
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"pit":{
|
|
||||||
"tests":{
|
|
||||||
"wheel-mechanism":true,
|
|
||||||
"low-balls":true,
|
|
||||||
"high-balls":true,
|
|
||||||
"wheel-success":true,
|
|
||||||
"strategic-focus":true,
|
|
||||||
"climb-mechanism":true,
|
|
||||||
"attitude":true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"""
|
|
||||||
|
|
||||||
def parse_config_persistent(send, config):
	"""Validate *config* against the schema and extract the persistent fields.

	Returns a tuple (apikey, tbakey, preference, sync). Raises
	ConfigurationError with code 101 when schema validation fails.
	"""
	validator = Validator(load_validation_schema(), allow_unknown = True)
	if not validator.validate(config):
		raise ConfigurationError(validator.errors, 101)

	persistent = config["persistent"]
	apikey = persistent["key"]["database"]
	tbakey = persistent["key"]["tba"]
	preference = persistent["config-preference"]
	sync = persistent["synchronize-config"]

	return apikey, tbakey, preference, sync
|
|
||||||
def parse_config_variable(send, config):
	"""Parse the "variable" config section and start the process pool.

	max-threads semantics: negative = leave that many cores free, 0 = use all
	cores, a fraction in (0, 1) = that fraction of cores, >= 1 = exact count.
	Returns (exec_threads, modules). Raises ConfigurationError on a missing
	or out-of-range max-threads (109/110), pool start failure (200), or a
	missing/empty modules field (102/106).
	"""
	sys_max_threads = os.cpu_count()
	try:
		cfg_max_threads = config["variable"]["max-threads"]
	except (KeyError, TypeError):
		raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109)
	if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
		alloc_processes = sys_max_threads + cfg_max_threads
	elif cfg_max_threads > 0 and cfg_max_threads < 1:
		alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
	elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
		alloc_processes = cfg_max_threads
	elif cfg_max_threads == 0:
		alloc_processes = sys_max_threads
	else:
		# bug fix: cfg_max_threads is an int/float and must be converted with
		# str() before concatenation; previously this raised TypeError and
		# masked the intended ConfigurationError.
		raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + str(cfg_max_threads), 110)
	try:
		exec_threads = Pool(processes = alloc_processes)
	except Exception as e:
		send(stderr, INF, e)
		raise ConfigurationError("unable to start threads", 200)
	send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")

	try:
		modules = config["variable"]["modules"]
	except (KeyError, TypeError):
		raise ConfigurationError("variable/modules field is invalid or missing", 102)

	if modules == None:
		raise ConfigurationError("variable/modules field is empty", 106)

	send(stdout, INF, "found and loaded competition, match, metrics, pit from config")

	return exec_threads, modules
|
|
||||||
def resolve_config_conflicts(send, client, config, preference, sync):
	"""Reconcile the local config with the database copy.

	When *sync* is true the preferred side overwrites the other; when false
	the preferred side is simply used without writing anything back.
	Returns the (possibly updated) config dict; raises ConfigurationError
	120 on a bad preference value, 121 when the local file can't be saved.
	"""

	if sync:
		if preference == "local" or preference == "client":
			# local wins: push the local "variable" section to the database
			send(stdout, INF, "config-preference set to local/client, loading local config information")
			remote_config = get_database_config(client)
			if remote_config != config["variable"]:
				set_database_config(client, config["variable"])
				send(stdout, INF, "database config was different and was updated")
			return config
		elif preference == "remote" or preference == "database":
			# remote wins: adopt the database copy and persist it locally
			send(stdout, INF, "config-preference set to remote/database, loading remote config information")
			remote_config= get_database_config(client)
			if remote_config != config["variable"]:
				config["variable"] = remote_config
				# save_config returns nonzero on failure
				if save_config(config_path, config):
					raise ConfigurationError("local config was different but could not be updated", 121)
				send(stdout, INF, "local config was different and was updated")
			return config
		else:
			raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
	else:
		if preference == "local" or preference == "client":
			send(stdout, INF, "config-preference set to local/client, loading local config information")
			return config
		elif preference == "remote" or preference == "database":
			send(stdout, INF, "config-preference set to remote/database, loading database config information")
			config["variable"] = get_database_config(client)
			return config
		else:
			raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
||||||
def load_config(path, config_vector):
	"""Load JSON config from *path* into *config_vector* (mutated in place).

	Returns 0 on success. On any failure, writes the default sample config
	to *path* and returns 1 so the caller knows a fresh file was generated.
	"""
	try:
		# bug fix: use a context manager so the handle is closed even when
		# json.load raises (previously the handle leaked on parse errors)
		with open(path, "r") as f:
			config_vector.update(json.load(f))
		return 0
	except Exception:
		with open(path, "w") as f:
			f.write(sample_json)
		return 1
|
||||||
def load_validation_schema():
	"""Return the parsed cerberus schema used to validate config files.

	Raises FileNotFoundError when validation-schema.json cannot be read
	or parsed.
	"""
	try:
		with open("validation-schema.json", "r") as schema_file:
			return json.load(schema_file)
	except Exception:
		raise FileNotFoundError("Validation schema not found at validation-schema.json")
|
||||||
def save_config(path, config_vector):
	"""Serialize *config_vector* to *path* as pretty-printed JSON; returns 0."""
	with open(path, "w+") as out:
		json.dump(config_vector, out, ensure_ascii=False, indent=4)
	return 0
@@ -1,11 +0,0 @@
|
|||||||
class APIError(Exception):
	"""Raised when a remote API request fails.

	Attributes:
		endpoint: the API endpoint that produced the failure.
		code: reserved error code slot (unset by default).
	"""
	code = None
	def __init__(self, message, endpoint):
		# renamed parameter from `str` (shadowed the builtin) to `message`;
		# callers pass positionally so the interface is unchanged
		super().__init__(message)
		self.endpoint = endpoint
|
||||||
class ConfigurationError(Exception):
	"""Raised when configuration loading/validation fails.

	Attributes:
		code: numeric error code identifying the configuration failure.
	"""
	code = None
	def __init__(self, message, code):
		# renamed parameter from `str` (shadowed the builtin) to `message`;
		# callers pass positionally so the interface is unchanged
		super().__init__(message)
		self.code = code
|
@@ -1,44 +0,0 @@
|
|||||||
import sys
|
|
||||||
import time
|
|
||||||
from os import system, name
|
|
||||||
import platform
|
|
||||||
|
|
||||||
empty_delim = " "
|
|
||||||
hard_divided_delim = "|"
|
|
||||||
soft_divided_delim = "|"
|
|
||||||
l_brack = "["
|
|
||||||
r_brack = "]"
|
|
||||||
|
|
||||||
ERR = "[ERR]"
|
|
||||||
INF = "[INF]"
|
|
||||||
|
|
||||||
stdout = sys.stdout
|
|
||||||
stderr = sys.stderr
|
|
||||||
|
|
||||||
def log(target, level, message, code = 0):
	"""Write one timestamped, formatted log line to the *target* stream.

	Format: "<ctime> <level>[<signed zero-padded code>] | <message>".
	"""
	stamped = time.ctime() + empty_delim + str(level)
	stamped += l_brack + f"{code:+05}" + r_brack
	stamped += empty_delim + soft_divided_delim + empty_delim + message
	print(stamped, file = target)
|
||||||
def clear():
	"""Clear the terminal ("cls" on Windows, "clear" elsewhere)."""
	command = "cls" if name == "nt" else "clear"
	system(command)
|
||||||
def splash(version):
	"""Print a 40-column boxed startup banner (version, OS, Python)."""

	def hrule():
		# top/bottom border of the banner
		print("#" + 38 * "-" + "#")

	def box(s):
		# one padded banner row
		print("|" + s + (40 - len(s) - 2) * " " + "|")

	rows = [
		" superscript version: " + version,
		" os: " + platform.system(),
		" python: " + platform.python_version(),
	]
	hrule()
	for row in rows:
		box(row)
	hrule()
|
@@ -1,39 +0,0 @@
|
|||||||
# -*- mode: python ; coding: utf-8 -*-
# PyInstaller build spec: bundles superscript.py into a single-file
# executable named "superscript". Analysis/PYZ/EXE are globals injected
# by PyInstaller when it executes this spec.

block_cipher = None  # no bytecode encryption

a = Analysis(['superscript.py'],
	pathex=['/workspaces/tra-data-analysis/src'],
	binaries=[],
	datas=[],
	# modules PyInstaller's static analysis misses (imported dynamically)
	hiddenimports=[
		"dnspython",
		"sklearn.utils._weight_vector",
		"requests",
	],
	hookspath=[],
	runtime_hooks=[],
	# heavyweight packages deliberately left out of the bundle
	excludes=[
		"matplotlib",
		"pandas"
	],
	win_no_prefer_redirects=False,
	win_private_assemblies=False,
	cipher=block_cipher,
	noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
	cipher=block_cipher)
exe = EXE(pyz,
	a.scripts,
	a.binaries,
	a.zipfiles,
	a.datas,
	# pass "-W ignore" to the bundled interpreter to suppress warnings
	[('W ignore', None, 'OPTION')],
	name='superscript',
	debug=False,
	bootloader_ignore_signals=False,
	strip=False,
	upx=True,
	upx_exclude=[],
	runtime_tmpdir=None,
	console=True )
|
@@ -1,27 +0,0 @@
|
|||||||
{
|
|
||||||
"persistent": {
|
|
||||||
"type": "dict",
|
|
||||||
"require_all": true,
|
|
||||||
"schema": {
|
|
||||||
"key": {
|
|
||||||
"type": "dict",
|
|
||||||
"require_all":true,
|
|
||||||
"schema": {
|
|
||||||
"database": {"type":"string"},
|
|
||||||
"tba": {"type": "string"},
|
|
||||||
"tra": {
|
|
||||||
"type": "dict",
|
|
||||||
"require_all": true,
|
|
||||||
"schema": {
|
|
||||||
"CLIENT_ID": {"type": "string"},
|
|
||||||
"CLIENT_SECRET": {"type": "string"},
|
|
||||||
"url": {"type": "string"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"config-preference": {"type": "string", "required": true},
|
|
||||||
"synchronize-config": {"type": "boolean", "required": true}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
129
src/gui/data.py
129
src/gui/data.py
@@ -1,129 +0,0 @@
|
|||||||
import requests
|
|
||||||
import pymongo
|
|
||||||
import pandas as pd
|
|
||||||
import time
|
|
||||||
|
|
||||||
def pull_new_tba_matches(apikey, competition, cutoff):
	"""Fetch qualification matches for *competition* from The Blue Alliance.

	Keeps only matches actually played at or after *cutoff* (unix seconds).
	Returns a list of dicts: match number, blue/red team-number lists, and
	the winning alliance string.
	"""
	# bug fix: TBA authenticates via the "X-TBA-Auth-Key" header (hyphen,
	# not underscore); the misspelled header left requests unauthenticated.
	response = requests.get(
		"https://www.thebluealliance.com/api/v3/event/" + competition + "/matches/simple",
		headers={"X-TBA-Auth-Key": apikey}
	)
	out = []
	for match in response.json():
		if match["actual_time"] is not None and match["actual_time"] - cutoff >= 0 and match["comp_level"] == "qm":
			out.append({
				"match": match['match_number'],
				# team keys look like "frc2022"; strip the prefix to get numbers
				"blue": list(map(lambda x: int(x[3:]), match['alliances']['blue']['team_keys'])),
				"red": list(map(lambda x: int(x[3:]), match['alliances']['red']['team_keys'])),
				"winner": match["winning_alliance"],
			})
	return out
|
|
||||||
def get_team_match_data(apikey, competition, team_num):
	"""Return a DataFrame of scouted match data for one team.

	Columns are match identifiers; values come from each document's "data"
	field in data_scouting.matchdata.
	"""
	database = pymongo.MongoClient(apikey).data_scouting
	records = {}
	for entry in database.matchdata.find({"competition" : competition, "team_scouted": team_num}):
		records[entry['match']] = entry['data']
	return pd.DataFrame(records)
|
|
||||||
def get_team_pit_data(apikey, competition, team_num):
	"""Return the pit-scouting "data" field for one team at a competition.

	Note: if no matching document exists, find_one returns None and the
	subscript raises TypeError.
	"""
	# removed dead local `out = {}` that was never used
	client = pymongo.MongoClient(apikey)
	mdata = client.data_scouting.pitdata
	return mdata.find_one({"competition" : competition, "team_scouted": team_num})["data"]
|
|
||||||
def get_team_metrics_data(apikey, competition, team_num):
	"""Fetch the computed metrics document for one team, or None if absent."""
	metrics_collection = pymongo.MongoClient(apikey).data_processing.team_metrics
	return metrics_collection.find_one({"competition" : competition, "team": team_num})
|
|
||||||
def get_match_data_formatted(apikey, competition):
	"""Return {team_number: per-variable match series} for a competition.

	Teams whose data is missing or malformed are silently skipped
	(best-effort aggregation).
	"""
	team_list = pymongo.MongoClient(apikey).data_scouting.teamlist
	teams = team_list.find_one({"competition":competition})
	formatted = {}
	for team in teams:
		try:
			raw = get_team_match_data(apikey, competition, int(team))
			formatted[int(team)] = unkeyify_2l(raw.transpose().to_dict())
		except:
			pass  # skip non-numeric keys / teams without data
	return formatted
|
|
||||||
def get_metrics_data_formatted(apikey, competition):
	"""Return {team_number: metrics document} for every team at *competition*.

	Bug fix: this previously called d.get_team_metrics_data, but no module
	is imported as `d` in this file, so every iteration raised NameError
	which the bare except swallowed — the result was always empty. Call the
	local get_team_metrics_data directly.
	"""
	client = pymongo.MongoClient(apikey)
	teams = client.data_scouting.teamlist.find_one({"competition":competition})
	out = {}
	for team in teams:
		try:
			out[int(team)] = get_team_metrics_data(apikey, competition, int(team))
		except:
			pass  # best effort: skip non-numeric keys / missing teams
	return out
|
|
||||||
def get_pit_data_formatted(apikey, competition):
	"""Return {team_number: pit data} for every team at *competition*.

	Teams with missing/malformed pit data are skipped (best effort).
	"""
	team_list = pymongo.MongoClient(apikey).data_scouting.teamlist
	teams = team_list.find_one({"competition":competition})
	formatted = {}
	for team in teams:
		try:
			formatted[int(team)] = get_team_pit_data(apikey, competition, int(team))
		except:
			pass  # skip non-numeric keys / teams without data
	return formatted
|
|
||||||
def get_pit_variable_data(apikey, competition):
	"""Return a cursor over every document in data_processing.team_pit.

	Note: *competition* is accepted for interface symmetry with the other
	getters but is not used in the query (the find() is unfiltered).
	"""
	# removed dead local `out = {}` that was never used
	client = pymongo.MongoClient(apikey)
	mdata = client.data_processing.team_pit
	return mdata.find()
|
|
||||||
def get_pit_variable_formatted(apikey, competition):
	"""Return {variable_name: data} built from the team_pit collection."""
	records = get_pit_variable_data(apikey, competition)
	return {record["variable"]: record["data"] for record in records}
|
|
||||||
def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
	"""Upsert per-team test results keyed by (competition, team).

	The trailing True enables upsert on replace_one.
	"""
	collection = pymongo.MongoClient(apikey)[dbname][colname]
	# _id suffix "am" mirrors the sibling push functions — presumably a
	# legacy id scheme; verify before changing
	document = {"_id": competition + str(team_num) + "am", "competition" : competition, "team" : team_num, "data" : data}
	collection.replace_one({"competition" : competition, "team": team_num}, document, True)
|
|
||||||
def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
	"""Upsert per-team metric results keyed by (competition, team).

	The trailing True enables upsert on replace_one.
	"""
	collection = pymongo.MongoClient(apikey)[dbname][colname]
	document = {"_id": competition + str(team_num) + "am", "competition" : competition, "team" : team_num, "metrics" : data}
	collection.replace_one({"competition" : competition, "team": team_num}, document, True)
|
|
||||||
def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
	"""Upsert pit analysis data keyed by (competition, variable).

	The trailing True enables upsert on replace_one.
	"""
	collection = pymongo.MongoClient(apikey)[dbname][colname]
	document = {"competition" : competition, "variable" : variable, "data" : data}
	collection.replace_one({"competition" : competition, "variable": variable}, document, True)
|
|
||||||
def get_analysis_flags(apikey, flag):
	"""Return the flags document containing key *flag*, or None if absent."""
	flags_collection = pymongo.MongoClient(apikey).data_processing.flags
	return flags_collection.find_one({flag:{"$exists":True}})
|
|
||||||
def set_analysis_flags(apikey, flag, data):
	"""Replace (or upsert) the flags document containing key *flag* with *data*."""
	flags_collection = pymongo.MongoClient(apikey).data_processing.flags
	return flags_collection.replace_one({flag:{"$exists":True}}, data, True)
|
|
||||||
def unkeyify_2l(layered_dict):
	"""Flatten a two-level dict into {outer_key: [values ordered by inner key]}.

	Inner values are sorted by their (string) inner keys, then the keys are
	discarded, leaving one ordered value list per outer key.
	"""
	flattened = {}
	for outer_key, inner in layered_dict.items():
		ordered_pairs = sorted(inner.items(), key = lambda kv: kv[0])
		flattened[outer_key] = [value for _, value in ordered_pairs]
	return flattened
|
|
@@ -1,151 +0,0 @@
|
|||||||
<Launch>:
|
|
||||||
orientation: "vertical"
|
|
||||||
|
|
||||||
NavigationLayout:
|
|
||||||
ScreenManager:
|
|
||||||
id: screen_manager
|
|
||||||
HomeScreen:
|
|
||||||
name: "Home"
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "vertical"
|
|
||||||
MDToolbar:
|
|
||||||
title: screen_manager.current
|
|
||||||
elevation: 10
|
|
||||||
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
|
|
||||||
|
|
||||||
GridLayout:
|
|
||||||
cols: 1
|
|
||||||
padding: 15, 15
|
|
||||||
spacing: 20, 20
|
|
||||||
MDTextFieldRect:
|
|
||||||
hint_text: "Console Log"
|
|
||||||
# size_hint: .8, None
|
|
||||||
# align: 'center'
|
|
||||||
# Widget:
|
|
||||||
SettingsScreen:
|
|
||||||
name: "Settings"
|
|
||||||
BoxLayout:
|
|
||||||
orientation: 'vertical'
|
|
||||||
MDToolbar:
|
|
||||||
title: screen_manager.current
|
|
||||||
elevation: 10
|
|
||||||
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
|
|
||||||
Widget:
|
|
||||||
InfoScreen:
|
|
||||||
name: "Info"
|
|
||||||
BoxLayout:
|
|
||||||
orientation: 'vertical'
|
|
||||||
MDToolbar:
|
|
||||||
title: screen_manager.current
|
|
||||||
elevation: 10
|
|
||||||
left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
|
|
||||||
# GridLayout:
|
|
||||||
# cols: 2
|
|
||||||
# padding: 15, 15
|
|
||||||
# spacing: 20, 20
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "horizontal"
|
|
||||||
MDLabel:
|
|
||||||
text: "DB Key:"
|
|
||||||
halign: 'center'
|
|
||||||
MDTextField:
|
|
||||||
hint_text: "placeholder"
|
|
||||||
pos_hint: {"center_y": .5}
|
|
||||||
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "horizontal"
|
|
||||||
MDLabel:
|
|
||||||
text: "TBA Key:"
|
|
||||||
halign: 'center'
|
|
||||||
MDTextField:
|
|
||||||
hint_text: "placeholder"
|
|
||||||
pos_hint: {"center_y": .5}
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "horizontal"
|
|
||||||
MDLabel:
|
|
||||||
text: "CPU Use:"
|
|
||||||
halign: 'center'
|
|
||||||
MDLabel:
|
|
||||||
text: "placeholder"
|
|
||||||
halign: 'center'
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "horizontal"
|
|
||||||
MDLabel:
|
|
||||||
text: "Network:"
|
|
||||||
halign: 'center'
|
|
||||||
MDLabel:
|
|
||||||
text: "placeholder"
|
|
||||||
halign: 'center'
|
|
||||||
Widget:
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "horizontal"
|
|
||||||
MDLabel:
|
|
||||||
text: "Progress"
|
|
||||||
halign: 'center'
|
|
||||||
MDProgressBar:
|
|
||||||
id: progress
|
|
||||||
value: 50
|
|
||||||
StatsScreen:
|
|
||||||
name: "Stats"
|
|
||||||
MDCheckbox:
|
|
||||||
size_hint: None, None
|
|
||||||
size: "48dp", "48dp"
|
|
||||||
pos_hint: {'center_x': .5, 'center_y': .5}
|
|
||||||
on_active: Screen.test()
|
|
||||||
|
|
||||||
#Navigation Drawer -------------------------
|
|
||||||
MDNavigationDrawer:
|
|
||||||
id: nav_drawer
|
|
||||||
BoxLayout:
|
|
||||||
orientation: "vertical"
|
|
||||||
padding: "8dp"
|
|
||||||
spacing: "8dp"
|
|
||||||
MDLabel:
|
|
||||||
text: "Titan Scouting"
|
|
||||||
font_style: "Button"
|
|
||||||
size_hint_y: None
|
|
||||||
height: self.texture_size[1]
|
|
||||||
|
|
||||||
MDLabel:
|
|
||||||
text: "Data Analysis"
|
|
||||||
font_style: "Caption"
|
|
||||||
size_hint_y: None
|
|
||||||
height: self.texture_size[1]
|
|
||||||
ScrollView:
|
|
||||||
MDList:
|
|
||||||
OneLineAvatarListItem:
|
|
||||||
text: "Home"
|
|
||||||
on_press:
|
|
||||||
# nav_drawer.set_state("close")
|
|
||||||
# screen_manager.transition.direction = "left"
|
|
||||||
screen_manager.current = "Home"
|
|
||||||
IconLeftWidget:
|
|
||||||
icon: "home"
|
|
||||||
|
|
||||||
OneLineAvatarListItem:
|
|
||||||
text: "Settings"
|
|
||||||
on_press:
|
|
||||||
# nav_drawer.set_state("close")
|
|
||||||
# screen_manager.transition.direction = "right"
|
|
||||||
# screen_manager.fade
|
|
||||||
screen_manager.current = "Settings"
|
|
||||||
IconLeftWidget:
|
|
||||||
icon: "cog"
|
|
||||||
OneLineAvatarListItem:
|
|
||||||
text: "Info"
|
|
||||||
on_press:
|
|
||||||
# nav_drawer.set_state("close")
|
|
||||||
# screen_manager.transition.direction = "right"
|
|
||||||
# screen_manager.fade
|
|
||||||
screen_manager.current = "Info"
|
|
||||||
IconLeftWidget:
|
|
||||||
icon: "cog"
|
|
||||||
OneLineAvatarListItem:
|
|
||||||
text: "Stats"
|
|
||||||
on_press:
|
|
||||||
# nav_drawer.set_state("close")
|
|
||||||
# screen_manager.transition.direction = "right"
|
|
||||||
# screen_manager.fade
|
|
||||||
screen_manager.current = "Stats"
|
|
||||||
IconLeftWidget:
|
|
||||||
icon: "cog"
|
|
@@ -1,58 +0,0 @@
|
|||||||
from kivy.lang import Builder
|
|
||||||
|
|
||||||
from kivymd.uix.screen import Screen
|
|
||||||
from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
|
|
||||||
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
|
|
||||||
from kivy.uix.scrollview import ScrollView
|
|
||||||
|
|
||||||
|
|
||||||
from kivy.uix.boxlayout import BoxLayout
|
|
||||||
from kivy.uix.screenmanager import ScreenManager, Screen
|
|
||||||
from kivy.uix.dropdown import DropDown
|
|
||||||
from kivy.uix.button import Button
|
|
||||||
from kivy.base import runTouchApp
|
|
||||||
from kivymd.uix.menu import MDDropdownMenu, MDMenuItem
|
|
||||||
|
|
||||||
from kivymd.app import MDApp
|
|
||||||
# import superscript as ss
|
|
||||||
|
|
||||||
# from tra_analysis import analysis as an
|
|
||||||
import data as d
|
|
||||||
from collections import defaultdict
|
|
||||||
import json
|
|
||||||
import math
|
|
||||||
import numpy as np
|
|
||||||
import os
|
|
||||||
from os import system, name
|
|
||||||
from pathlib import Path
|
|
||||||
from multiprocessing import Pool
|
|
||||||
import matplotlib.pyplot as plt
|
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
|
||||||
import time
|
|
||||||
import warnings
|
|
||||||
|
|
||||||
# global exec_threads
|
|
||||||
|
|
||||||
|
|
||||||
# Screens
|
|
||||||
class HomeScreen(Screen):
	# Kivy screen for the "Home" view; its layout is declared in design.kv.
	pass
|
||||||
class SettingsScreen(Screen):
	# Kivy screen for the "Settings" view; its layout is declared in design.kv.
	pass
|
||||||
class InfoScreen(Screen):
	# Kivy screen for the "Info" view; its layout is declared in design.kv.
	pass
|
||||||
|
|
||||||
class StatsScreen(Screen):
	# Kivy screen for the "Stats" view; its layout is declared in design.kv.
	pass
|
||||||
|
|
||||||
|
|
||||||
class MyApp(MDApp):
	# KivyMD application shell; the whole UI tree is loaded from design.kv.
	def build(self):
		# Kivy entry point: set the theme and return the root widget.
		self.theme_cls.primary_palette = "Red"
		return Builder.load_file("design.kv")
	def test():
		# NOTE(review): missing `self` — only callable as an unbound function
		# (design.kv invokes Screen.test()); confirm intended call path.
		print("test")
|
||||||
|
|
||||||
|
|
||||||
# Launch the KivyMD GUI only when run as a script (not on import).
if __name__ == "__main__":
	MyApp().run()
|
|
@@ -1,627 +0,0 @@
|
|||||||
# Titan Robotics Team 2022: Superscript Script
|
|
||||||
# Written by Arthur Lu, Jacob Levine, and Dev Singh
|
|
||||||
# Notes:
|
|
||||||
# setup:
|
|
||||||
|
|
||||||
__version__ = "0.8.6"
|
|
||||||
|
|
||||||
# changelog should be viewed using print(analysis.__changelog__)
|
|
||||||
__changelog__ = """changelog:
|
|
||||||
0.8.6:
|
|
||||||
- added proper main function
|
|
||||||
0.8.5:
|
|
||||||
- added more gradeful KeyboardInterrupt exiting
|
|
||||||
- redirected stderr to errorlog.txt
|
|
||||||
0.8.4:
|
|
||||||
- added better error message for missing config.json
|
|
||||||
- added automatic config.json creation
|
|
||||||
- added splash text with version and system info
|
|
||||||
0.8.3:
|
|
||||||
- updated matchloop with new regression format (requires tra_analysis 3.x)
|
|
||||||
0.8.2:
|
|
||||||
- readded while true to main function
|
|
||||||
- added more thread config options
|
|
||||||
0.8.1:
|
|
||||||
- optimized matchloop further by bypassing GIL
|
|
||||||
0.8.0:
|
|
||||||
- added multithreading to matchloop
|
|
||||||
- tweaked user log
|
|
||||||
0.7.0:
|
|
||||||
- finished implementing main function
|
|
||||||
0.6.2:
|
|
||||||
- integrated get_team_rankings.py as get_team_metrics() function
|
|
||||||
- integrated visualize_pit.py as graph_pit_histogram() function
|
|
||||||
0.6.1:
|
|
||||||
- bug fixes with analysis.Metric() calls
|
|
||||||
- modified metric functions to use config.json defined default values
|
|
||||||
0.6.0:
|
|
||||||
- removed main function
|
|
||||||
- changed load_config function
|
|
||||||
- added save_config function
|
|
||||||
- added load_match function
|
|
||||||
- renamed simpleloop to matchloop
|
|
||||||
- moved simplestats function inside matchloop
|
|
||||||
- renamed load_metrics to load_metric
|
|
||||||
- renamed metricsloop to metricloop
|
|
||||||
- split push to database functions amon push_match, push_metric, push_pit
|
|
||||||
- moved
|
|
||||||
0.5.2:
|
|
||||||
- made changes due to refactoring of analysis
|
|
||||||
0.5.1:
|
|
||||||
- text fixes
|
|
||||||
- removed matplotlib requirement
|
|
||||||
0.5.0:
|
|
||||||
- improved user interface
|
|
||||||
0.4.2:
|
|
||||||
- removed unessasary code
|
|
||||||
0.4.1:
|
|
||||||
- fixed bug where X range for regression was determined before sanitization
|
|
||||||
- better sanitized data
|
|
||||||
0.4.0:
|
|
||||||
- fixed spelling issue in __changelog__
|
|
||||||
- addressed nan bug in regression
|
|
||||||
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
|
|
||||||
- fixed errors in metrics computing
|
|
||||||
0.3.0:
|
|
||||||
- added analysis to pit data
|
|
||||||
0.2.1:
|
|
||||||
- minor stability patches
|
|
||||||
- implemented db syncing for timestamps
|
|
||||||
- fixed bugs
|
|
||||||
0.2.0:
|
|
||||||
- finalized testing and small fixes
|
|
||||||
0.1.4:
|
|
||||||
- finished metrics implement, trueskill is bugged
|
|
||||||
0.1.3:
|
|
||||||
- working
|
|
||||||
0.1.2:
|
|
||||||
- started implement of metrics
|
|
||||||
0.1.1:
|
|
||||||
- cleaned up imports
|
|
||||||
0.1.0:
|
|
||||||
- tested working, can push to database
|
|
||||||
0.0.9:
|
|
||||||
- tested working
|
|
||||||
- prints out stats for the time being, will push to database later
|
|
||||||
0.0.8:
|
|
||||||
- added data import
|
|
||||||
- removed tba import
|
|
||||||
- finished main method
|
|
||||||
0.0.7:
|
|
||||||
- added load_config
|
|
||||||
- optimized simpleloop for readibility
|
|
||||||
- added __all__ entries
|
|
||||||
- added simplestats engine
|
|
||||||
- pending testing
|
|
||||||
0.0.6:
|
|
||||||
- fixes
|
|
||||||
0.0.5:
|
|
||||||
- imported pickle
|
|
||||||
- created custom database object
|
|
||||||
0.0.4:
|
|
||||||
- fixed simpleloop to actually return a vector
|
|
||||||
0.0.3:
|
|
||||||
- added metricsloop which is unfinished
|
|
||||||
0.0.2:
|
|
||||||
- added simpleloop which is untested until data is provided
|
|
||||||
0.0.1:
|
|
||||||
- created script
|
|
||||||
- added analysis, numba, numpy imports
|
|
||||||
"""
|
|
||||||
|
|
||||||
__author__ = (
|
|
||||||
"Arthur Lu <learthurgo@gmail.com>",
|
|
||||||
"Jacob Levine <jlevine@imsa.edu>",
|
|
||||||
)
|
|
||||||
|
|
||||||
__all__ = [
|
|
||||||
"load_config",
|
|
||||||
"save_config",
|
|
||||||
"get_previous_time",
|
|
||||||
"load_match",
|
|
||||||
"matchloop",
|
|
||||||
"load_metric",
|
|
||||||
"metricloop",
|
|
||||||
"load_pit",
|
|
||||||
"pitloop",
|
|
||||||
"push_match",
|
|
||||||
"push_metric",
|
|
||||||
"push_pit",
|
|
||||||
]
|
|
||||||
|
|
||||||
# imports:
|
|
||||||
|
|
||||||
from tra_analysis import analysis as an
|
|
||||||
import data as d
|
|
||||||
from collections import defaultdict
|
|
||||||
import json
|
|
||||||
import math
|
|
||||||
import numpy as np
|
|
||||||
import os
|
|
||||||
from os import system, name
|
|
||||||
from pathlib import Path
|
|
||||||
from multiprocessing import Pool
|
|
||||||
import platform
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import warnings
|
|
||||||
|
|
||||||
global exec_threads
|
|
||||||
|
|
||||||
def main():
	"""Run the analysis pipeline in an endless loop.

	Each pass loads config.json, starts a worker pool, pulls match/pit data,
	runs the configured statistics and metrics, pushes results, and records
	the run timestamp. KeyboardInterrupt tears the pool down and exits.
	"""

	global exec_threads

	# route tracebacks/warnings to a log file instead of the console
	sys.stderr = open("errorlog.txt", "w")

	warnings.filterwarnings("ignore")

	splash()

	while (True):

		try:

			current_time = time.time()
			print("[OK] time: " + str(current_time))

			config = load_config("config.json")
			competition = config["competition"]
			match_tests = config["statistics"]["match"]
			pit_tests = config["statistics"]["pit"]
			metrics_tests = config["statistics"]["metric"]
			print("[OK] configs loaded")

			print("[OK] starting threads")
			cfg_max_threads = config["max-threads"]
			sys_max_threads = os.cpu_count()
			# max-threads semantics: negative = leave that many cores free,
			# 0 = all cores, (0,1) = fraction of cores, >=1 = exact count
			if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
				alloc_processes = sys_max_threads + cfg_max_threads
			elif cfg_max_threads > 0 and cfg_max_threads < 1:
				alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
			elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
				alloc_processes = cfg_max_threads
			elif cfg_max_threads == 0:
				alloc_processes = sys_max_threads
			else:
				print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
				exit()
			exec_threads = Pool(processes = alloc_processes)
			print("[OK] " + str(alloc_processes) + " threads started")

			apikey = config["key"]["database"]
			tbakey = config["key"]["tba"]
			print("[OK] loaded keys")

			previous_time = get_previous_time(apikey)
			print("[OK] analysis backtimed to: " + str(previous_time))

			print("[OK] loading data")
			start = time.time()
			match_data = load_match(apikey, competition)
			pit_data = load_pit(apikey, competition)
			print("[OK] loaded data in " + str(time.time() - start) + " seconds")

			print("[OK] running match stats")
			start = time.time()
			matchloop(apikey, competition, match_data, match_tests)
			print("[OK] finished match stats in " + str(time.time() - start) + " seconds")

			print("[OK] running team metrics")
			start = time.time()
			metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
			print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")

			print("[OK] running pit analysis")
			start = time.time()
			pitloop(apikey, competition, pit_data, pit_tests)
			print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")

			# remember this run so the next pass only considers newer data
			set_current_time(apikey, current_time)
			print("[OK] finished all tests, looping")

			print_hrule()

		except KeyboardInterrupt:
			print("\n[OK] caught KeyboardInterrupt, killing processes")
			exec_threads.terminate()
			print("[OK] processes killed, exiting")
			exit()

		else:
			# try/except/else: runs only when the pass completed cleanly
			pass

		#clear()
|
|
||||||
def clear():
	"""Clear the terminal screen in a platform-appropriate way."""
	# os.name is 'nt' on Windows; everything else gets the POSIX command
	system('cls' if name == 'nt' else 'clear')
|
|
||||||
|
|
||||||
def print_hrule():
	"""Print a 40-character horizontal rule."""
	print("#" + "-" * 38 + "#")
|
|
||||||
|
|
||||||
def print_box(s):
	"""Print *s* as one padded row of a fixed 40-character-wide box."""
	padding = (40 - len(s) - 2) * " "
	print("|" + s + padding + "|")
|
|
||||||
|
|
||||||
def splash():
	"""Display the boxed startup banner: version, OS, and Python runtime."""
	banner_rows = [
		" superscript version: " + __version__,
		" os: " + platform.system(),
		" python: " + platform.python_version(),
	]
	print_hrule()
	for row in banner_rows:
		print_box(row)
	print_hrule()
|
|
||||||
|
|
||||||
def load_config(file):
	"""Load and return the JSON config from *file*.

	If the file cannot be opened, writes the default sample config to that
	path and terminates the process (the generated file must be filled in
	before rerunning).
	"""

	config_vector = {}

	try:
		f = open(file)
	except:
		print("[ERROR] could not locate config.json, generating blank config.json and exiting")
		f = open(file, "w")
		f.write(sample_json)
		exit()

	config_vector = json.load(f)

	return config_vector
|
|
||||||
|
|
||||||
def save_config(file, config_vector):
	"""Write *config_vector* to *file* as JSON.

	Bug fix: the file must be opened for writing ("w"); the previous
	default read-mode open made json.dump fail with an unwritable handle
	(and raised FileNotFoundError when the file did not exist yet).
	"""

	with open(file, "w") as f:
		json.dump(config_vector, f)
|
|
||||||
|
|
||||||
def get_previous_time(apikey):
	"""Return the unix timestamp of the last completed analysis run.

	If no "latest_update" flag exists in the database yet, it is initialized
	to 0 and 0 is returned (i.e. consider all data on the first run).
	"""

	flag_doc = d.get_analysis_flags(apikey, "latest_update")

	if flag_doc is None:
		d.set_analysis_flags(apikey, "latest_update", 0)
		return 0

	return flag_doc["latest_update"]
|
|
||||||
|
|
||||||
def set_current_time(apikey, current_time):
	"""Persist *current_time* as the "latest_update" analysis flag in the database."""

	d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
|
|
||||||
|
|
||||||
def load_match(apikey, competition):
	"""Return formatted match data ({team: {variable: [values]}}) for a competition."""

	return d.get_match_data_formatted(apikey, competition)
|
|
||||||
|
|
||||||
def simplestats(data_test):
	"""Run one statistical test on one variable's match series.

	data_test is a (values, test_name) pair; non-finite values are dropped
	and the x-axis is just the sample index. Dispatches to the matching
	tra_analysis routine; returns None for an unrecognized test name.
	"""

	data = np.array(data_test[0])
	data = data[np.isfinite(data)]
	# x values are sequential match indices after sanitization
	ranges = list(range(len(data)))

	test = data_test[1]

	if test == "basic_stats":
		return an.basic_stats(data)

	if test == "historical_analysis":
		return an.histo_analysis([ranges, data])

	if test == "regression_linear":
		return an.regression(ranges, data, ['lin'])

	if test == "regression_logarithmic":
		return an.regression(ranges, data, ['log'])

	if test == "regression_exponential":
		return an.regression(ranges, data, ['exp'])

	if test == "regression_polynomial":
		return an.regression(ranges, data, ['ply'])

	if test == "regression_sigmoidal":
		return an.regression(ranges, data, ['sig'])
|
|
||||||
|
|
||||||
def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
	"""Run every configured statistical test over the match data and push results.

	data is shaped [team][variable][match]; tests maps variable name to the
	list of test names to run on it. Work items are flattened into parallel
	lists, executed through the module-level exec_threads pool, then the
	results are reassembled into a nested {team: {variable: {test: result}}}
	structure and pushed to the database.
	"""
	global exec_threads

	# Tests with an entry here return a dict keyed by this short code; the
	# relevant sub-result is extracted before storing.
	short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}

	class AutoVivification(dict):
		# dict that transparently creates nested AutoVivifications on
		# missing-key access, so deep assignment needs no setup.
		def __getitem__(self, item):
			try:
				return dict.__getitem__(self, item)
			except KeyError:
				value = self[item] = type(self)()
				return value

	# Parallel lists: entry i of each describes one (team, variable, test) job.
	team_filtered = []
	variable_filtered = []
	variable_data = []
	test_filtered = []

	for team in data:
		for variable in data[team]:
			if variable in tests:
				for test in tests[variable]:
					team_filtered.append(team)
					variable_filtered.append(variable)
					variable_data.append((data[team][variable], test))
					test_filtered.append(test)

	return_vector = AutoVivification()

	result_filtered = list(exec_threads.map(simplestats, variable_data))

	for i, result in enumerate(result_filtered):

		filtered = test_filtered[i]

		try:
			short = short_mapping[filtered]
			return_vector[team_filtered[i]][variable_filtered[i]][filtered] = result[short]
		except KeyError: # test has no short form: store the full result
			return_vector[team_filtered[i]][variable_filtered[i]][filtered] = result

	push_match(apikey, competition, return_vector)
|
|
||||||
|
|
||||||
def load_metric(apikey, competition, match, group_name, metrics):
	"""Fetch (or default-seed) per-team rating state for one alliance.

	match[group_name] lists the alliance's teams ("red" or "blue"). For each
	team, stored elo/gl2/ts metrics are loaded from the database; teams with
	no stored data are seeded from the configured defaults in `metrics`.
	Returns {team: {"elo": ..., "gl2": ..., "ts": ...}}.

	Bug fixes vs. original:
	- the database was queried twice per team; now queried once.
	- the `metrics` defaults parameter was rebound to a stored team's data
	  inside the loop, so a later no-data team on the same alliance was
	  seeded from the previous team's ratings instead of the config defaults.
	"""
	group = {}

	for team in match[group_name]:

		db_data = d.get_team_metrics_data(apikey, competition, team)

		if db_data is None:  # no stored ratings: seed from configured defaults

			elo = {"score": metrics["elo"]["score"]}
			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
			ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}

		else:

			stored = db_data["metrics"]

			elo = stored["elo"]
			gl2 = stored["gl2"]
			ts = stored["ts"]

		group[team] = {"elo": elo, "gl2": gl2, "ts": ts}

	return group
|
|
||||||
|
|
||||||
def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
	"""Update team Elo and Glicko-2 ratings from matches newer than timestamp.

	For each new TBA match, loads both alliances' current ratings, computes
	alliance-average ratings, scores the outcome (1/0, or 0.5 each for a tie),
	applies the resulting rating deltas to every team on each alliance, and
	pushes the updated metrics back to the database.

	NOTE(review): Trueskill ("ts") state is loaded by load_metric and pushed
	back unchanged — no ts update happens here; presumably intentional or
	unfinished, confirm before relying on ts values.
	"""
	elo_N = metrics["elo"]["N"]
	elo_K = metrics["elo"]["K"]

	matches = d.pull_new_tba_matches(tbakey, competition, timestamp)

	red = {}
	blu = {}

	for match in matches:

		# per-team rating state for each alliance ({team: {elo, gl2, ts}})
		red = load_metric(apikey, competition, match, "red", metrics)
		blu = load_metric(apikey, competition, match, "blue", metrics)

		elo_red_total = 0
		elo_blu_total = 0

		gl2_red_score_total = 0
		gl2_blu_score_total = 0

		gl2_red_rd_total = 0
		gl2_blu_rd_total = 0

		gl2_red_vol_total = 0
		gl2_blu_vol_total = 0

		for team in red:

			elo_red_total += red[team]["elo"]["score"]

			gl2_red_score_total += red[team]["gl2"]["score"]
			gl2_red_rd_total += red[team]["gl2"]["rd"]
			gl2_red_vol_total += red[team]["gl2"]["vol"]

		for team in blu:

			elo_blu_total += blu[team]["elo"]["score"]

			gl2_blu_score_total += blu[team]["gl2"]["score"]
			gl2_blu_rd_total += blu[team]["gl2"]["rd"]
			gl2_blu_vol_total += blu[team]["gl2"]["vol"]

		# alliance-average ratings; each alliance is treated as one player
		red_elo = {"score": elo_red_total / len(red)}
		blu_elo = {"score": elo_blu_total / len(blu)}

		red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
		blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}

		# outcome as win probability observed by each side (tie = 0.5/0.5)
		if match["winner"] == "red":

			observations = {"red": 1, "blu": 0}

		elif match["winner"] == "blue":

			observations = {"red": 0, "blu": 1}

		else:

			observations = {"red": 0.5, "blu": 0.5}

		# deltas, not absolute ratings: each team keeps its own base score
		red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
		blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]

		# NOTE(review): a single opponent is passed but the observation list
		# has two entries ([own, opponent]) — verify this matches
		# an.Metric().glicko2's expected argument shape.
		new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
		new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])

		red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
		blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}

		# apply the alliance delta to every member team
		for team in red:

			red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta

			red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
			red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
			red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]

		for team in blu:

			blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta

			blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
			blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
			blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

		# persist both alliances' updated ratings after each match
		temp_vector = {}
		temp_vector.update(red)
		temp_vector.update(blu)

		push_metric(apikey, competition, temp_vector)
|
|
||||||
|
|
||||||
def load_pit(apikey, competition):
	"""Fetch formatted pit-scouting data for a competition from the database.

	Thin wrapper over the data layer; pitloop expects the result shaped as
	[team][variable].
	"""
	return d.get_pit_data_formatted(apikey, competition)
|
|
||||||
|
|
||||||
def pitloop(apikey, competition, pit, tests):
	"""Aggregate pit-scouting responses across teams and push to the database.

	pit is shaped [team][variable]; only variables listed in `tests` are
	kept. Responses are grouped per variable (team identity is dropped) and
	pushed as {variable: [response, ...]}.
	"""
	return_vector = {}
	for team in pit:
		for variable in pit[team]:
			if variable in tests:
				# setdefault replaces the original `if not variable in` guard
				return_vector.setdefault(variable, []).append(pit[team][variable])

	push_pit(apikey, competition, return_vector)
|
|
||||||
|
|
||||||
def push_match(apikey, competition, results):
	"""Write per-team match-analysis results to the database, one team at a time.

	results is {team: {variable: {test: result}}} as built by matchloop.
	"""
	for team in results:

		d.push_team_tests_data(apikey, competition, team, results[team])
|
|
||||||
|
|
||||||
def push_metric(apikey, competition, metric):
	"""Write per-team rating metrics to the database, one team at a time.

	metric is {team: {"elo": ..., "gl2": ..., "ts": ...}} as built by metricloop.
	"""
	for team in metric:

		d.push_team_metrics_data(apikey, competition, team, metric[team])
|
|
||||||
|
|
||||||
def push_pit(apikey, competition, pit):
	"""Write aggregated pit-scouting data to the database, one variable at a time.

	pit is {variable: [response, ...]} as built by pitloop — note this is
	keyed by variable, not by team, unlike the match/metric push helpers.
	"""
	for variable in pit:

		d.push_team_pit_data(apikey, competition, variable, pit[variable])
|
|
||||||
|
|
||||||
def get_team_metrics(apikey, tbakey, competition):
	"""Build ascending Elo and Glicko-2 rankings for all teams in a competition.

	Fetches stored metrics from the database, sorts teams by score, and
	returns {"elo-ranks": [...], "glicko2-ranks": [...]} where each entry is
	{"team": str, "elo"/"gl2": str}. (tbakey is accepted for interface
	compatibility but unused here.)
	"""
	metrics = d.get_metrics_data_formatted(apikey, competition)

	# extract the scalar scores per team
	elo = {team: metrics[team]["metrics"]["elo"]["score"] for team in metrics}
	gl2 = {team: metrics[team]["metrics"]["gl2"]["score"] for team in metrics}

	# sort ascending by score (dicts preserve insertion order)
	elo = dict(sorted(elo.items(), key=lambda item: item[1]))
	gl2 = dict(sorted(gl2.items(), key=lambda item: item[1]))

	# stringify for the API payload
	elo_ranked = [{"team": str(team), "elo": str(score)} for team, score in elo.items()]
	gl2_ranked = [{"team": str(team), "gl2": str(score)} for team, score in gl2.items()]

	return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
|
|
||||||
|
|
||||||
# Default configuration written to disk when no config file exists.
# Fix: "balls-started" listed the test "historical_analyss" (typo) — that
# name matches no test in simplestats, so the analysis was silently skipped.
sample_json = """{
	"max-threads": 0.5,
	"team": "",
	"competition": "2020ilch",
	"key":{
		"database":"",
		"tba":""
	},
	"statistics":{
		"match":{
			"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-started":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
			"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]

		},
		"metric":{
			"elo":{
				"score":1500,
				"N":400,
				"K":24
			},
			"gl2":{
				"score":1500,
				"rd":250,
				"vol":0.06
			},
			"ts":{
				"mu":25,
				"sigma":8.33
			}
		},
		"pit":{
			"wheel-mechanism":true,
			"low-balls":true,
			"high-balls":true,
			"wheel-success":true,
			"strategic-focus":true,
			"climb-mechanism":true,
			"attitude":true
		}
	}
}"""
|
|
||||||
|
|
||||||
if __name__ == "__main__":
	# On Windows, multiprocessing spawns children that re-import this module;
	# frozen executables additionally need explicit freeze support.
	if sys.platform.startswith('win'):
		multiprocessing.freeze_support()
	main()
|
|
12
submit-debug.sh
Normal file
12
submit-debug.sh
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
#SBATCH --job-name=tra-superscript
|
||||||
|
#SBATCH --output=slurm-tra-superscript.out
|
||||||
|
#SBATCH --ntasks=8
|
||||||
|
#SBATCH --time=24:00:00
|
||||||
|
#SBATCH --mem-per-cpu=256
|
||||||
|
#SBATCH --mail-user=dsingh@imsa.edu
|
||||||
|
#SBATCH -p cpu-long
|
||||||
|
|
||||||
|
cd competition
|
||||||
|
python superscript.py debug
|
12
submit-prod.sh
Normal file
12
submit-prod.sh
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
#SBATCH --job-name=tra-superscript
|
||||||
|
#SBATCH --output=PROD_slurm-tra-superscript.out
|
||||||
|
#SBATCH --ntasks=8
|
||||||
|
#SBATCH --time=24:00:00
|
||||||
|
#SBATCH --mem-per-cpu=256
|
||||||
|
#SBATCH --mail-user=dsingh@imsa.edu
|
||||||
|
#SBATCH -p cpu-long
|
||||||
|
|
||||||
|
cd competition
|
||||||
|
python superscript.py verbose
|
@@ -1,14 +0,0 @@
|
|||||||
import signal
|
|
||||||
import zmq
|
|
||||||
|
|
||||||
signal.signal(signal.SIGINT, signal.SIG_DFL)
|
|
||||||
|
|
||||||
context = zmq.Context()
|
|
||||||
|
|
||||||
socket = context.socket(zmq.SUB)
|
|
||||||
socket.connect('tcp://localhost:5678')
|
|
||||||
socket.setsockopt(zmq.SUBSCRIBE, b'status')
|
|
||||||
|
|
||||||
while True:
|
|
||||||
message = socket.recv_multipart()
|
|
||||||
print(f'Received: {message}')
|
|
@@ -1,2 +0,0 @@
|
|||||||
def test_():
|
|
||||||
assert 1 == 1
|
|
Reference in New Issue
Block a user