Mirror of https://github.com/titanscouting/tra-superscript.git (synced 2025-09-26 07:10:18 +00:00)
Compare commits: gui...snyk-fix-7 (64 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 8233eef04b | |
| | df37947b21 | |
| | 8e6c44db65 | |
| | 11398290eb | |
| | d847f6d6a7 | |
| | b5c8a91fad | |
| | 8e5fa7eace | |
| | 69c6059ff8 | |
| | fdcdadb8b2 | |
| | cdd81295fc | |
| | 82ec2d85cc | |
| | ac8002aaf8 | |
| | 25e4babd71 | |
| | 3fe2922e97 | |
| | 9752fd323b | |
| | ef63c1de7e | |
| | 8908f05cbe | |
| | 143218dda3 | |
| | def2fc9b73 | |
| | e8a5bb75f8 | |
| | c9dd09f5e9 | |
| | 3c6e3ac58e | |
| | 8c28c24d60 | |
| | 4836f48a34 | |
| | 9a1a45f1c9 | |
| | d7ed695ad1 | |
| | 21d92e65b2 | |
| | 0cace3cec3 | |
| | 80b63269df | |
| | 56447603e1 | |
| | 2130182212 | |
| | b1eff19ea4 | |
| | b43836899d | |
| | 524a0a211d | |
| | de4d3d4967 | |
| | d56411253c | |
| | c415225afe | |
| | d684813ee0 | |
| | 26079f3180 | |
| | 99e722c400 | |
| | f5a0e0fe8c | |
| | 28e423942f | |
| | 8977f8c277 | |
| | 2b0f718aa5 | |
| | 30469a3211 | |
| | 391d4e1996 | |
| | 224f64e8b7 | |
| | aa7d7ca927 | |
| | d10c16d483 | |
| | f211d00f2d | |
| | 69c707689b | |
| | d2f9c802b3 | |
| | 99e28f5e83 | |
| | 18dbc174bd | |
| | 79689d69c8 | |
| | 80c3f1224b | |
| | 960a1b3165 | |
| | 89fcd366d3 | |
| | 79cde44108 | |
| | 2b896db9a9 | |
| | 483897c011 | |
| | 9287d98fe2 | |
| | 991751a340 | |
| | 9d2476b5eb | |
@@ -1,6 +1,7 @@
-FROM python:slim
+FROM ubuntu:20.04
 WORKDIR /
-RUN apt-get -y update; apt-get -y upgrade
-RUN apt-get -y install git
-COPY requirements.txt .
-RUN pip install -r requirements.txt
+RUN apt-get -y update
+RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
+RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
+RUN ln -s $(which pip3) /usr/bin/pip
+RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
.devcontainer/dev-dockerfile (new file, 2 lines)
@@ -0,0 +1,2 @@
FROM titanscout2022/tra-analysis-base:latest
WORKDIR /
@@ -1,15 +1,15 @@
 {
 	"name": "TRA Analysis Development Environment",
 	"build": {
-		"dockerfile": "Dockerfile",
+		"dockerfile": "dev-dockerfile",
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
-		"python.pythonPath": "",
+		"python.pythonPath": "/usr/local/bin/python",
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
-		"python.linting.pylintPath": "",
-		"python.testing.pytestPath": "",
+		"python.linting.pylintPath": "/usr/local/bin/pylint",
+		"python.testing.pytestPath": "/usr/local/bin/pytest",
 		"editor.tabSize": 4,
 		"editor.insertSpaces": false
 	},
@@ -18,5 +18,5 @@
 		"ms-python.python",
 		"waderyan.gitblame"
 	],
-	"postCreateCommand": ""
+	"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
 }
@@ -1,6 +1,7 @@
cerberus
dnspython
numpy
pandas
pyinstaller
pylint
pymongo
.github/ISSUE_TEMPLATE/bug_report.md (vendored, 38 lines deleted)
@@ -1,38 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 20 lines deleted)
@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/workflows/build-cli.yml (vendored, 22 lines changed)
@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

-name: Build Superscript Linux
+name: Superscript Unit Tests

 on:
   release:
@@ -11,25 +11,7 @@ jobs:
   generate:
     name: Build Linux
     runs-on: ubuntu-latest

     steps:
       - name: Checkout master
         uses: actions/checkout@master
-      - name: Install Dependencies
-        run: pip install -r requirements.txt
-        working-directory: src/
-      - name: Give Execute Permission
-        run: chmod +x build-CLI.sh
-        working-directory: build/
-      - name: Build Binary
-        run: ./build-CLI.sh
-        working-directory: build/
-      - name: Copy Binary to Root Dir
-        run: cp superscript ..
-        working-directory: dist/
-      - name: Upload Release Asset
-        uses: svenstaro/upload-release-action@v2
-        with:
-          repo_token: ${{ secrets.GITHUB_TOKEN }}
-          file: superscript
-          asset_name: superscript
-          tag: ${{ github.ref }}
.gitignore (vendored, 9 lines changed)
@@ -9,11 +9,16 @@
 **/tra_analysis/
 **/temp/*

-**/errorlog.txt
-/dist/superscript.*
-/dist/superscript
 **/*.pid

 **/profile.*

 **/*.log
+**/errorlog.txt
+/dist/superscript.*
+/dist/superscript
+/dist/*
+
+slurm-tra-superscript.out
+config*.json
+
@@ -1,4 +1,4 @@
-# Red Alliance Analysis ·
+# Red Alliance Analysis ·

 Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.
@@ -1,5 +1,5 @@
-set pathtospec="../src/cli/superscript.spec"
+set pathtospec="../src/superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"

-pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
+pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
@@ -1,5 +1,5 @@
-pathtospec="../src/cli/superscript.spec"
+pathtospec="../src/superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"

-pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
+pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
competition/config.py (new file, 251 lines)
@@ -0,0 +1,251 @@
import json
from exceptions import ConfigurationError
from cerberus import Validator

from data import set_database_config, get_database_config

class Configuration:

	path = None
	config = {}

	_sample_config = {
		"persistent":{
			"key":{
				"database":"",
				"tba":"",
				"tra":{
					"CLIENT_ID":"",
					"CLIENT_SECRET":"",
					"url": ""
				}
			},
			"config-preference":"local",
			"synchronize-config":False
		},
		"variable":{
			"event-delay":False,
			"loop-delay":0,
			"competition": "2020ilch",
			"modules":{
				"match":{
					"tests":{
						"balls-blocked":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-collected":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-lower-teleop":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-lower-auto":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-started":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-upper-teleop":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						],
						"balls-upper-auto":[
							"basic_stats",
							"historical_analysis",
							"regression_linear",
							"regression_logarithmic",
							"regression_exponential",
							"regression_polynomial",
							"regression_sigmoidal"
						]
					}
				},
				"metric":{
					"tests":{
						"elo":{
							"score":1500,
							"N":400,
							"K":24
						},
						"gl2":{
							"score":1500,
							"rd":250,
							"vol":0.06
						},
						"ts":{
							"mu":25,
							"sigma":8.33
						}
					}
				},
				"pit":{
					"tests":{
						"wheel-mechanism":True,
						"low-balls":True,
						"high-balls":True,
						"wheel-success":True,
						"strategic-focus":True,
						"climb-mechanism":True,
						"attitude":True
					}
				}
			}
		}
	}

	_validation_schema = {
		"persistent": {
			"type": "dict",
			"required": True,
			"require_all": True,
			"schema": {
				"key": {
					"type": "dict",
					"require_all":True,
					"schema": {
						"database": {"type":"string"},
						"tba": {"type": "string"},
						"tra": {
							"type": "dict",
							"require_all": True,
							"schema": {
								"CLIENT_ID": {"type": "string"},
								"CLIENT_SECRET": {"type": "string"},
								"url": {"type": "string"}
							}
						}
					}
				},
				"config-preference": {"type": "string", "required": True},
				"synchronize-config": {"type": "boolean", "required": True}
			}
		}
	}

	def __init__(self, path):
		self.path = path
		self.load_config()
		self.validate_config()

	def load_config(self):
		try:
			f = open(self.path, "r")
			self.config.update(json.load(f))
			f.close()
		except:
			self.config = self._sample_config
			self.save_config()
			raise ConfigurationError("could not find config file at <" + self.path + ">, created new sample config file at that path")

	def save_config(self):
		f = open(self.path, "w+")
		json.dump(self.config, f, ensure_ascii=False, indent=4)
		f.close()

	def validate_config(self):
		v = Validator(self._validation_schema, allow_unknown = True)
		isValidated = v.validate(self.config)

		if not isValidated:
			raise ConfigurationError("config validation error: " + str(v.errors))

	def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
		if name == "persistent":
			return self.config["persistent"]
		elif name == "key":
			return self.config["persistent"]["key"]
		elif name == "database":
			# soon to be deprecated
			return self.config["persistent"]["key"]["database"]
		elif name == "tba":
			return self.config["persistent"]["key"]["tba"]
		elif name == "tra":
			return self.config["persistent"]["key"]["tra"]
		elif name == "priority":
			return self.config["persistent"]["config-preference"]
		elif name == "sync":
			return self.config["persistent"]["synchronize-config"]
		elif name == "variable":
			return self.config["variable"]
		elif name == "event_delay":
			return self.config["variable"]["event-delay"]
		elif name == "loop_delay":
			return self.config["variable"]["loop-delay"]
		elif name == "competition":
			return self.config["variable"]["competition"]
		elif name == "modules":
			return self.config["variable"]["modules"]
		else:
			return None

	def __getitem__(self, key):
		return self.config[key]

	def resolve_config_conflicts(self, logger, client): # needs improvement with new localization scheme
		sync = self.sync
		priority = self.priority

		if sync:
			if priority == "local" or priority == "client":
				logger.info("config-preference set to local/client, loading local config information")
				remote_config = get_database_config(client)
				if remote_config != self.config["variable"]:
					set_database_config(client, self.config["variable"])
					logger.info("database config was different and was updated")
				# no change to config
			elif priority == "remote" or priority == "database":
				logger.info("config-preference set to remote/database, loading remote config information")
				remote_config = get_database_config(client)
				if remote_config != self.config["variable"]:
					self.config["variable"] = remote_config
					self.save_config()
					# change variable to match remote
					logger.info("local config was different and was updated")
			else:
				raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
		else:
			if priority == "local" or priority == "client":
				logger.info("config-preference set to local/client, loading local config information")
				# no change to config
			elif priority == "remote" or priority == "database":
				logger.info("config-preference set to remote/database, loading database config information")
				self.config["variable"] = get_database_config(client)
				# change variable to match remote without updating local version
			else:
				raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
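The Configuration class front-loads all loading and validation into the constructor, so callers can treat attribute access as already-checked. A minimal usage sketch (assuming a config.json beside the script; the attribute names map to config paths through the `__getattr__` method above):

```python
from config import Configuration, ConfigurationError

try:
	config = Configuration("config.json")  # loads the file, then validates against _validation_schema
	tbakey = config.tba                    # -> config["persistent"]["key"]["tba"]
	modules = config.modules               # -> config["variable"]["modules"]
except ConfigurationError as e:
	# raised when the file is missing (a sample is written out) or fails validation
	print(e)
```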
@@ -1,16 +1,26 @@
 from calendar import c
 import requests
 import pull
 import pandas as pd
+import json

-def pull_new_tba_matches(apikey, competition, cutoff):
+def pull_new_tba_matches(apikey, competition, last_match):
 	api_key= apikey
-	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key}, verify=False)
+	x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
+	json = x.json()
 	out = []
-	for i in x.json():
-		if i["actual_time"] != None and i["actual_time"]-cutoff >= 0 and i["comp_level"] == "qm":
+	for i in json:
+		if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match :
 			out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
+	out.sort(key=lambda x: x['match'])
 	return out

+def pull_new_tba_matches_manual(apikey, competition, cutoff):
+	filename = competition+"-wins.json"
+	with open(filename, 'r') as f:
+		data = json.load(f)
+	return data

 def get_team_match_data(client, competition, team_num):
 	db = client.data_scouting
 	mdata = db.matchdata
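The rewritten `pull_new_tba_matches` filters on `match_number` instead of the old `actual_time` cutoff and returns the qualification matches sorted by number. A sketch of the dict shape each list entry carries, derived from the append call above (values illustrative):

```python
sample_match = {
	"match": 12,              # qualification match number
	"blue": [2022, 16, 254],  # int team numbers parsed from TBA's "frc####" keys
	"red": [1678, 971, 118],
	"winner": "red",          # TBA's "winning_alliance": "red", "blue", or ""
}
```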
@@ -19,6 +29,12 @@ def get_team_match_data(client, competition, team_num):
 		out[i['match']] = i['data']
 	return pd.DataFrame(out)

+def clear_metrics(client, competition):
+	db = client.data_processing
+	data = db.team_metrics
+	data.delete_many({competition: competition})
+	return True

 def get_team_pit_data(client, competition, team_num):
 	db = client.data_scouting
 	mdata = db.pitdata
@@ -28,7 +44,15 @@ def get_team_pit_data(client, competition, team_num):
 def get_team_metrics_data(client, competition, team_num):
 	db = client.data_processing
 	mdata = db.team_metrics
-	return mdata.find_one({"competition" : competition, "team": team_num})
+	temp = mdata.find_one({"team": team_num})
+	if temp != None:
+		if competition in temp['metrics'].keys():
+			temp = temp['metrics'][competition]
+		else :
+			temp = None
+	else:
+		temp = None
+	return temp

 def get_match_data_formatted(client, competition):
 	teams_at_comp = pull.get_teams_at_competition(competition)
@@ -51,7 +75,7 @@ def get_metrics_data_formatted(client, competition):
 	return out

 def get_pit_data_formatted(client, competition):
-	x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
+	x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
 	x = x.json()
 	x = x['data']
 	x = x.keys()
@@ -84,7 +108,7 @@ def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
 def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
 	db = client[dbname]
 	mdata = db[colname]
-	mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
+	mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)

 def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
 	db = client[dbname]
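`push_team_metrics_data` now upserts one document per team and nests each competition under a `metrics` subdocument, which is exactly what the rewritten `get_team_metrics_data` unwraps. A sketch of the stored shape this implies (field values illustrative):

```python
team_metrics_doc = {
	"team": 254,
	"metrics": {
		"2020ilch": {"gl2": {"score": 1500, "rd": 250, "vol": 0.06}},
		"2022ilch": {"gl2": {"score": 1547, "rd": 212, "vol": 0.06}},
	},
}
# get_team_metrics_data(client, "2022ilch", 254)
#   -> team_metrics_doc["metrics"]["2022ilch"]
```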
@@ -94,12 +118,12 @@ def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
 def get_analysis_flags(client, flag):
 	db = client.data_processing
 	mdata = db.flags
-	return mdata.find_one({flag:{"$exists":True}})
+	return mdata.find_one({"_id": "2022"})

 def set_analysis_flags(client, flag, data):
 	db = client.data_processing
 	mdata = db.flags
-	return mdata.replace_one({flag:{"$exists":True}}, data, True)
+	return mdata.update_one({"_id": "2022"}, {"$set": data})

 def unkeyify_2l(layered_dict):
 	out = {}
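Both flag helpers are now pinned to a single document with `_id` "2022" rather than querying by flag name, so every flag lives side by side in one record. A sketch of that document, an assumed shape consistent with the `$set` update above:

```python
flags_doc = {"_id": "2022", "metrics_last_match": 24}

# set_analysis_flags(client, "metrics_last_match", {"metrics_last_match": 30})
# issues: db.flags.update_one({"_id": "2022"}, {"$set": {"metrics_last_match": 30}})
```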
@@ -153,22 +177,17 @@ def load_metric(client, competition, match, group_name, metrics):
 		db_data = get_team_metrics_data(client, competition, team)

 		if db_data == None:

-			elo = {"score": metrics["elo"]["score"]}
 			gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
-			ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}

-			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
+			group[team] = {"gl2": gl2}

 		else:

-			metrics = db_data["metrics"]
+			metrics = db_data

-			elo = metrics["elo"]
 			gl2 = metrics["gl2"]
-			ts = metrics["ts"]

-			group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
+			group[team] = {"gl2": gl2}

 	return group
competition/dep.py (new file, 132 lines)
@@ -0,0 +1,132 @@
# contains deprecated functions, not to be used unless necessary!

import json

sample_json = """
{
	"persistent":{
		"key":{
			"database":"",
			"tba":"",
			"tra":{
				"CLIENT_ID":"",
				"CLIENT_SECRET":"",
				"url": ""
			}
		},
		"config-preference":"local",
		"synchronize-config":false
	},
	"variable":{
		"max-threads":0.5,
		"team":"",
		"event-delay":false,
		"loop-delay":0,
		"reportable":true,
		"teams":[

		],
		"modules":{
			"match":{
				"tests":{
					"balls-blocked":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-collected":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-lower-teleop":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-lower-auto":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-started":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-upper-teleop":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					],
					"balls-upper-auto":[
						"basic_stats",
						"historical_analysis",
						"regression_linear",
						"regression_logarithmic",
						"regression_exponential",
						"regression_polynomial",
						"regression_sigmoidal"
					]
				}
			},
			"metric":{
				"tests":{
					"gl2":{
						"score":1500,
						"rd":250,
						"vol":0.06
					}
				}
			},
			"pit":{
				"tests":{
					"wheel-mechanism":true,
					"low-balls":true,
					"high-balls":true,
					"wheel-success":true,
					"strategic-focus":true,
					"climb-mechanism":true,
					"attitude":true
				}
			}
		}
	}
}
"""

def load_config(path, config_vector):
	try:
		f = open(path, "r")
		config_vector.update(json.load(f))
		f.close()
		return 0
	except:
		f = open(path, "w")
		f.write(sample_json)
		f.close()
		return 1
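`dep.load_config` signals failure through its return value instead of an exception: it mutates the passed-in dict and returns 0 on success, or writes `sample_json` to the path and returns 1. A minimal sketch of the calling convention, matching how pull.py uses it below (path assumed):

```python
from dep import load_config

config = {}
if load_config("config.json", config):
	print("no config found; wrote a sample config.json, fill in the keys and rerun")
else:
	trakey = config["persistent"]["key"]["tra"]
```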
competition/exceptions.py (new file, 7 lines)
@@ -0,0 +1,7 @@
class APIError(Exception):
	def __init__(self, str):
		super().__init__(str)

class ConfigurationError (Exception):
	def __init__(self, str):
		super().__init__(str)
competition/interface.py (new file, 91 lines)
@@ -0,0 +1,91 @@
from logging import Logger as L
import datetime
import platform
import json

class Logger(L):

	file = None

	levels = {
		0: "",
		10:"[DEBUG] ",
		20:"[INFO] ",
		30:"[WARNING] ",
		40:"[ERROR] ",
		50:"[CRITICAL]",
	}

	targets = []

	def __init__(self, verbose, profile, debug, file = None):
		super().__init__("tra_logger")

		self.file = file

		if file != None:
			self.targets.append(self._send_file)

		if profile:
			self.targets.append(self._send_null)
		elif verbose:
			self.targets.append(self._send_scli)
		elif debug:
			self.targets.append(self._send_scli)
		else:
			self.targets.append(self._send_null)

	def _send_null(self, msg):
		pass

	def _send_scli(self, msg):
		print(msg)

	def _send_file(self, msg):
		f = open(self.file, 'a')
		f.write(msg + "\n")
		f.close()

	def get_time_formatted(self):
		return datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S %Z")

	def log(self, level, msg):
		for t in self.targets:
			t(self.get_time_formatted() + "| " + self.levels[level] + ": " + msg)

	def debug(self, msg):
		self.log(10, msg)

	def info(self, msg):
		self.log(20, msg)

	def warning(self, msg):
		self.log(30, msg)

	def error(self, msg):
		self.log(40, msg)

	def critical(self, msg):
		self.log(50, msg)

	def splash(self, version):

		def hrule():
			self.log(0, "#"+38*"-"+"#")
		def box(s):
			temp = "|"
			temp += s
			temp += (40-len(s)-2)*" "
			temp += "|"
			self.log(0, temp)

		hrule()
		box(" superscript version: " + version)
		box(" os: " + platform.system())
		box(" python: " + platform.python_version())
		hrule()

	def save_module_to_file(self, module, data, results):
		f = open(module + ".log", "w")
		json.dump({"data": data, "results":results}, f, ensure_ascii=False, indent=4)
		f.close()
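This Logger fans every message out to a list of sinks chosen once at construction: a file sink when a path is given, plus either the console (`verbose` or `debug`) or a null sink (`profile`, or silent daemon mode). A minimal sketch of how superscript.py wires it up in the diff below (filenames as they appear there):

```python
from interface import Logger

logger = Logger(verbose=True, profile=False, debug=False, file="logfile.log")
logger.splash("1.0.0")  # boxed version banner, emitted at level 0
logger.info("established connection to database")
logger.error("encountered an exception while running")
```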
@@ -3,6 +3,7 @@ import data as d
 import signal
 import numpy as np
 from tra_analysis import Analysis as an
+from tqdm import tqdm

 class Module(metaclass = abc.ABCMeta):

@@ -22,7 +23,7 @@ class Module(metaclass = abc.ABCMeta):
 	def validate_config(self, *args, **kwargs):
 		raise NotImplementedError
 	@abc.abstractmethod
-	def run(self, exec_threads, *args, **kwargs):
+	def run(self, *args, **kwargs):
 		raise NotImplementedError

 class Match (Module):

@@ -46,9 +47,9 @@ class Match (Module):
 	def validate_config(self):
 		return True, ""

-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()

 	def _load_data(self):

@@ -85,7 +86,7 @@ class Match (Module):
 		if test == "regression_sigmoidal":
 			return an.regression(ranges, data, ['sig'])

-	def _process_data(self, exec_threads):
+	def _process_data(self):

 		tests = self.config["tests"]
 		data = self.data

@@ -103,7 +104,6 @@ class Match (Module):
 					input_vector.append((team, variable, test, data[team][variable]))

 		self.data = input_vector
-		#self.results = list(exec_threads.map(self._simplestats, self.data))
 		self.results = []
 		for test_var_data in self.data:
 			self.results.append(self._simplestats(test_var_data))
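With the process pool gone, `_process_data` applies `_simplestats` to each (team, variable, test, data) tuple serially; the loop is the direct equivalent of the removed `exec_threads.map` call. A self-contained sketch of the pattern (the stats function here is a stand-in, not the real `_simplestats`):

```python
# Stand-in for Module._simplestats: consumes one (team, variable, test, data) tuple.
def simplestats(item):
	team, variable, test, series = item
	return team, variable, test, sum(series) / len(series)

inputs = [(254, "balls-collected", "basic_stats", [1, 2, 3])]
results = [simplestats(x) for x in inputs]  # equivalent to Pool.map(simplestats, inputs)
```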
@@ -164,31 +164,27 @@ class Metric (Module):
 	def validate_config(self):
 		return True, ""

-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()

 	def _load_data(self):
-		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.timestamp)
+		self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match']
+		print("Previous last match", self.last_match)
+		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match)

-	def _process_data(self, exec_threads):
-
-		elo_N = self.config["tests"]["elo"]["N"]
-		elo_K = self.config["tests"]["elo"]["K"]
+	def _process_data(self):

 		self.results = {}
+		self.match = self.last_match
 		matches = self.data

 		red = {}
 		blu = {}

-		for match in matches:
-
-			red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"])
-			blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"])
-
-			elo_red_total = 0
-			elo_blu_total = 0
+		for match in tqdm(matches, desc="Metrics"): # grab matches and loop through each one
+			self.match = max(self.match, int(match['match']))
+			red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red
+			blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue

 			gl2_red_score_total = 0
 			gl2_blu_score_total = 0

@@ -199,72 +195,63 @@ class Metric (Module):
 			gl2_red_vol_total = 0
 			gl2_blu_vol_total = 0

-			for team in red:
-
-				elo_red_total += red[team]["elo"]["score"]
+			for team in red: # for each team in red, add up gl2 score components

 				gl2_red_score_total += red[team]["gl2"]["score"]
 				gl2_red_rd_total += red[team]["gl2"]["rd"]
 				gl2_red_vol_total += red[team]["gl2"]["vol"]

-			for team in blu:
-
-				elo_blu_total += blu[team]["elo"]["score"]
+			for team in blu: # for each team in blue, add up gl2 score components

 				gl2_blu_score_total += blu[team]["gl2"]["score"]
 				gl2_blu_rd_total += blu[team]["gl2"]["rd"]
 				gl2_blu_vol_total += blu[team]["gl2"]["vol"]

-			red_elo = {"score": elo_red_total / len(red)}
-			blu_elo = {"score": elo_blu_total / len(blu)}
-
-			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
-			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
+			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3
+			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the scores by dividing by 3

-			if match["winner"] == "red":
+			if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}

 				observations = {"red": 1, "blu": 0}

-			elif match["winner"] == "blue":
+			elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}

 				observations = {"red": 0, "blu": 1}

-			else:
+			else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}

 				observations = {"red": 0.5, "blu": 0.5}

-			red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-			blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
-
-			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
-			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
+			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
+			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue

-			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
-			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
+			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
+			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue

-			for team in red:
-
-				red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
+			for team in red: # for each team on red, add the previous score with the delta to find the new score

 				red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
 				red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
 				red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]

-			for team in blu:
-
-				blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
+			for team in blu: # for each team on blue, add the previous score with the delta to find the new score

 				blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
 				blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
 				blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]

 			temp_vector = {}
-			temp_vector.update(red)
+			temp_vector.update(red) # update the team's score with the temporary vector
 			temp_vector.update(blu)

-			d.push_metric(self.apikey, self.competition, temp_vector)
+			self.results[match['match']] = temp_vector
+
+			d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
+		print("New last match", self.match)
+		d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})

 	def _push_results(self):
 		pass
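Each alliance's three team ratings are collapsed into one composite Glicko-2 rating by averaging the score, rd, and vol components, and the per-match delta is later applied back to every member. A standalone sketch of the averaging step (team numbers and values illustrative):

```python
red = {
	254:  {"gl2": {"score": 1500, "rd": 250, "vol": 0.06}},
	971:  {"gl2": {"score": 1600, "rd": 200, "vol": 0.06}},
	1678: {"gl2": {"score": 1400, "rd": 300, "vol": 0.06}},
}
red_gl2 = {k: sum(t["gl2"][k] for t in red.values()) / len(red)
           for k in ("score", "rd", "vol")}
# -> {"score": 1500.0, "rd": 250.0, "vol": 0.06}
```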
@@ -289,17 +276,16 @@ class Pit (Module):
 	def validate_config(self):
 		return True, ""

-	def run(self, exec_threads):
+	def run(self):
 		self._load_data()
-		self._process_data(exec_threads)
+		self._process_data()
 		self._push_results()

 	def _load_data(self):
 		self.data = d.load_pit(self.apikey, self.competition)

-	def _process_data(self, exec_threads):
+	def _process_data(self):
 		tests = self.config["tests"]
-		print(tests)
 		return_vector = {}
 		for team in self.data:
 			for variable in self.data[team]:
@@ -1,9 +1,8 @@
 import requests
 import json
 from exceptions import APIError
-from config import load_config
+from dep import load_config

-url = "https://titanscouting.epochml.org"
+url = "https://scouting.titanrobotics2022.com"
 config_tra = {}
 load_config("config.json", config_tra)
 trakey = config_tra['persistent']['key']['tra']
@@ -19,7 +18,7 @@ def get_team_competition():
 	if json['success']:
 		return json['competition']
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)

 def get_team():
 	endpoint = '/api/fetchTeamCompetition'
@@ -32,7 +31,7 @@ def get_team():
 	if json['success']:
 		return json['team']
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)

 def get_team_match_data(competition, team_num):
 	endpoint = '/api/fetchAllTeamMatchData'
@@ -47,7 +46,7 @@ def get_team_match_data(competition, team_num):
 	if json['success']:
 		return json['data'][team_num]
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)

 def get_teams_at_competition(competition):
 	endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
@@ -61,4 +60,4 @@ def get_teams_at_competition(competition):
 	if json['success']:
 		return list(json['data'].keys())
 	else:
-		raise APIError(json, endpoint)
+		raise APIError(json)
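Every helper in pull.py follows the same request/unwrap pattern: GET `url + endpoint`, check the `success` field, and raise `APIError` on failure (now constructed with the payload alone, matching the slimmed-down exception class). A condensed sketch of that shared shape (`fetch` is a hypothetical name, not a function in the file):

```python
import requests
from exceptions import APIError

url = "https://scouting.titanrobotics2022.com"

def fetch(endpoint, params):
	payload = requests.get(url + endpoint, params=params).json()
	if payload['success']:
		return payload['data']
	raise APIError(payload)  # hypothetical consolidation of the repeated branch
```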
competition/requirements.txt (new file, 15 lines)
@@ -0,0 +1,15 @@
cerberus
dnspython
numpy
pandas
pyinstaller
pylint
pymongo
pyparsing
python-daemon
pyzmq
requests
scikit-learn
scipy
six
tra-analysis
@@ -23,6 +23,9 @@ __changelog__ = """changelog:
 	- config-preference option selects between prioritizing local config and prioritizing database config
 	- synchronize-config option selects whether to update the non prioritized config with the prioritized one
 	- divided config options between persistent ones (keys), and variable ones (everything else)
+	- generalized behavior of various core components by collecting loose functions in several dependencies into classes
+	- module.py contains classes, each one represents a single data analysis routine
+	- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
 	0.9.3:
 	- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
 	- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
@@ -144,47 +147,29 @@ __author__ = (
 	"Jacob Levine <jlevine@imsa.edu>",
 )

-__all__ = [
-	"load_config",
-	"save_config",
-]
-
 # imports:

 import json
-from multiprocessing import freeze_support
-import os
-import pymongo
-import sys
-import time
+import os, sys, time
+import pymongo # soon to be deprecated
 import traceback
 import warnings
-import zmq
 import pull
-from config import parse_config_persistent, parse_config_variable, resolve_config_conflicts, load_config, save_config, ConfigurationError
-from data import get_previous_time, set_current_time, check_new_database_matches
-from interface import splash, log, ERR, INF, stdout, stderr
+from config import Configuration, ConfigurationError
+from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
+from interface import Logger
 from module import Match, Metric, Pit
+import zmq

 config_path = "config.json"

-def main(send, verbose = False, profile = False, debug = False):
+def main(logger, verbose, profile, debug, socket_send = None):

 	def close_all():
-		if "exec_threads" in locals():
-			exec_threads.terminate()
-			exec_threads.join()
-			exec_threads.close()
 		if "client" in locals():
 			client.close()
 		if "f" in locals():
 			f.close()

 	warnings.filterwarnings("ignore")
 	exit_code = 0

 	if verbose:
-		splash(__version__)
+		logger.splash(__version__)

 	modules = {"match": Match, "metric": Metric, "pit": Pit}
@@ -194,32 +179,32 @@ def main(send, verbose = False, profile = False, debug = False):

 		loop_start = time.time()

-		send(stdout, INF, "current time: " + str(loop_start))
+		logger.info("current time: " + str(loop_start))
+		socket_send("current time: " + str(loop_start))

-		config = {}
+		config = Configuration(config_path)

-		if load_config(config_path, config):
-			raise ConfigurationError("could not find config at <" + config_path + ">, generating blank config and exiting", 110)
+		logger.info("found and loaded config at <" + config_path + ">")
+		socket_send("found and loaded config at <" + config_path + ">")

-		send(stdout, INF, "found and loaded config at <" + config_path + ">")
+		apikey, tbakey = config.database, config.tba

-		apikey, tbakey, preference, sync = parse_config_persistent(send, config)
-
-		send(stdout, INF, "found and loaded database and tba keys")
+		logger.info("found and loaded database and tba keys")
+		socket_send("found and loaded database and tba keys")

 		client = pymongo.MongoClient(apikey)

-		send(stdout, INF, "established connection to database")
+		logger.info("established connection to database")
+		socket_send("established connection to database")

 		previous_time = get_previous_time(client)
-		send(stdout, INF, "analysis backtimed to: " + str(previous_time))
-
-		config = resolve_config_conflicts(send, client, config, preference, sync)
+		logger.info("analysis backtimed to: " + str(previous_time))
+		socket_send("analysis backtimed to: " + str(previous_time))

-		exec_threads, config_modules = parse_config_variable(send, config)
-		if 'competition' in config['variable']:
-			competition = config['variable']['competition']
-		else:
-			competition = pull.get_team_competition()
+		config.resolve_config_conflicts(logger, client)
+
+		config_modules, competition = config.modules, config.competition
 		for m in config_modules:
 			if m in modules:
 				start = time.time()
@@ -227,71 +212,81 @@ def main(send, verbose = False, profile = False, debug = False):
 				valid = current_module.validate_config()
 				if not valid:
 					continue
-				current_module.run(exec_threads)
-				send(stdout, INF, m + " module finished in " + str(time.time() - start) + " seconds")
+				current_module.run()
+				logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
+				socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
 				if debug:
-					f = open(m + ".log", "w+")
-					json.dump({"data": current_module.data, "results":current_module.results}, f, ensure_ascii=False, indent=4)
-					f.close()
+					logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger

 		set_current_time(client, loop_start)
 		close_all()

-		send(stdout, INF, "closed threads and database client")
-		send(stdout, INF, "finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
+		logger.info("closed threads and database client")
+		logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
+		socket_send("closed threads and database client")
+		socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")

 		if profile:
-			exit_code = 0
-			break
+			return 0

+		if debug:
+			return 0

 		event_delay = config["variable"]["event-delay"]
 		if event_delay:
-			send(stdout, INF, "loop delayed until database returns new matches")
+			logger.info("loop delayed until database returns new matches")
+			socket_send("loop delayed until database returns new matches")
 			new_match = False
 			while not new_match:
 				time.sleep(1)
 				new_match = check_new_database_matches(client, competition)
-			send(stdout, INF, "database returned new matches")
+			logger.info("database returned new matches")
+			socket_send("database returned new matches")
 		else:
 			loop_delay = float(config["variable"]["loop-delay"])
 			remaining_time = loop_delay - (time.time() - loop_start)
 			if remaining_time > 0:
-				send(stdout, INF, "loop delayed by " + str(remaining_time) + " seconds")
+				logger.info("loop delayed by " + str(remaining_time) + " seconds")
+				socket_send("loop delayed by " + str(remaining_time) + " seconds")
 				time.sleep(remaining_time)

 	except KeyboardInterrupt:
-		send(stdout, INF, "detected KeyboardInterrupt, killing threads")
-		close_all()
-		send(stdout, INF, "terminated threads, exiting")
-		break
+		logger.info("detected KeyboardInterrupt, exiting")
+		socket_send("detected KeyboardInterrupt, exiting")
+		return 0

 	except ConfigurationError as e:
-		send(stderr, ERR, "encountered a configuration error: " + str(e), code = e.code)
-		traceback.print_exc(file = stderr)
-		exit_code = 1
+		str_e = "".join(traceback.format_exception(e))
+		logger.error("encountered a configuration error: " + str(e))
+		logger.error(str_e)
+		socket_send("encountered a configuration error: " + str(e))
+		socket_send(str_e)
 		close_all()
-		break
+		return 1

 	except Exception as e:
-		send(stderr, ERR, "encountered an exception while running", code = 1)
-		traceback.print_exc(file = stderr)
-		exit_code = 1
+		str_e = "".join(traceback.format_exception(e))
+		logger.error("encountered an exception while running")
+		logger.error(str_e)
+		socket_send("encountered an exception while running")
+		socket_send(str_e)
 		close_all()
-		break
+		return 1

-	return exit_code
-
-def start(pid_path, verbose = False, profile = False, debug = False):
+def start(pid_path, verbose, profile, debug):

 	if profile:

-		def send(target, level, message, code = 0):
+		def send(msg):
 			pass

+		logger = Logger(verbose, profile, debug)
+
 		import cProfile, pstats, io
 		profile = cProfile.Profile()
 		profile.enable()
-		exit_code = main(send, profile = True)
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
 		profile.disable()
 		f = open("profile.txt", 'w+')
 		ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
@@ -300,35 +295,53 @@ def start(pid_path, verbose = False, profile = False, debug = False):

 	elif verbose:

-		exit_code = main(log, verbose = verbose)
+		def send(msg):
+			pass
+
+		logger = Logger(verbose, profile, debug)
+
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
 		sys.exit(exit_code)

 	elif debug:

-		exit_code = main(log, verbose = True, profile = True, debug = debug)
+		def send(msg):
+			pass
+
+		logger = Logger(verbose, profile, debug)
+
+		exit_code = main(logger, verbose, profile, debug, socket_send = send)
 		sys.exit(exit_code)

 	else:

-		f = open('errorlog.log', 'w+')
+		logfile = "logfile.log"
+
+		f = open(logfile, 'w+')
+		f.close()
+
+		e = open('errorlog.log', 'w+')
 		with daemon.DaemonContext(
 			working_directory = os.getcwd(),
 			pidfile = pidfile.TimeoutPIDLockFile(pid_path),
-			stderr = f
+			stderr = e
 		):

 			context = zmq.Context()
 			socket = context.socket(zmq.PUB)
 			socket.bind("tcp://*:5678")

 			socket.send(b'status')

-			def send(target, level, message, code = 0):
-				socket.send(bytes("status: " + message, 'utf-8'))
+			def send(msg):
+				socket.send(bytes("status: " + msg, "utf-8"))

+			logger = Logger(verbose, profile, debug, file = logfile)
+
+			exit_code = main(logger, verbose, profile, debug, socket_send = send)

-			exit_code = main(send)
 			socket.close()
 			f.close()

 			sys.exit(exit_code)

 def stop(pid_path):
@@ -350,17 +363,16 @@ def stop(pid_path):
 	if os.path.exists(pid_path):
 		os.remove(pid_path)
 	else:
-		traceback.print_exc(file = stderr)
+		traceback.print_exc(file = sys.stderr)
 		sys.exit(1)

 def restart(pid_path):
 	stop(pid_path)
-	start(pid_path)
+	start(pid_path, False, False, False)

 if __name__ == "__main__":

 	if sys.platform.startswith("win"):
-		freeze_support()
 		start(None, verbose = True)

 	else:
@@ -370,17 +382,17 @@ if __name__ == "__main__":
 	pid_path = "tra-daemon.pid"
 	if len(sys.argv) == 2:
 		if 'start' == sys.argv[1]:
-			start(pid_path)
+			start(pid_path, False, False, False)
 		elif 'stop' == sys.argv[1]:
 			stop(pid_path)
 		elif 'restart' == sys.argv[1]:
 			restart(pid_path)
 		elif 'verbose' == sys.argv[1]:
-			start(None, verbose = True)
+			start(None, True, False, False)
 		elif 'profile' == sys.argv[1]:
-			start(None, profile=True)
+			start(None, False, True, False)
 		elif 'debug' == sys.argv[1]:
-			start(None, debug = True)
+			start(None, False, False, True)
 		else:
 			print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
 			sys.exit(2)
@@ -1,244 +0,0 @@
|
||||
import math
|
||||
import json
|
||||
from multiprocessing import Pool
|
||||
import os
|
||||
from cerberus import Validator
|
||||
from exceptions import ConfigurationError
|
||||
|
||||
from data import set_database_config, get_database_config
|
||||
from interface import stderr, stdout, INF, ERR
|
||||
|
||||
config_path = "config.json"
|
||||
|
||||
sample_json = """
|
||||
{
|
||||
"persistent":{
|
||||
"key":{
|
||||
"database":"",
|
||||
"tba":"",
|
||||
"tra":{
|
||||
"CLIENT_ID":"",
|
||||
"CLIENT_SECRET":"",
|
||||
"url": ""
|
||||
}
|
||||
},
|
||||
"config-preference":"local",
|
||||
"synchronize-config":false
|
||||
},
|
||||
"variable":{
|
||||
"max-threads":0.5,
|
||||
"team":"",
|
||||
"event-delay":false,
|
||||
"loop-delay":0,
|
||||
"reportable":true,
|
||||
"teams":[
|
||||
|
||||
],
|
||||
"modules":{
|
||||
"match":{
|
||||
"tests":{
|
||||
"balls-blocked":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-collected":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-lower-teleop":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-lower-auto":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-started":[
|
||||
"basic_stats",
|
||||
"historical_analyss",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-upper-teleop":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-upper-auto":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
]
|
||||
}
|
||||
},
|
||||
"metric":{
|
||||
"tests":{
|
||||
"elo":{
|
||||
"score":1500,
|
||||
"N":400,
|
||||
"K":24
|
||||
},
|
||||
"gl2":{
|
||||
"score":1500,
|
||||
"rd":250,
|
||||
"vol":0.06
|
||||
},
|
||||
"ts":{
|
||||
"mu":25,
|
||||
"sigma":8.33
|
||||
}
|
||||
}
|
||||
},
|
||||
"pit":{
|
||||
"tests":{
|
||||
"wheel-mechanism":true,
|
||||
"low-balls":true,
|
||||
"high-balls":true,
|
||||
"wheel-success":true,
|
||||
"strategic-focus":true,
|
||||
"climb-mechanism":true,
|
||||
"attitude":true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
def parse_config_persistent(send, config):
|
||||
v = Validator(load_validation_schema(), allow_unknown = True)
|
||||
isValidated = v.validate(config)
|
||||
|
||||
if not isValidated:
|
||||
raise ConfigurationError(v.errors, 101)
|
||||
|
||||
apikey = config["persistent"]["key"]["database"]
|
||||
tbakey = config["persistent"]["key"]["tba"]
|
||||
preference = config["persistent"]["config-preference"]
|
||||
sync = config["persistent"]["synchronize-config"]
|
||||
|
||||
return apikey, tbakey, preference, sync
|
||||
|
||||
def parse_config_variable(send, config):
|
||||
|
||||
sys_max_threads = os.cpu_count()
|
||||
try:
|
||||
cfg_max_threads = config["variable"]["max-threads"]
|
||||
except:
|
||||
raise ConfigurationError("variable/max-threads field is invalid or missing, refer to documentation for configuration options", 109)
|
||||
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
|
||||
alloc_processes = sys_max_threads + cfg_max_threads
|
||||
elif cfg_max_threads > 0 and cfg_max_threads < 1:
|
||||
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
|
||||
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
|
||||
alloc_processes = cfg_max_threads
|
||||
elif cfg_max_threads == 0:
|
||||
alloc_processes = sys_max_threads
|
||||
else:
|
||||
raise ConfigurationError("variable/max-threads must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads) + ", but got " + cfg_max_threads, 110)
|
||||
try:
|
||||
exec_threads = Pool(processes = alloc_processes)
|
||||
except Exception as e:
|
||||
send(stderr, INF, e)
|
||||
raise ConfigurationError("unable to start threads", 200)
|
||||
send(stdout, INF, "successfully initialized " + str(alloc_processes) + " threads")
|
||||
|
||||
try:
|
||||
modules = config["variable"]["modules"]
|
||||
except:
|
||||
raise ConfigurationError("variable/modules field is invalid or missing", 102)
|
||||
|
||||
if modules == None:
|
||||
raise ConfigurationError("variable/modules field is empty", 106)
|
||||
|
||||
send(stdout, INF, "found and loaded competition, match, metrics, pit from config")
|
||||
|
||||
return exec_threads, modules
|
||||
|
||||
def resolve_config_conflicts(send, client, config, preference, sync):

	if sync:
		if preference == "local" or preference == "client":
			send(stdout, INF, "config-preference set to local/client, loading local config information")
			remote_config = get_database_config(client)
			if remote_config != config["variable"]:
				set_database_config(client, config["variable"])
				send(stdout, INF, "database config was different and was updated")
			return config
		elif preference == "remote" or preference == "database":
			send(stdout, INF, "config-preference set to remote/database, loading remote config information")
			remote_config = get_database_config(client)
			if remote_config != config["variable"]:
				config["variable"] = remote_config
				if save_config(config_path, config):
					raise ConfigurationError("local config was different but could not be updated", 121)
				send(stdout, INF, "local config was different and was updated")
			return config
		else:
			raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)
	else:
		if preference == "local" or preference == "client":
			send(stdout, INF, "config-preference set to local/client, loading local config information")
			return config
		elif preference == "remote" or preference == "database":
			send(stdout, INF, "config-preference set to remote/database, loading database config information")
			config["variable"] = get_database_config(client)
			return config
		else:
			raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"", 120)

def load_config(path, config_vector):
	try:
		f = open(path, "r")
		config_vector.update(json.load(f))
		f.close()
		return 0
	except (OSError, json.JSONDecodeError):
		f = open(path, "w")
		f.write(sample_json)
		f.close()
		return 1

def load_validation_schema():
	try:
		with open("validation-schema.json", "r") as f:
			return json.load(f)
	except OSError:
		raise FileNotFoundError("Validation schema not found at validation-schema.json")

def save_config(path, config_vector):
	f = open(path, "w+")
	json.dump(config_vector, f, ensure_ascii = False, indent = 4)
	f.close()
	return 0
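Taken together, these helpers round-trip the JSON config: load it (writing the sample on failure), parse it, and save it back. A minimal usage sketch; the path and the send callback are placeholders, not the script's actual wiring:

# Hypothetical wiring of the config helpers above; "config.json" and the
# print callback stand in for whatever superscript.py actually passes.
config = {}
if load_config("config.json", config):
	exit(1) # a fresh sample config was written; fill it in and rerun
apikey, tbakey, preference, sync = parse_config_persistent(print, config)
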
@@ -1,11 +0,0 @@
class APIError(Exception):
	code = None
	def __init__(self, message, endpoint):
		super().__init__(message)
		self.endpoint = endpoint

class ConfigurationError(Exception):
	code = None
	def __init__(self, message, code):
		super().__init__(message)
		self.code = code

@@ -1,44 +0,0 @@
import sys
import time
from os import system, name
import platform

empty_delim = " "
hard_divided_delim = "|"
soft_divided_delim = "|"
l_brack = "["
r_brack = "]"

ERR = "[ERR]"
INF = "[INF]"

stdout = sys.stdout
stderr = sys.stderr

def log(target, level, message, code = 0):

	message = time.ctime() + empty_delim + str(level) + l_brack + f"{code:+05}" + r_brack + empty_delim + soft_divided_delim + empty_delim + message
	print(message, file = target)

def clear():
	if name == "nt":
		system("cls")
	else:
		system("clear")

def splash(version):

	def hrule():
		print("#" + 38 * "-" + "#")
	def box(s):
		temp = "|"
		temp += s
		temp += (40 - len(s) - 2) * " "
		temp += "|"
		print(temp)

	hrule()
	box(" superscript version: " + version)
	box(" os: " + platform.system())
	box(" python: " + platform.python_version())
	hrule()
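As a sanity check, log() emits the timestamp, the level tag, the signed zero-padded code, then the message; a sample call and its output (the timestamp is illustrative):

# Illustrative only: what log() prints for a sample call.
# log(stdout, INF, "threads ready", code = 42)
# -> Mon May  3 12:00:00 2021 [INF][+0042] | threads ready
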
@@ -1,27 +0,0 @@
{
	"persistent": {
		"type": "dict",
		"require_all": true,
		"schema": {
			"key": {
				"type": "dict",
				"require_all": true,
				"schema": {
					"database": {"type": "string"},
					"tba": {"type": "string"},
					"tra": {
						"type": "dict",
						"require_all": true,
						"schema": {
							"CLIENT_ID": {"type": "string"},
							"CLIENT_SECRET": {"type": "string"},
							"url": {"type": "string"}
						}
					}
				}
			},
			"config-preference": {"type": "string", "required": true},
			"synchronize-config": {"type": "boolean", "required": true}
		}
	}
}
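For orientation, a "persistent" block that would satisfy the cerberus schema above; every value here is a placeholder, not a real credential or endpoint:

{
	"persistent": {
		"key": {
			"database": "mongodb+srv://user:pass@example.mongodb.net/",
			"tba": "tba-key-placeholder",
			"tra": {
				"CLIENT_ID": "id-placeholder",
				"CLIENT_SECRET": "secret-placeholder",
				"url": "https://example.com"
			}
		},
		"config-preference": "local",
		"synchronize-config": false
	}
}
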
129
src/gui/data.py
@@ -1,129 +0,0 @@
import requests
import pymongo
import pandas as pd
import time

def pull_new_tba_matches(apikey, competition, cutoff):
	x = requests.get("https://www.thebluealliance.com/api/v3/event/" + competition + "/matches/simple", headers={"X-TBA-Auth-Key": apikey})
	out = []
	for i in x.json():
		if i["actual_time"] is not None and i["actual_time"] - cutoff >= 0 and i["comp_level"] == "qm":
			out.append({"match": i['match_number'], "blue": list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red": list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
	return out

def get_team_match_data(apikey, competition, team_num):
	client = pymongo.MongoClient(apikey)
	db = client.data_scouting
	mdata = db.matchdata
	out = {}
	for i in mdata.find({"competition": competition, "team_scouted": team_num}):
		out[i['match']] = i['data']
	return pd.DataFrame(out)

def get_team_pit_data(apikey, competition, team_num):
	client = pymongo.MongoClient(apikey)
	db = client.data_scouting
	mdata = db.pitdata
	return mdata.find_one({"competition": competition, "team_scouted": team_num})["data"]

def get_team_metrics_data(apikey, competition, team_num):
	client = pymongo.MongoClient(apikey)
	db = client.data_processing
	mdata = db.team_metrics
	return mdata.find_one({"competition": competition, "team": team_num})

def get_match_data_formatted(apikey, competition):
	client = pymongo.MongoClient(apikey)
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = unkeyify_2l(get_team_match_data(apikey, competition, int(i)).transpose().to_dict())
		except Exception: # skip non-team keys such as _id
			pass
	return out

def get_metrics_data_formatted(apikey, competition):
	client = pymongo.MongoClient(apikey)
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = get_team_metrics_data(apikey, competition, int(i))
		except Exception: # skip non-team keys such as _id
			pass
	return out

def get_pit_data_formatted(apikey, competition):
	client = pymongo.MongoClient(apikey)
	db = client.data_scouting
	mdata = db.teamlist
	x = mdata.find_one({"competition": competition})
	out = {}
	for i in x:
		try:
			out[int(i)] = get_team_pit_data(apikey, competition, int(i))
		except Exception: # skip non-team keys such as _id
			pass
	return out

def get_pit_variable_data(apikey, competition):
	client = pymongo.MongoClient(apikey)
	db = client.data_processing
	mdata = db.team_pit
	return mdata.find()

def get_pit_variable_formatted(apikey, competition):
	temp = get_pit_variable_data(apikey, competition)
	out = {}
	for i in temp:
		out[i["variable"]] = i["data"]
	return out

def push_team_tests_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
	client = pymongo.MongoClient(apikey)
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "team": team_num}, {"_id": competition + str(team_num) + "am", "competition": competition, "team": team_num, "data": data}, True)

def push_team_metrics_data(apikey, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
	client = pymongo.MongoClient(apikey)
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "team": team_num}, {"_id": competition + str(team_num) + "am", "competition": competition, "team": team_num, "metrics": data}, True)

def push_team_pit_data(apikey, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
	client = pymongo.MongoClient(apikey)
	db = client[dbname]
	mdata = db[colname]
	mdata.replace_one({"competition": competition, "variable": variable}, {"competition": competition, "variable": variable, "data": data}, True)

def get_analysis_flags(apikey, flag):
	client = pymongo.MongoClient(apikey)
	db = client.data_processing
	mdata = db.flags
	return mdata.find_one({flag: {"$exists": True}})

def set_analysis_flags(apikey, flag, data):
	client = pymongo.MongoClient(apikey)
	db = client.data_processing
	mdata = db.flags
	return mdata.replace_one({flag: {"$exists": True}}, data, True)

def unkeyify_2l(layered_dict):
	out = {}
	for i in layered_dict.keys():
		add = []
		for j in layered_dict[i].keys():
			add.append([j, layered_dict[i][j]])
		add.sort(key = lambda x: x[0])
		out[i] = list(map(lambda x: x[1], add))
	return out
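A minimal sketch of how these helpers are meant to compose; the MongoDB URI, TBA key, and event key are placeholders, not real values:

# Hypothetical usage of the data helpers above.
apikey = "mongodb+srv://user:pass@example.mongodb.net/"
matches = pull_new_tba_matches("tba-key-placeholder", "2020ilch", cutoff = 0)
scouting = get_match_data_formatted(apikey, "2020ilch")
push_team_metrics_data(apikey, "2020ilch", 2022, {"elo": {"score": 1500}})
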
@@ -1,151 +0,0 @@
<Launch>:
	orientation: "vertical"

NavigationLayout:
	ScreenManager:
		id: screen_manager
		HomeScreen:
			name: "Home"
			BoxLayout:
				orientation: "vertical"
				MDToolbar:
					title: screen_manager.current
					elevation: 10
					left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]

				GridLayout:
					cols: 1
					padding: 15, 15
					spacing: 20, 20
					MDTextFieldRect:
						hint_text: "Console Log"
						# size_hint: .8, None
						# align: 'center'
				# Widget:
		SettingsScreen:
			name: "Settings"
			BoxLayout:
				orientation: 'vertical'
				MDToolbar:
					title: screen_manager.current
					elevation: 10
					left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
				Widget:
		InfoScreen:
			name: "Info"
			BoxLayout:
				orientation: 'vertical'
				MDToolbar:
					title: screen_manager.current
					elevation: 10
					left_action_items: [['menu', lambda x: nav_drawer.toggle_nav_drawer()]]
				# GridLayout:
				# 	cols: 2
				# 	padding: 15, 15
				# 	spacing: 20, 20
				BoxLayout:
					orientation: "horizontal"
					MDLabel:
						text: "DB Key:"
						halign: 'center'
					MDTextField:
						hint_text: "placeholder"
						pos_hint: {"center_y": .5}

				BoxLayout:
					orientation: "horizontal"
					MDLabel:
						text: "TBA Key:"
						halign: 'center'
					MDTextField:
						hint_text: "placeholder"
						pos_hint: {"center_y": .5}
				BoxLayout:
					orientation: "horizontal"
					MDLabel:
						text: "CPU Use:"
						halign: 'center'
					MDLabel:
						text: "placeholder"
						halign: 'center'
				BoxLayout:
					orientation: "horizontal"
					MDLabel:
						text: "Network:"
						halign: 'center'
					MDLabel:
						text: "placeholder"
						halign: 'center'
				Widget:
				BoxLayout:
					orientation: "horizontal"
					MDLabel:
						text: "Progress"
						halign: 'center'
					MDProgressBar:
						id: progress
						value: 50
		StatsScreen:
			name: "Stats"
			MDCheckbox:
				size_hint: None, None
				size: "48dp", "48dp"
				pos_hint: {'center_x': .5, 'center_y': .5}
				on_active: app.test()

	#Navigation Drawer -------------------------
	MDNavigationDrawer:
		id: nav_drawer
		BoxLayout:
			orientation: "vertical"
			padding: "8dp"
			spacing: "8dp"
			MDLabel:
				text: "Titan Scouting"
				font_style: "Button"
				size_hint_y: None
				height: self.texture_size[1]

			MDLabel:
				text: "Data Analysis"
				font_style: "Caption"
				size_hint_y: None
				height: self.texture_size[1]
			ScrollView:
				MDList:
					OneLineAvatarListItem:
						text: "Home"
						on_press:
							# nav_drawer.set_state("close")
							# screen_manager.transition.direction = "left"
							screen_manager.current = "Home"
						IconLeftWidget:
							icon: "home"

					OneLineAvatarListItem:
						text: "Settings"
						on_press:
							# nav_drawer.set_state("close")
							# screen_manager.transition.direction = "right"
							# screen_manager.fade
							screen_manager.current = "Settings"
						IconLeftWidget:
							icon: "cog"
					OneLineAvatarListItem:
						text: "Info"
						on_press:
							# nav_drawer.set_state("close")
							# screen_manager.transition.direction = "right"
							# screen_manager.fade
							screen_manager.current = "Info"
						IconLeftWidget:
							icon: "cog"
					OneLineAvatarListItem:
						text: "Stats"
						on_press:
							# nav_drawer.set_state("close")
							# screen_manager.transition.direction = "right"
							# screen_manager.fade
							screen_manager.current = "Stats"
						IconLeftWidget:
							icon: "cog"
@@ -1,58 +0,0 @@
from kivy.lang import Builder

from kivymd.uix.list import OneLineListItem, MDList, TwoLineListItem, ThreeLineListItem
from kivymd.uix.list import OneLineIconListItem, IconLeftWidget
from kivy.uix.scrollview import ScrollView

from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from kivy.base import runTouchApp
from kivymd.uix.menu import MDDropdownMenu, MDMenuItem

from kivymd.app import MDApp
# import superscript as ss

# from tra_analysis import analysis as an
import data as d
from collections import defaultdict
import json
import math
import numpy as np
import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import matplotlib.pyplot as plt
from concurrent.futures import ThreadPoolExecutor
import time
import warnings

# global exec_threads

# Screens
class HomeScreen(Screen):
	pass
class SettingsScreen(Screen):
	pass
class InfoScreen(Screen):
	pass

class StatsScreen(Screen):
	pass

class MyApp(MDApp):
	def build(self):
		self.theme_cls.primary_palette = "Red"
		return Builder.load_file("design.kv")
	def test(self): # called from design.kv as app.test()
		print("test")

if __name__ == "__main__":
	MyApp().run()
19
src/requirements.txt
Normal file
@@ -0,0 +1,19 @@
requests
pymongo
pandas
tra-analysis

dnspython
pyinstaller

numpy
scipy
scikit-learn
six
pyparsing

kivy==2.0.0rc2
zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -2,6 +2,7 @@

block_cipher = None


a = Analysis(['superscript.py'],
	pathex=['/workspaces/tra-data-analysis/src'],
	binaries=[],
@@ -13,10 +14,7 @@ a = Analysis(['superscript.py'],
	],
	hookspath=[],
	runtime_hooks=[],
-	excludes=[
-		"matplotlib",
-		"pandas"
-	],
+	excludes=[],
	win_no_prefer_redirects=False,
	win_private_assemblies=False,
	cipher=block_cipher,
12
submit-debug.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long

cd competition
python superscript.py debug
12
submit-prod.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=PROD_slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long

cd competition
python superscript.py verbose
@@ -1,14 +0,0 @@
import signal
import zmq

signal.signal(signal.SIGINT, signal.SIG_DFL)

context = zmq.Context()

socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')

while True:
	message = socket.recv_multipart()
	print(f'Received: {message}')
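To exercise this subscriber in isolation, a matching publisher sketch; the port and payload mirror the connect address and 'status' topic above and are assumptions, not a confirmed interface of superscript.py:

# Hypothetical publisher counterpart for the subscriber above.
import time
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind('tcp://*:5678')

while True:
	socket.send_multipart([b'status', b'alive'])
	time.sleep(1)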