Mirror of https://github.com/titanscouting/tra-superscript.git (synced 2025-09-26 07:10:18 +00:00)
Compare commits
22 Commits
superscrip...master
SHA1
df37947b21
8e6c44db65
11398290eb
d847f6d6a7
b5c8a91fad
8e5fa7eace
69c6059ff8
fdcdadb8b2
cdd81295fc
82ec2d85cc
ac8002aaf8
25e4babd71
3fe2922e97
9752fd323b
ef63c1de7e
8908f05cbe
143218dda3
def2fc9b73
e8a5bb75f8
c9dd09f5e9
3c6e3ac58e
8c28c24d60
@@ -1,6 +1,7 @@
|
||||
FROM python:slim
|
||||
FROM ubuntu:20.04
|
||||
WORKDIR /
|
||||
RUN apt-get -y update; apt-get -y upgrade
|
||||
RUN apt-get -y install git binutils
|
||||
COPY requirements.txt .
|
||||
RUN pip install -r requirements.txt
|
||||
RUN apt-get -y update
|
||||
RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
|
||||
RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
|
||||
RUN ln -s $(which pip3) /usr/bin/pip
|
||||
RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
|
||||
|
2
.devcontainer/dev-dockerfile
Normal file
@@ -0,0 +1,2 @@
|
||||
FROM titanscout2022/tra-analysis-base:latest
|
||||
WORKDIR /
|
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "TRA Analysis Development Environment",
|
||||
"build": {
|
||||
"dockerfile": "Dockerfile",
|
||||
"dockerfile": "dev-dockerfile",
|
||||
},
|
||||
"settings": {
|
||||
"terminal.integrated.shell.linux": "/bin/bash",
|
||||
@@ -9,15 +9,14 @@
|
||||
"python.linting.enabled": true,
|
||||
"python.linting.pylintEnabled": true,
|
||||
"python.linting.pylintPath": "/usr/local/bin/pylint",
|
||||
"python.linting.pylintArgs": ["--indent-string", "\t"],
|
||||
"python.testing.pytestPath": "/usr/local/bin/pytest",
|
||||
"editor.tabSize": 4,
|
||||
"editor.insertSpaces": false
|
||||
"editor.tabSize": 4,
|
||||
"editor.insertSpaces": false
|
||||
},
|
||||
"extensions": [
|
||||
"mhutchie.git-graph",
|
||||
"ms-python.python",
|
||||
"waderyan.gitblame"
|
||||
],
|
||||
"postCreateCommand": ""
|
||||
"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
|
||||
}
|
@@ -1,11 +1,14 @@
|
||||
cerberus
|
||||
dnspython
|
||||
numpy
|
||||
pandas
|
||||
pyinstaller
|
||||
pylint
|
||||
pymongo
|
||||
pyparsing
|
||||
pytest
|
||||
python-daemon
|
||||
pyzmq
|
||||
requests
|
||||
scikit-learn
|
||||
scipy
|
||||
|
38
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,38 +0,0 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**To Reproduce**
|
||||
Steps to reproduce the behavior:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
**Expected behavior**
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
**Screenshots**
|
||||
If applicable, add screenshots to help explain your problem.
|
||||
|
||||
**Desktop (please complete the following information):**
|
||||
- OS: [e.g. iOS]
|
||||
- Browser [e.g. chrome, safari]
|
||||
- Version [e.g. 22]
|
||||
|
||||
**Smartphone (please complete the following information):**
|
||||
- Device: [e.g. iPhone6]
|
||||
- OS: [e.g. iOS8.1]
|
||||
- Browser [e.g. stock browser, safari]
|
||||
- Version [e.g. 22]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the problem here.
|
20
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,20 +0,0 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
24
.github/workflows/build-cli.yml
vendored
@@ -1,7 +1,7 @@
|
||||
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
|
||||
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
|
||||
|
||||
name: Build Superscript Linux
|
||||
name: Superscript Unit Tests
|
||||
|
||||
on:
|
||||
release:
|
||||
@@ -11,25 +11,7 @@ jobs:
|
||||
generate:
|
||||
name: Build Linux
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout master
|
||||
uses: actions/checkout@master
|
||||
- name: Install Dependencies
|
||||
run: pip install -r requirements.txt
|
||||
working-directory: src/
|
||||
- name: Give Execute Permission
|
||||
run: chmod +x build-CLI.sh
|
||||
working-directory: build/
|
||||
- name: Build Binary
|
||||
run: ./build-CLI.sh
|
||||
working-directory: build/
|
||||
- name: Copy Binary to Root Dir
|
||||
run: cp superscript ..
|
||||
working-directory: dist/
|
||||
- name: Upload Release Asset
|
||||
uses: svenstaro/upload-release-action@v2
|
||||
with:
|
||||
repo_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
file: superscript
|
||||
asset_name: superscript
|
||||
tag: ${{ github.ref }}
|
||||
uses: actions/checkout@master
|
||||
|
6
.gitignore
vendored
@@ -9,6 +9,9 @@
|
||||
**/tra_analysis/
|
||||
**/temp/*
|
||||
|
||||
**/errorlog.txt
|
||||
/dist/superscript.*
|
||||
/dist/superscript
|
||||
**/*.pid
|
||||
|
||||
**/profile.*
|
||||
@@ -16,3 +19,6 @@
|
||||
**/*.log
|
||||
**/errorlog.txt
|
||||
/dist/*
|
||||
|
||||
slurm-tra-superscript.out
|
||||
config*.json
|
||||
|
@@ -1,4 +1,4 @@
|
||||
# Red Alliance Analysis · 
|
||||
# Red Alliance Analysis · 
|
||||
|
||||
Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
set pathtospec="superscript.spec"
|
||||
set pathtospec="../src/superscript.spec"
|
||||
set pathtodist="../dist/"
|
||||
set pathtowork="temp/"
|
||||
|
||||
pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
|
||||
pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
|
@@ -1,5 +1,5 @@
|
||||
pathtospec="superscript.spec"
|
||||
pathtospec="../src/superscript.spec"
|
||||
pathtodist="../dist/"
|
||||
pathtowork="temp/"
|
||||
|
||||
pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
|
||||
pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
|
@@ -1,35 +0,0 @@
|
||||
# -*- mode: python ; coding: utf-8 -*-
|
||||
|
||||
block_cipher = None
|
||||
|
||||
a = Analysis(
|
||||
['../src/superscript.py'],
|
||||
pathex=[],
|
||||
binaries=[],
|
||||
datas=[],
|
||||
hiddenimports=['dnspython', 'sklearn.utils._weight_vector', 'sklearn.utils._typedefs', 'sklearn.neighbors._partition_nodes', 'requests'],
|
||||
hookspath=[],
|
||||
hooksconfig={},
|
||||
runtime_hooks=[],
|
||||
excludes=[],
|
||||
win_no_prefer_redirects=False,
|
||||
win_private_assemblies=False,
|
||||
cipher=block_cipher,
|
||||
noarchive=False
|
||||
)
|
||||
pyz = PYZ(a.pure, a.zipped_data,
|
||||
cipher=block_cipher)
|
||||
exe = EXE(pyz,
|
||||
a.scripts,
|
||||
a.binaries,
|
||||
a.zipfiles,
|
||||
a.datas,
|
||||
[('W ignore', None, 'OPTION')],
|
||||
name='superscript',
|
||||
debug=False,
|
||||
bootloader_ignore_signals=False,
|
||||
strip=False,
|
||||
upx=True,
|
||||
upx_exclude=[],
|
||||
runtime_tmpdir=None,
|
||||
console=True )
|
@@ -2,6 +2,8 @@ import json
|
||||
from exceptions import ConfigurationError
|
||||
from cerberus import Validator
|
||||
|
||||
from data import set_database_config, get_database_config
|
||||
|
||||
class Configuration:
|
||||
|
||||
path = None
|
||||
@@ -183,24 +185,33 @@ class Configuration:
|
||||
if not isValidated:
|
||||
raise ConfigurationError("config validation error: " + v.errors)
|
||||
|
||||
def __getattr__(self, name): # better hashed lookup method for common multikey-value paths, TYPE UNSAFE
|
||||
attr_lookup = {
|
||||
"persistent": self.config["persistent"],
|
||||
"key": self.config["persistent"]["key"],
|
||||
"database": self.config["persistent"]["key"]["database"],
|
||||
"tba": self.config["persistent"]["key"]["tba"],
|
||||
"tra": self.config["persistent"]["key"]["tra"],
|
||||
"priority": self.config["persistent"]["config-preference"],
|
||||
"sync": self.config["persistent"]["synchronize-config"],
|
||||
"variable": self.config["variable"],
|
||||
"event_delay": self.config["variable"]["event-delay"],
|
||||
"loop_delay": self.config["variable"]["loop-delay"],
|
||||
"competition": self.config["variable"]["competition"],
|
||||
"modules": self.config["variable"]["modules"]
|
||||
}
|
||||
try:
|
||||
return attr_lookup[name]
|
||||
except KeyError:
|
||||
def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
|
||||
if name == "persistent":
|
||||
return self.config["persistent"]
|
||||
elif name == "key":
|
||||
return self.config["persistent"]["key"]
|
||||
elif name == "database":
|
||||
# soon to be deprecated
|
||||
return self.config["persistent"]["key"]["database"]
|
||||
elif name == "tba":
|
||||
return self.config["persistent"]["key"]["tba"]
|
||||
elif name == "tra":
|
||||
return self.config["persistent"]["key"]["tra"]
|
||||
elif name == "priority":
|
||||
return self.config["persistent"]["config-preference"]
|
||||
elif name == "sync":
|
||||
return self.config["persistent"]["synchronize-config"]
|
||||
elif name == "variable":
|
||||
return self.config["variable"]
|
||||
elif name == "event_delay":
|
||||
return self.config["variable"]["event-delay"]
|
||||
elif name == "loop_delay":
|
||||
return self.config["variable"]["loop-delay"]
|
||||
elif name == "competition":
|
||||
return self.config["variable"]["competition"]
|
||||
elif name == "modules":
|
||||
return self.config["variable"]["modules"]
|
||||
else:
|
||||
return None
|
||||
|
||||
def __getitem__(self, key):
|
||||
@@ -213,14 +224,14 @@ class Configuration:
|
||||
if sync:
|
||||
if priority == "local" or priority == "client":
|
||||
logger.info("config-preference set to local/client, loading local config information")
|
||||
remote_config = client.get_database_config()
|
||||
remote_config = get_database_config(client)
|
||||
if remote_config != self.config["variable"]:
|
||||
client.set_database_config(self.config["variable"])
|
||||
set_database_config(client, self.config["variable"])
|
||||
logger.info("database config was different and was updated")
|
||||
# no change to config
|
||||
elif priority == "remote" or priority == "database":
|
||||
logger.info("config-preference set to remote/database, loading remote config information")
|
||||
remote_config = client.get_database_config()
|
||||
remote_config = get_database_config(client)
|
||||
if remote_config != self.config["variable"]:
|
||||
self.config["variable"] = remote_config
|
||||
self.save_config()
|
||||
@@ -234,7 +245,7 @@ class Configuration:
|
||||
# no change to config
|
||||
elif priority == "remote" or priority == "database":
|
||||
logger.info("config-preference set to remote/database, loading database config information")
|
||||
self.config["variable"] = client.get_database_config()
|
||||
self.config["variable"] = get_database_config(client)
|
||||
# change variable to match remote without updating local version
|
||||
else:
|
||||
raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")
|
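
Whichever lookup form is kept (the dict table or the if/elif chain above), the effect of __getattr__ is the same: common nested paths in the loaded config dict are exposed as plain attributes on the Configuration object. A minimal sketch of how the rest of the code consumes that, assuming a config.json shaped like the sample in dep.py:

config = Configuration("config.json")   # as constructed in superscript.py's main loop
apikey = config.database                # -> config["persistent"]["key"]["database"]
tbakey = config.tba                     # -> config["persistent"]["key"]["tba"]
config_modules = config.modules         # -> config["variable"]["modules"]
competition = config.competition        # -> config["variable"]["competition"]
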
218
competition/data.py
Normal file
@@ -0,0 +1,218 @@
|
||||
from calendar import c
|
||||
import requests
|
||||
import pull
|
||||
import pandas as pd
|
||||
import json
|
||||
|
||||
def pull_new_tba_matches(apikey, competition, last_match):
|
||||
api_key= apikey
|
||||
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
|
||||
json = x.json()
|
||||
out = []
|
||||
for i in json:
|
||||
if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match :
|
||||
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
|
||||
out.sort(key=lambda x: x['match'])
|
||||
return out
|
||||
|
||||
def pull_new_tba_matches_manual(apikey, competition, cutoff):
|
||||
filename = competition+"-wins.json"
|
||||
with open(filename, 'r') as f:
|
||||
data = json.load(f)
|
||||
return data
|
||||
|
||||
def get_team_match_data(client, competition, team_num):
|
||||
db = client.data_scouting
|
||||
mdata = db.matchdata
|
||||
out = {}
|
||||
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
|
||||
out[i['match']] = i['data']
|
||||
return pd.DataFrame(out)
|
||||
|
||||
def clear_metrics(client, competition):
|
||||
db = client.data_processing
|
||||
data = db.team_metrics
|
||||
data.delete_many({competition: competition})
|
||||
return True
|
||||
|
||||
def get_team_pit_data(client, competition, team_num):
|
||||
db = client.data_scouting
|
||||
mdata = db.pitdata
|
||||
out = {}
|
||||
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
|
||||
|
||||
def get_team_metrics_data(client, competition, team_num):
|
||||
db = client.data_processing
|
||||
mdata = db.team_metrics
|
||||
temp = mdata.find_one({"team": team_num})
|
||||
if temp != None:
|
||||
if competition in temp['metrics'].keys():
|
||||
temp = temp['metrics'][competition]
|
||||
else :
|
||||
temp = None
|
||||
else:
|
||||
temp = None
|
||||
return temp
|
||||
|
||||
def get_match_data_formatted(client, competition):
|
||||
teams_at_comp = pull.get_teams_at_competition(competition)
|
||||
out = {}
|
||||
for team in teams_at_comp:
|
||||
try:
|
||||
out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_metrics_data_formatted(client, competition):
|
||||
teams_at_comp = pull.get_teams_at_competition(competition)
|
||||
out = {}
|
||||
for team in teams_at_comp:
|
||||
try:
|
||||
out[int(team)] = get_team_metrics_data(client, competition, int(team))
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_pit_data_formatted(client, competition):
|
||||
x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
|
||||
x = x.json()
|
||||
x = x['data']
|
||||
x = x.keys()
|
||||
out = {}
|
||||
for i in x:
|
||||
try:
|
||||
out[int(i)] = get_team_pit_data(client, competition, int(i))
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_pit_variable_data(client, competition):
|
||||
db = client.data_processing
|
||||
mdata = db.team_pit
|
||||
out = {}
|
||||
return mdata.find()
|
||||
|
||||
def get_pit_variable_formatted(client, competition):
|
||||
temp = get_pit_variable_data(client, competition)
|
||||
out = {}
|
||||
for i in temp:
|
||||
out[i["variable"]] = i["data"]
|
||||
return out
|
||||
|
||||
def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
|
||||
|
||||
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)
|
||||
|
||||
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
|
||||
|
||||
def get_analysis_flags(client, flag):
|
||||
db = client.data_processing
|
||||
mdata = db.flags
|
||||
return mdata.find_one({"_id": "2022"})
|
||||
|
||||
def set_analysis_flags(client, flag, data):
|
||||
db = client.data_processing
|
||||
mdata = db.flags
|
||||
return mdata.update_one({"_id": "2022"}, {"$set": data})
|
||||
|
||||
def unkeyify_2l(layered_dict):
|
||||
out = {}
|
||||
for i in layered_dict.keys():
|
||||
add = []
|
||||
sortkey = []
|
||||
for j in layered_dict[i].keys():
|
||||
add.append([j,layered_dict[i][j]])
|
||||
add.sort(key = lambda x: x[0])
|
||||
out[i] = list(map(lambda x: x[1], add))
|
||||
return out
|
||||
|
||||
def get_previous_time(client):
|
||||
|
||||
previous_time = get_analysis_flags(client, "latest_update")
|
||||
|
||||
if previous_time == None:
|
||||
|
||||
set_analysis_flags(client, "latest_update", 0)
|
||||
previous_time = 0
|
||||
|
||||
else:
|
||||
|
||||
previous_time = previous_time["latest_update"]
|
||||
|
||||
return previous_time
|
||||
|
||||
def set_current_time(client, current_time):
|
||||
|
||||
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
|
||||
|
||||
def get_database_config(client):
|
||||
|
||||
remote_config = get_analysis_flags(client, "config")
|
||||
return remote_config["config"] if remote_config != None else None
|
||||
|
||||
def set_database_config(client, config):
|
||||
|
||||
set_analysis_flags(client, "config", {"config": config})
|
||||
|
||||
def load_match(client, competition):
|
||||
|
||||
return get_match_data_formatted(client, competition)
|
||||
|
||||
def load_metric(client, competition, match, group_name, metrics):
|
||||
|
||||
group = {}
|
||||
|
||||
for team in match[group_name]:
|
||||
|
||||
db_data = get_team_metrics_data(client, competition, team)
|
||||
|
||||
if db_data == None:
|
||||
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
|
||||
|
||||
group[team] = {"gl2": gl2}
|
||||
|
||||
else:
|
||||
|
||||
metrics = db_data
|
||||
|
||||
gl2 = metrics["gl2"]
|
||||
|
||||
group[team] = {"gl2": gl2}
|
||||
|
||||
return group
|
||||
|
||||
def load_pit(client, competition):
|
||||
|
||||
return get_pit_data_formatted(client, competition)
|
||||
|
||||
def push_match(client, competition, results):
|
||||
|
||||
for team in results:
|
||||
|
||||
push_team_tests_data(client, competition, team, results[team])
|
||||
|
||||
def push_metric(client, competition, metric):
|
||||
|
||||
for team in metric:
|
||||
|
||||
push_team_metrics_data(client, competition, team, metric[team])
|
||||
|
||||
def push_pit(client, competition, pit):
|
||||
|
||||
for variable in pit:
|
||||
|
||||
push_team_pit_data(client, competition, variable, pit[variable])
|
||||
|
||||
def check_new_database_matches(client, competition):
|
||||
|
||||
return True
|
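
data.py threads one pymongo client through every helper instead of re-creating connections, and keeps loop bookkeeping in the data_processing.flags collection. A short usage sketch, assuming a local MongoDB plus a hypothetical TBA key and event code:

import time
import pymongo
import data as d

client = pymongo.MongoClient("mongodb://localhost:27017")       # placeholder URI
previous_time = d.get_previous_time(client)                     # 0 on first run, else the stored "latest_update" flag
matches = d.pull_new_tba_matches("TBA_AUTH_KEY", "2022ilpe", 0) # TBA key and event code are hypothetical
d.set_current_time(client, time.time())                         # record this pass for the next loop
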
132
competition/dep.py
Normal file
@@ -0,0 +1,132 @@
|
||||
# contains deprecated functions, not to be used unless necessary!
|
||||
|
||||
import json
|
||||
|
||||
sample_json = """
|
||||
{
|
||||
"persistent":{
|
||||
"key":{
|
||||
"database":"",
|
||||
"tba":"",
|
||||
"tra":{
|
||||
"CLIENT_ID":"",
|
||||
"CLIENT_SECRET":"",
|
||||
"url": ""
|
||||
}
|
||||
},
|
||||
"config-preference":"local",
|
||||
"synchronize-config":false
|
||||
},
|
||||
"variable":{
|
||||
"max-threads":0.5,
|
||||
"team":"",
|
||||
"event-delay":false,
|
||||
"loop-delay":0,
|
||||
"reportable":true,
|
||||
"teams":[
|
||||
|
||||
],
|
||||
"modules":{
|
||||
"match":{
|
||||
"tests":{
|
||||
"balls-blocked":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-collected":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-lower-teleop":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-lower-auto":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-started":[
|
||||
"basic_stats",
|
||||
"historical_analyss",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-upper-teleop":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
],
|
||||
"balls-upper-auto":[
|
||||
"basic_stats",
|
||||
"historical_analysis",
|
||||
"regression_linear",
|
||||
"regression_logarithmic",
|
||||
"regression_exponential",
|
||||
"regression_polynomial",
|
||||
"regression_sigmoidal"
|
||||
]
|
||||
}
|
||||
},
|
||||
"metric":{
|
||||
"tests":{
|
||||
"gl2":{
|
||||
"score":1500,
|
||||
"rd":250,
|
||||
"vol":0.06
|
||||
},
|
||||
}
|
||||
},
|
||||
"pit":{
|
||||
"tests":{
|
||||
"wheel-mechanism":true,
|
||||
"low-balls":true,
|
||||
"high-balls":true,
|
||||
"wheel-success":true,
|
||||
"strategic-focus":true,
|
||||
"climb-mechanism":true,
|
||||
"attitude":true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
def load_config(path, config_vector):
|
||||
try:
|
||||
f = open(path, "r")
|
||||
config_vector.update(json.load(f))
|
||||
f.close()
|
||||
return 0
|
||||
except:
|
||||
f = open(path, "w")
|
||||
f.write(sample_json)
|
||||
f.close()
|
||||
return 1
|
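
load_config is the legacy loader kept here for pull.py: it merges the file into the caller's dict and returns 0, or writes the bundled sample_json and returns 1 so the caller knows the keys still need to be filled in. A hedged usage sketch:

from dep import load_config

config = {}
if load_config("config.json", config) == 1:
	print("wrote a sample config.json, fill in the keys and rerun")   # illustrative handling only
else:
	trakey = config["persistent"]["key"]["tra"]
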
@@ -23,7 +23,7 @@ class Logger(L):
|
||||
|
||||
self.file = file
|
||||
|
||||
if file is not None:
|
||||
if file != None:
|
||||
self.targets.append(self._send_file)
|
||||
|
||||
if profile:
|
@@ -1,4 +1,5 @@
|
||||
import abc
|
||||
import data as d
|
||||
import signal
|
||||
import numpy as np
|
||||
from tra_analysis import Analysis as an
|
||||
@@ -16,7 +17,7 @@ class Module(metaclass = abc.ABCMeta):
|
||||
callable(subclass.run)
|
||||
)
|
||||
@abc.abstractmethod
|
||||
def __init__(self, *args, **kwargs):
|
||||
def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
|
||||
raise NotImplementedError
|
||||
@abc.abstractmethod
|
||||
def validate_config(self, *args, **kwargs):
|
||||
@@ -28,16 +29,20 @@ class Module(metaclass = abc.ABCMeta):
|
||||
class Match (Module):
|
||||
|
||||
config = None
|
||||
apikey = None
|
||||
tbakey = None
|
||||
timestamp = None
|
||||
client = None
|
||||
competition = None
|
||||
|
||||
data = None
|
||||
results = None
|
||||
|
||||
def __init__(self, config, timestamp, client):
|
||||
def __init__(self, config, apikey, tbakey, timestamp, competition):
|
||||
self.config = config
|
||||
self.apikey = apikey
|
||||
self.tbakey = tbakey
|
||||
self.timestamp = timestamp
|
||||
self.client = client
|
||||
self.competition = competition
|
||||
|
||||
def validate_config(self):
|
||||
return True, ""
|
||||
@@ -48,7 +53,7 @@ class Match (Module):
|
||||
self._push_results()
|
||||
|
||||
def _load_data(self):
|
||||
self.data = self.client.load_match()
|
||||
self.data = d.load_match(self.apikey, self.competition)
|
||||
|
||||
def _simplestats(self, data_test):
|
||||
|
||||
@@ -88,7 +93,7 @@ class Match (Module):
|
||||
|
||||
input_vector = []
|
||||
|
||||
for team in tqdm(data, desc = "Match Module ", unit = " team"):
|
||||
for team in data:
|
||||
|
||||
for variable in data[team]:
|
||||
|
||||
@@ -136,21 +141,25 @@ class Match (Module):
|
||||
|
||||
self.results = return_vector
|
||||
|
||||
self.client.push_match(self.results)
|
||||
d.push_match(self.apikey, self.competition, self.results)
|
||||
|
||||
class Metric (Module):
|
||||
|
||||
config = None
|
||||
apikey = None
|
||||
tbakey = None
|
||||
timestamp = None
|
||||
client = None
|
||||
competition = None
|
||||
|
||||
data = None
|
||||
results = None
|
||||
|
||||
def __init__(self, config, timestamp, client):
|
||||
def __init__(self, config, apikey, tbakey, timestamp, competition):
|
||||
self.config = config
|
||||
self.apikey = apikey
|
||||
self.tbakey = tbakey
|
||||
self.timestamp = timestamp
|
||||
self.client = client
|
||||
self.competition = competition
|
||||
|
||||
def validate_config(self):
|
||||
return True, ""
|
||||
@@ -161,27 +170,21 @@ class Metric (Module):
|
||||
self._push_results()
|
||||
|
||||
def _load_data(self):
|
||||
self.data = self.client.pull_new_tba_matches(self.timestamp)
|
||||
self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match']
|
||||
print("Previous last match", self.last_match)
|
||||
self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match)
|
||||
|
||||
def _process_data(self):
|
||||
|
||||
self.results = {}
|
||||
|
||||
elo_N = self.config["tests"]["elo"]["N"]
|
||||
elo_K = self.config["tests"]["elo"]["K"]
|
||||
|
||||
self.match = self.last_match
|
||||
matches = self.data
|
||||
|
||||
red = {}
|
||||
blu = {}
|
||||
|
||||
for match in tqdm(matches, desc = "Metric Module ", unit = " match"):
|
||||
|
||||
red = self.client.load_metric(match, "red", self.config["tests"])
|
||||
blu = self.client.load_metric(match, "blue", self.config["tests"])
|
||||
|
||||
elo_red_total = 0
|
||||
elo_blu_total = 0
|
||||
for match in tqdm(matches, desc="Metrics"): # grab matches and loop through each one
|
||||
self.match = max(self.match, int(match['match']))
|
||||
red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red
|
||||
blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue
|
||||
|
||||
gl2_red_score_total = 0
|
||||
gl2_blu_score_total = 0
|
||||
@@ -192,90 +195,83 @@ class Metric (Module):
|
||||
gl2_red_vol_total = 0
|
||||
gl2_blu_vol_total = 0
|
||||
|
||||
for team in red:
|
||||
|
||||
elo_red_total += red[team]["elo"]["score"]
|
||||
for team in red: # for each team in red, add up gl2 score components
|
||||
|
||||
gl2_red_score_total += red[team]["gl2"]["score"]
|
||||
gl2_red_rd_total += red[team]["gl2"]["rd"]
|
||||
gl2_red_vol_total += red[team]["gl2"]["vol"]
|
||||
|
||||
for team in blu:
|
||||
|
||||
elo_blu_total += blu[team]["elo"]["score"]
|
||||
for team in blu: # for each team in blue, add up gl2 score components
|
||||
|
||||
gl2_blu_score_total += blu[team]["gl2"]["score"]
|
||||
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
|
||||
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
|
||||
|
||||
red_elo = {"score": elo_red_total / len(red)}
|
||||
blu_elo = {"score": elo_blu_total / len(blu)}
|
||||
|
||||
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
|
||||
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
|
||||
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3
|
||||
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the scores by dividing by 3
|
||||
|
||||
|
||||
if match["winner"] == "red":
|
||||
if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}
|
||||
|
||||
observations = {"red": 1, "blu": 0}
|
||||
|
||||
elif match["winner"] == "blue":
|
||||
elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}
|
||||
|
||||
observations = {"red": 0, "blu": 1}
|
||||
|
||||
else:
|
||||
else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}
|
||||
|
||||
observations = {"red": 0.5, "blu": 0.5}
|
||||
|
||||
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
|
||||
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
|
||||
|
||||
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
|
||||
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
|
||||
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
|
||||
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue
|
||||
|
||||
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
|
||||
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
|
||||
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
|
||||
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue
|
||||
|
||||
for team in red:
|
||||
|
||||
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
|
||||
for team in red: # for each team on red, add the previous score with the delta to find the new score
|
||||
|
||||
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
|
||||
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
|
||||
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
|
||||
|
||||
for team in blu:
|
||||
|
||||
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
|
||||
for team in blu: # for each team on blue, add the previous score with the delta to find the new score
|
||||
|
||||
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
|
||||
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
|
||||
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
|
||||
|
||||
temp_vector = {}
|
||||
temp_vector.update(red)
|
||||
temp_vector.update(red) # merge the red teams' updated scores into the temporary vector
|
||||
temp_vector.update(blu)
|
||||
|
||||
self.results[match['match']] = temp_vector
|
||||
|
||||
self.client.push_metric(temp_vector)
|
||||
|
||||
d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
|
||||
print("New last match", self.match)
|
||||
d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})
|
||||
def _push_results(self):
|
||||
pass
|
||||
|
||||
class Pit (Module):
|
||||
|
||||
config = None
|
||||
apikey = None
|
||||
tbakey = None
|
||||
timestamp = None
|
||||
client = None
|
||||
competition = None
|
||||
|
||||
data = None
|
||||
results = None
|
||||
|
||||
def __init__(self, config, timestamp, client):
|
||||
def __init__(self, config, apikey, tbakey, timestamp, competition):
|
||||
self.config = config
|
||||
self.apikey = apikey
|
||||
self.tbakey = tbakey
|
||||
self.timestamp = timestamp
|
||||
self.client = client
|
||||
self.competition = competition
|
||||
|
||||
def validate_config(self):
|
||||
return True, ""
|
||||
@@ -286,12 +282,12 @@ class Pit (Module):
|
||||
self._push_results()
|
||||
|
||||
def _load_data(self):
|
||||
self.data = self.client.load_pit()
|
||||
self.data = d.load_pit(self.apikey, self.competition)
|
||||
|
||||
def _process_data(self):
|
||||
tests = self.config["tests"]
|
||||
return_vector = {}
|
||||
for team in tqdm(self.data, desc = "Pit Module ", unit = " team"):
|
||||
for team in self.data:
|
||||
for variable in self.data[team]:
|
||||
if variable in tests:
|
||||
if not variable in return_vector:
|
||||
@@ -301,7 +297,7 @@ class Pit (Module):
|
||||
self.results = return_vector
|
||||
|
||||
def _push_results(self):
|
||||
self.client.push_pit(self.results)
|
||||
d.push_pit(self.apikey, self.competition, self.results)
|
||||
|
||||
class Rating (Module):
|
||||
pass
|
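
The Metric module's per-match update works on alliance averages: sum each team's Glicko-2 components, divide by the alliance size, run glicko2 once per alliance, then apply the resulting delta back to every team on that alliance. A minimal sketch of the red side of that calculation, using the same tra_analysis call shown above (team numbers and ratings are hypothetical):

from tra_analysis import Analysis as an

red = {254: {"gl2": {"score": 1500, "rd": 250, "vol": 0.06}}}   # hypothetical current ratings
blu = {118: {"gl2": {"score": 1480, "rd": 240, "vol": 0.06}}}

red_gl2 = {k: sum(t["gl2"][k] for t in red.values()) / len(red) for k in ("score", "rd", "vol")}
blu_gl2 = {k: sum(t["gl2"][k] for t in blu.values()) / len(blu) for k in ("score", "rd", "vol")}

observations = {"red": 1, "blu": 0}                             # red won this match
new_score, new_rd, new_vol = an.Metric().glicko2(
	red_gl2["score"], red_gl2["rd"], red_gl2["vol"],
	[blu_gl2["score"]], [blu_gl2["rd"]],
	[observations["red"], observations["blu"]])
score_delta = new_score - red_gl2["score"]                      # added to every red team's stored score
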
63
competition/pull.py
Normal file
@@ -0,0 +1,63 @@
|
||||
import requests
|
||||
from exceptions import APIError
|
||||
from dep import load_config
|
||||
|
||||
url = "https://scouting.titanrobotics2022.com"
|
||||
config_tra = {}
|
||||
load_config("config.json", config_tra)
|
||||
trakey = config_tra['persistent']['key']['tra']
|
||||
|
||||
def get_team_competition():
|
||||
endpoint = '/api/fetchTeamCompetition'
|
||||
params = {
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['competition']
|
||||
else:
|
||||
raise APIError(json)
|
||||
|
||||
def get_team():
|
||||
endpoint = '/api/fetchTeamCompetition'
|
||||
params = {
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['team']
|
||||
else:
|
||||
raise APIError(json)
|
||||
|
||||
def get_team_match_data(competition, team_num):
|
||||
endpoint = '/api/fetchAllTeamMatchData'
|
||||
params = {
|
||||
"competition": competition,
|
||||
"teamScouted": team_num,
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['data'][team_num]
|
||||
else:
|
||||
raise APIError(json)
|
||||
|
||||
def get_teams_at_competition(competition):
|
||||
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
|
||||
params = {
|
||||
"competition": competition,
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return list(json['data'].keys())
|
||||
else:
|
||||
raise APIError(json)
|
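
pull.py loads the TRA CLIENT_ID/CLIENT_SECRET from config.json at import time (via dep.load_config), so callers only pass the competition key and handle APIError on failure. A short sketch with a hypothetical event code:

import pull
from exceptions import APIError

try:
	teams = pull.get_teams_at_competition("2022ilpe")   # hypothetical event code
except APIError as err:
	print("TRA API request failed:", err)
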
15
competition/requirements.txt
Normal file
@@ -0,0 +1,15 @@
|
||||
cerberus
|
||||
dnspython
|
||||
numpy
|
||||
pandas
|
||||
pyinstaller
|
||||
pylint
|
||||
pymongo
|
||||
pyparsing
|
||||
python-daemon
|
||||
pyzmq
|
||||
requests
|
||||
scikit-learn
|
||||
scipy
|
||||
six
|
||||
tra-analysis
|
402
competition/superscript.py
Normal file
@@ -0,0 +1,402 @@
|
||||
# Titan Robotics Team 2022: Superscript Script
|
||||
# Written by Arthur Lu, Jacob Levine, and Dev Singh
|
||||
# Notes:
|
||||
# setup:
|
||||
|
||||
__version__ = "1.0.0"
|
||||
|
||||
# changelog should be viewed using print(analysis.__changelog__)
|
||||
__changelog__ = """changelog:
|
||||
1.0.0:
|
||||
- superscript now runs as a PEP 3143 compliant, well-behaved daemon on Linux systems
|
||||
- linux superscript daemon has integrated websocket output to monitor progress/status remotely
|
||||
- linux daemon now sends stderr to errorlog.log
|
||||
- added verbose option to linux superscript to allow for interactive output
|
||||
- moved pymongo import to superscript.py
|
||||
- added profile option to linux superscript to profile runtime of script
|
||||
- reduced memory usage slightly by consolidating the unwrapped input data
|
||||
- added debug option, which performs one loop of analysis and dumps results to local files
|
||||
- added event and time delay options to config
|
||||
- event delay pauses loop until the event listener receives an update
|
||||
- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
|
||||
- added options to pull config information from database (retains option to use local config file)
|
||||
- config-preference option selects between prioritizing local config and prioritizing database config
|
||||
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
|
||||
- divided config options between persistent ones (keys), and variable ones (everything else)
|
||||
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
|
||||
- module.py contains classes, each one represents a single data analysis routine
|
||||
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
|
||||
0.9.3:
|
||||
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
|
||||
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
|
||||
0.9.2:
|
||||
- removed unnecessary imports from data
|
||||
- minor changes to interface
|
||||
0.9.1:
|
||||
- fixed bugs in configuration item loading exception handling
|
||||
0.9.0:
|
||||
- moved printing and logging related functions to interface.py (changelog will stay in this file)
|
||||
- changed function return values for load_config and save_config to standard C values (0 for success, 1 for error)
|
||||
- added local variables for config location
|
||||
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
|
||||
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
|
||||
0.8.6:
|
||||
- added proper main function
|
||||
0.8.5:
|
||||
- added more graceful KeyboardInterrupt exiting
|
||||
- redirected stderr to errorlog.txt
|
||||
0.8.4:
|
||||
- added better error message for missing config.json
|
||||
- added automatic config.json creation
|
||||
- added splash text with version and system info
|
||||
0.8.3:
|
||||
- updated matchloop with new regression format (requires tra_analysis 3.x)
|
||||
0.8.2:
|
||||
- readded while true to main function
|
||||
- added more thread config options
|
||||
0.8.1:
|
||||
- optimized matchloop further by bypassing GIL
|
||||
0.8.0:
|
||||
- added multithreading to matchloop
|
||||
- tweaked user log
|
||||
0.7.0:
|
||||
- finished implementing main function
|
||||
0.6.2:
|
||||
- integrated get_team_rankings.py as get_team_metrics() function
|
||||
- integrated visualize_pit.py as graph_pit_histogram() function
|
||||
0.6.1:
|
||||
- bug fixes with analysis.Metric() calls
|
||||
- modified metric functions to use config.json defined default values
|
||||
0.6.0:
|
||||
- removed main function
|
||||
- changed load_config function
|
||||
- added save_config function
|
||||
- added load_match function
|
||||
- renamed simpleloop to matchloop
|
||||
- moved simplestats function inside matchloop
|
||||
- renamed load_metrics to load_metric
|
||||
- renamed metricsloop to metricloop
|
||||
- split push to database functions among push_match, push_metric, push_pit
|
||||
- moved
|
||||
0.5.2:
|
||||
- made changes due to refactoring of analysis
|
||||
0.5.1:
|
||||
- text fixes
|
||||
- removed matplotlib requirement
|
||||
0.5.0:
|
||||
- improved user interface
|
||||
0.4.2:
|
||||
- removed unnecessary code
|
||||
0.4.1:
|
||||
- fixed bug where X range for regression was determined before sanitization
|
||||
- better sanitized data
|
||||
0.4.0:
|
||||
- fixed spelling issue in __changelog__
|
||||
- addressed nan bug in regression
|
||||
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
|
||||
- fixed errors in metrics computing
|
||||
0.3.0:
|
||||
- added analysis to pit data
|
||||
0.2.1:
|
||||
- minor stability patches
|
||||
- implemented db syncing for timestamps
|
||||
- fixed bugs
|
||||
0.2.0:
|
||||
- finalized testing and small fixes
|
||||
0.1.4:
|
||||
- finished metrics implementation, trueskill is bugged
|
||||
0.1.3:
|
||||
- working
|
||||
0.1.2:
|
||||
- started implementation of metrics
|
||||
0.1.1:
|
||||
- cleaned up imports
|
||||
0.1.0:
|
||||
- tested working, can push to database
|
||||
0.0.9:
|
||||
- tested working
|
||||
- prints out stats for the time being, will push to database later
|
||||
0.0.8:
|
||||
- added data import
|
||||
- removed tba import
|
||||
- finished main method
|
||||
0.0.7:
|
||||
- added load_config
|
||||
- optimized simpleloop for readability
|
||||
- added __all__ entries
|
||||
- added simplestats engine
|
||||
- pending testing
|
||||
0.0.6:
|
||||
- fixes
|
||||
0.0.5:
|
||||
- imported pickle
|
||||
- created custom database object
|
||||
0.0.4:
|
||||
- fixed simpleloop to actually return a vector
|
||||
0.0.3:
|
||||
- added metricsloop which is unfinished
|
||||
0.0.2:
|
||||
- added simpleloop which is untested until data is provided
|
||||
0.0.1:
|
||||
- created script
|
||||
- added analysis, numba, numpy imports
|
||||
"""
|
||||
|
||||
__author__ = (
|
||||
"Arthur Lu <learthurgo@gmail.com>",
|
||||
"Jacob Levine <jlevine@imsa.edu>",
|
||||
)
|
||||
|
||||
# imports:
|
||||
|
||||
import os, sys, time
|
||||
import pymongo # soon to be deprecated
|
||||
import traceback
|
||||
import warnings
|
||||
from config import Configuration, ConfigurationError
|
||||
from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
|
||||
from interface import Logger
|
||||
from module import Match, Metric, Pit
|
||||
import zmq
|
||||
|
||||
config_path = "config.json"
|
||||
|
||||
def main(logger, verbose, profile, debug, socket_send = None):
|
||||
|
||||
def close_all():
|
||||
if "client" in locals():
|
||||
client.close()
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
logger.splash(__version__)
|
||||
|
||||
modules = {"match": Match, "metric": Metric, "pit": Pit}
|
||||
|
||||
while True:
|
||||
|
||||
try:
|
||||
|
||||
loop_start = time.time()
|
||||
|
||||
logger.info("current time: " + str(loop_start))
|
||||
socket_send("current time: " + str(loop_start))
|
||||
|
||||
config = Configuration(config_path)
|
||||
|
||||
logger.info("found and loaded config at <" + config_path + ">")
|
||||
socket_send("found and loaded config at <" + config_path + ">")
|
||||
|
||||
apikey, tbakey = config.database, config.tba
|
||||
|
||||
logger.info("found and loaded database and tba keys")
|
||||
socket_send("found and loaded database and tba keys")
|
||||
|
||||
client = pymongo.MongoClient(apikey)
|
||||
|
||||
logger.info("established connection to database")
|
||||
socket_send("established connection to database")
|
||||
|
||||
previous_time = get_previous_time(client)
|
||||
|
||||
logger.info("analysis backtimed to: " + str(previous_time))
|
||||
socket_send("analysis backtimed to: " + str(previous_time))
|
||||
|
||||
config.resolve_config_conflicts(logger, client)
|
||||
|
||||
config_modules, competition = config.modules, config.competition
|
||||
for m in config_modules:
|
||||
if m in modules:
|
||||
start = time.time()
|
||||
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
|
||||
valid = current_module.validate_config()
|
||||
if not valid:
|
||||
continue
|
||||
current_module.run()
|
||||
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
|
||||
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
|
||||
if debug:
|
||||
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
|
||||
|
||||
set_current_time(client, loop_start)
|
||||
close_all()
|
||||
|
||||
logger.info("closed threads and database client")
|
||||
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
||||
socket_send("closed threads and database client")
|
||||
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
||||
|
||||
if profile:
|
||||
return 0
|
||||
|
||||
if debug:
|
||||
return 0
|
||||
|
||||
event_delay = config["variable"]["event-delay"]
|
||||
if event_delay:
|
||||
logger.info("loop delayed until database returns new matches")
|
||||
socket_send("loop delayed until database returns new matches")
|
||||
new_match = False
|
||||
while not new_match:
|
||||
time.sleep(1)
|
||||
new_match = check_new_database_matches(client, competition)
|
||||
logger.info("database returned new matches")
|
||||
socket_send("database returned new matches")
|
||||
else:
|
||||
loop_delay = float(config["variable"]["loop-delay"])
|
||||
remaining_time = loop_delay - (time.time() - loop_start)
|
||||
if remaining_time > 0:
|
||||
logger.info("loop delayed by " + str(remaining_time) + " seconds")
|
||||
socket_send("loop delayed by " + str(remaining_time) + " seconds")
|
||||
time.sleep(remaining_time)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
close_all()
|
||||
logger.info("detected KeyboardInterrupt, exiting")
|
||||
socket_send("detected KeyboardInterrupt, exiting")
|
||||
return 0
|
||||
|
||||
except ConfigurationError as e:
|
||||
str_e = "".join(traceback.format_exception(e))
|
||||
logger.error("encountered a configuration error: " + str(e))
|
||||
logger.error(str_e)
|
||||
socket_send("encountered a configuration error: " + str(e))
|
||||
socket_send(str_e)
|
||||
close_all()
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
str_e = "".join(traceback.format_exception(e))
|
||||
logger.error("encountered an exception while running")
|
||||
logger.error(str_e)
|
||||
socket_send("encountered an exception while running")
|
||||
socket_send(str_e)
|
||||
close_all()
|
||||
return 1
|
||||
|
||||
def start(pid_path, verbose, profile, debug):
|
||||
|
||||
if profile:
|
||||
|
||||
def send(msg):
|
||||
pass
|
||||
|
||||
logger = Logger(verbose, profile, debug)
|
||||
|
||||
import cProfile, pstats, io
|
||||
profile = cProfile.Profile()
|
||||
profile.enable()
|
||||
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||
profile.disable()
|
||||
f = open("profile.txt", 'w+')
|
||||
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
|
||||
ps.print_stats()
|
||||
sys.exit(exit_code)
|
||||
|
||||
elif verbose:
|
||||
|
||||
def send(msg):
|
||||
pass
|
||||
|
||||
logger = Logger(verbose, profile, debug)
|
||||
|
||||
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||
sys.exit(exit_code)
|
||||
|
||||
elif debug:
|
||||
|
||||
def send(msg):
|
||||
pass
|
||||
|
||||
logger = Logger(verbose, profile, debug)
|
||||
|
||||
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||
sys.exit(exit_code)
|
||||
|
||||
else:
|
||||
|
||||
logfile = "logfile.log"
|
||||
|
||||
f = open(logfile, 'w+')
|
||||
f.close()
|
||||
|
||||
e = open('errorlog.log', 'w+')
|
||||
with daemon.DaemonContext(
|
||||
working_directory = os.getcwd(),
|
||||
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
|
||||
stderr = e
|
||||
):
|
||||
|
||||
context = zmq.Context()
|
||||
socket = context.socket(zmq.PUB)
|
||||
socket.bind("tcp://*:5678")
|
||||
socket.send(b'status')
|
||||
|
||||
def send(msg):
|
||||
socket.send(bytes("status: " + msg, "utf-8"))
|
||||
|
||||
logger = Logger(verbose, profile, debug, file = logfile)
|
||||
|
||||
exit_code = main(logger, verbose, profile, debug, socket_send = send)
|
||||
|
||||
socket.close()
|
||||
f.close()
|
||||
|
||||
sys.exit(exit_code)
|
||||
|
||||
def stop(pid_path):
|
||||
try:
|
||||
pf = open(pid_path, 'r')
|
||||
pid = int(pf.read().strip())
|
||||
pf.close()
|
||||
except IOError:
|
||||
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
|
||||
return
|
||||
|
||||
try:
|
||||
while True:
|
||||
os.kill(pid, SIGTERM)
|
||||
time.sleep(0.01)
|
||||
except OSError as err:
|
||||
err = str(err)
|
||||
if err.find("No such process") > 0:
|
||||
if os.path.exists(pid_path):
|
||||
os.remove(pid_path)
|
||||
else:
|
||||
traceback.print_exc(file = sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
def restart(pid_path):
|
||||
stop(pid_path)
|
||||
start(pid_path, False, False, False)
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
if sys.platform.startswith("win"):
|
||||
start(None, verbose = True)
|
||||
|
||||
else:
|
||||
import daemon
|
||||
from daemon import pidfile
|
||||
from signal import SIGTERM
|
||||
pid_path = "tra-daemon.pid"
|
||||
if len(sys.argv) == 2:
|
||||
if 'start' == sys.argv[1]:
|
||||
start(pid_path, False, False, False)
|
||||
elif 'stop' == sys.argv[1]:
|
||||
stop(pid_path)
|
||||
elif 'restart' == sys.argv[1]:
|
||||
restart(pid_path)
|
||||
elif 'verbose' == sys.argv[1]:
|
||||
start(None, True, False, False)
|
||||
elif 'profile' == sys.argv[1]:
|
||||
start(None, False, True, False)
|
||||
elif 'debug' == sys.argv[1]:
|
||||
start(None, False, False, True)
|
||||
else:
|
||||
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
|
||||
sys.exit(2)
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
|
||||
sys.exit(2)
|
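
On Linux the script doubles as its own daemon controller; the accepted arguments are exactly the ones in the usage string above. Typical invocations (file names and port are taken from the script itself):

python superscript.py start      # daemonize, log to logfile.log, publish status over ZMQ on tcp://*:5678
python superscript.py verbose    # run once in the foreground with interactive output
python superscript.py stop       # read tra-daemon.pid and send SIGTERM until the process exits
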
298
src/data.py
@@ -1,298 +0,0 @@
|
||||
import requests
|
||||
import pandas as pd
|
||||
import pymongo
|
||||
from exceptions import APIError
|
||||
|
||||
class Client:
|
||||
|
||||
def __init__(self, config):
|
||||
self.competition = config.competition
|
||||
self.tbakey = config.tba
|
||||
self.mongoclient = pymongo.MongoClient(config.database)
|
||||
self.trakey = config.tra
|
||||
|
||||
def close(self):
|
||||
self.mongoclient.close()
|
||||
|
||||
def pull_new_tba_matches(self, cutoff):
|
||||
competition = self.competition
|
||||
api_key= self.tbakey
|
||||
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
|
||||
json = x.json()
|
||||
out = []
|
||||
for i in json:
|
||||
if i["actual_time"] != None and i["comp_level"] == "qm":
|
||||
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
|
||||
out.sort(key=lambda x: x['match'])
|
||||
return out
|
||||
|
||||
def get_team_match_data(self, team_num):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client.data_scouting
|
||||
mdata = db.matchdata
|
||||
out = {}
|
||||
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
|
||||
out[i['match']] = i['data']
|
||||
return pd.DataFrame(out)
|
||||
|
||||
def get_team_metrics_data(self, team_num):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client.data_processing
|
||||
mdata = db.team_metrics
|
||||
return mdata.find_one({"competition" : competition, "team": team_num})
|
||||
|
||||
def get_team_pit_data(self, team_num):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client.data_scouting
|
||||
mdata = db.pitdata
|
||||
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
|
||||
|
||||
def unkeyify_2l(self, layered_dict):
|
||||
out = {}
|
||||
for i in layered_dict.keys():
|
||||
add = []
|
||||
sortkey = []
|
||||
for j in layered_dict[i].keys():
|
||||
add.append([j,layered_dict[i][j]])
|
||||
add.sort(key = lambda x: x[0])
|
||||
out[i] = list(map(lambda x: x[1], add))
|
||||
return out
|
||||
|
||||
def get_match_data_formatted(self):
|
||||
teams_at_comp = self.get_teams_at_competition()
|
||||
out = {}
|
||||
for team in teams_at_comp:
|
||||
try:
|
||||
out[int(team)] = self.unkeyify_2l(self.get_team_match_data(team).transpose().to_dict())
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_metrics_data_formatted(self):
|
||||
competition = self.competition
|
||||
teams_at_comp = self.get_teams_at_competition()
|
||||
out = {}
|
||||
for team in teams_at_comp:
|
||||
try:
|
||||
out[int(team)] = self.get_team_metrics_data(int(team))
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_pit_data_formatted(self):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
|
||||
x = x.json()
|
||||
x = x['data']
|
||||
x = x.keys()
|
||||
out = {}
|
||||
for i in x:
|
||||
try:
|
||||
out[int(i)] = self.get_team_pit_data(int(i))
|
||||
except:
|
||||
pass
|
||||
return out
|
||||
|
||||
def get_pit_variable_data(self):
|
||||
client = self.mongoclient
|
||||
db = client.data_processing
|
||||
mdata = db.team_pit
|
||||
return mdata.find()
|
||||
|
||||
def get_pit_variable_formatted(self):
|
||||
temp = self.get_pit_variable_data()
|
||||
out = {}
|
||||
for i in temp:
|
||||
out[i["variable"]] = i["data"]
|
||||
return out
|
||||
|
||||
def push_team_tests_data(self, team_num, data, dbname = "data_processing", colname = "team_tests"):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
|
||||
|
||||
def push_team_metrics_data(self, team_num, data, dbname = "data_processing", colname = "team_metrics"):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
|
||||
|
||||
def push_team_pit_data(self, variable, data, dbname = "data_processing", colname = "team_pit"):
|
||||
client = self.mongoclient
|
||||
competition = self.competition
|
||||
db = client[dbname]
|
||||
mdata = db[colname]
|
||||
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
|
||||
|
||||
def get_analysis_flags(self, flag):
|
||||
client = self.mongoclient
|
||||
db = client.data_processing
|
||||
mdata = db.flags
|
||||
return mdata.find_one({flag:{"$exists":True}})
|
||||
|
||||
def set_analysis_flags(self, flag, data):
|
||||
client = self.mongoclient
|
||||
db = client.data_processing
|
||||
mdata = db.flags
|
||||
return mdata.replace_one({flag:{"$exists":True}}, data, True)
|
||||
|
||||
def get_previous_time(self):
|
||||
|
||||
previous_time = self.get_analysis_flags("latest_update")
|
||||
|
||||
if previous_time == None:
|
||||
|
||||
self.set_analysis_flags("latest_update", 0)
|
||||
previous_time = 0
|
||||
|
||||
else:
|
||||
|
||||
previous_time = previous_time["latest_update"]
|
||||
|
||||
return previous_time
|
||||
|
||||
def set_current_time(self, current_time):
|
||||
|
||||
self.set_analysis_flags("latest_update", {"latest_update":current_time})
|
||||
|
||||
def get_database_config(self):
|
||||
|
||||
remote_config = self.get_analysis_flags("config")
|
||||
return remote_config["config"] if remote_config != None else None
|
||||
|
||||
def set_database_config(self, config):
|
||||
|
||||
self.set_analysis_flags("config", {"config": config})
|
||||
|
||||
def load_match(self):
|
||||
|
||||
return self.get_match_data_formatted()
|
||||
|
||||
def load_metric(self, match, group_name, metrics):
|
||||
|
||||
group = {}
|
||||
|
||||
for team in match[group_name]:
|
||||
|
||||
db_data = self.get_team_metrics_data(team)
|
||||
|
||||
if db_data == None:
|
||||
|
||||
elo = {"score": metrics["elo"]["score"]}
|
||||
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
|
||||
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
|
||||
|
||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
||||
|
||||
else:
|
||||
|
||||
metrics = db_data["metrics"]
|
||||
|
||||
elo = metrics["elo"]
|
||||
gl2 = metrics["gl2"]
|
||||
ts = metrics["ts"]
|
||||
|
||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
||||
|
||||
return group
|
||||
|
||||
def load_pit(self):
|
||||
|
||||
return self.get_pit_data_formatted()
|
||||
|
||||
def push_match(self, results):
|
||||
|
||||
for team in results:
|
||||
|
||||
self.push_team_tests_data(team, results[team])
|
||||
|
||||
def push_metric(self, metric):
|
||||
|
||||
for team in metric:
|
||||
|
||||
self.push_team_metrics_data(team, metric[team])
|
||||
|
||||
def push_pit(self, pit):
|
||||
|
||||
for variable in pit:
|
||||
|
||||
self.push_team_pit_data(variable, pit[variable])
|
||||
|
||||
def check_new_database_matches(self):
|
||||
|
||||
return True
|
||||
|
||||
#----- API implementations below -----#
|
||||
|
||||
def get_team_competition(self):
|
||||
trakey = self.trakey
|
||||
url = self.trakey['url']
|
||||
endpoint = '/api/fetchTeamCompetition'
|
||||
params = {
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['competition']
|
||||
else:
|
||||
raise APIError(json)
|
||||
|
||||
def get_team(self):
|
||||
trakey = self.trakey
|
||||
url = self.trakey['url']
|
||||
endpoint = '/api/fetchTeamCompetition'
|
||||
params = {
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['team']
|
||||
else:
|
||||
raise APIError(json)
|
||||
|
||||
""" doesn't seem to be functional:
|
||||
def get_team_match_data(self, team_num):
|
||||
trakey = self.trakey
|
||||
url = self.trakey['url']
|
||||
competition = self.competition
|
||||
endpoint = '/api/fetchAllTeamMatchData'
|
||||
params = {
|
||||
"competition": competition,
|
||||
"teamScouted": team_num,
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return json['data'][team_num]
|
||||
else:
|
||||
raise APIError(json)"""
|
||||
|
||||
def get_teams_at_competition(self):
|
||||
trakey = self.trakey
|
||||
url = self.trakey['url']
|
||||
competition = self.competition
|
||||
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
|
||||
params = {
|
||||
"competition": competition,
|
||||
"CLIENT_ID": trakey['CLIENT_ID'],
|
||||
"CLIENT_SECRET": trakey['CLIENT_SECRET']
|
||||
}
|
||||
response = requests.request("GET", url + endpoint, params=params)
|
||||
json = response.json()
|
||||
if json['success']:
|
||||
return list(json['data'].keys())
|
||||
else:
|
||||
raise APIError(json)
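# Example (illustrative): on success this endpoint is expected to return JSON of
# roughly the shape below, from which only the team-number keys are kept. The
# team numbers and nicknames shown are hypothetical.
#
#	{
#		"success": true,
#		"data": {"2022": "Titan Robotics", "16": "Example Robotics"}
#	}
#
# so get_teams_at_competition() would return ["2022", "16"].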
|
18
src/requirements.txt
Normal file
@@ -0,0 +1,18 @@
requests
pymongo
pandas
tra-analysis

dnspython
pyinstaller
requests
pymongo

numpy
scipy
scikit-learn
six
pyparsing
pandas

kivy==2.0.0rc2
@@ -3,42 +3,10 @@
|
||||
# Notes:
|
||||
# setup:
|
||||
|
||||
__version__ = "1.0.0"
|
||||
__version__ = "0.8.6"
|
||||
|
||||
# changelog should be viewed using print(analysis.__changelog__)
|
||||
__changelog__ = """changelog:
|
||||
1.0.0:
|
||||
	- superscript now runs as a PEP 3143 compliant, well-behaved daemon on Linux systems
|
||||
- removed daemon and socket functionality, user can implement using external software
|
||||
- added verbose option to linux superscript to allow for interactive output
|
||||
- moved pymongo import to superscript.py
|
||||
- added profile option to linux superscript to profile runtime of script
|
||||
- reduced memory usage slightly by consolidating the unwrapped input data
|
||||
- added debug option, which performs one loop of analysis and dumps results to local files
|
||||
- added event and time delay options to config
|
||||
	- event delay pauses loop until the event listener receives an update
|
||||
	- time delay pauses loop until the time specified has elapsed since the BEGINNING of the previous loop
|
||||
	- added options to pull config information from database (retains option to use local config file)
|
||||
- config-preference option selects between prioritizing local config and prioritizing database config
|
||||
	- synchronize-config option selects whether to update the non-prioritized config with the prioritized one
|
||||
- divided config options between persistent ones (keys), and variable ones (everything else)
|
||||
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
|
||||
- module.py contains classes, each one represents a single data analysis routine
|
||||
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
|
||||
0.9.3:
|
||||
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
|
||||
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
|
||||
0.9.2:
|
||||
	- removed unnecessary imports from data
|
||||
- minor changes to interface
|
||||
0.9.1:
|
||||
- fixed bugs in configuration item loading exception handling
|
||||
0.9.0:
|
||||
- moved printing and logging related functions to interface.py (changelog will stay in this file)
|
||||
	- changed function return values for load_config and save_config to standard C values (0 for success, 1 for error)
|
||||
- added local variables for config location
|
||||
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
|
||||
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
|
||||
0.8.6:
|
||||
- added proper main function
|
||||
0.8.5:
|
||||
@@ -146,157 +114,514 @@ __author__ = (
|
||||
"Jacob Levine <jlevine@imsa.edu>",
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"load_config",
|
||||
"save_config",
|
||||
"get_previous_time",
|
||||
"load_match",
|
||||
"matchloop",
|
||||
"load_metric",
|
||||
"metricloop",
|
||||
"load_pit",
|
||||
"pitloop",
|
||||
"push_match",
|
||||
"push_metric",
|
||||
"push_pit",
|
||||
]
|
||||
|
||||
# imports:
|
||||
|
||||
import argparse, sys, time, traceback, warnings
|
||||
from config import Configuration, ConfigurationError
|
||||
from data import Client
|
||||
from interface import Logger
|
||||
from module import Match, Metric, Pit
|
||||
from tra_analysis import analysis as an
|
||||
import data as d
|
||||
from collections import defaultdict
|
||||
import json
|
||||
import math
|
||||
import numpy as np
|
||||
import os
|
||||
from os import system, name
|
||||
from pathlib import Path
|
||||
from multiprocessing import Pool
|
||||
import platform
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
def main(logger, verbose, profile, debug, config_path):
|
||||
global exec_threads
|
||||
|
||||
def close_all():
|
||||
if "client" in locals():
|
||||
client.close()
|
||||
def main():
|
||||
|
||||
global exec_threads
|
||||
|
||||
sys.stderr = open("errorlog.txt", "w")
|
||||
|
||||
warnings.filterwarnings("ignore")
|
||||
|
||||
logger.splash(__version__)
|
||||
splash()
|
||||
|
||||
modules = {"match": Match, "metric": Metric, "pit": Pit}
|
||||
|
||||
while True:
|
||||
while (True):
|
||||
|
||||
try:
|
||||
|
||||
loop_start = time.time()
|
||||
current_time = time.time()
|
||||
print("[OK] time: " + str(current_time))
|
||||
|
||||
logger.info("current time: " + str(loop_start))
|
||||
config = load_config("config.json")
|
||||
competition = config["competition"]
|
||||
match_tests = config["statistics"]["match"]
|
||||
pit_tests = config["statistics"]["pit"]
|
||||
metrics_tests = config["statistics"]["metric"]
|
||||
print("[OK] configs loaded")
|
||||
|
||||
config = Configuration(config_path)
|
||||
|
||||
logger.info("found and loaded config at <" + config_path + ">")
|
||||
|
||||
client = Client(config)
|
||||
|
||||
logger.info("established connection to database")
|
||||
|
||||
previous_time = client.get_previous_time()
|
||||
|
||||
logger.info("analysis backtimed to: " + str(previous_time))
|
||||
|
||||
config.resolve_config_conflicts(logger, client)
|
||||
|
||||
config_modules, competition = config.modules, config.competition
|
||||
|
||||
client.competition = competition
|
||||
|
||||
for m in config_modules:
|
||||
if m in modules:
|
||||
start = time.time()
|
||||
current_module = modules[m](config_modules[m], previous_time, client)
|
||||
valid = current_module.validate_config()
|
||||
if not valid:
|
||||
continue
|
||||
current_module.run()
|
||||
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
|
||||
if debug:
|
||||
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
|
||||
|
||||
client.set_current_time(loop_start)
|
||||
close_all()
|
||||
|
||||
logger.info("closed threads and database client")
|
||||
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
|
||||
|
||||
if profile:
|
||||
return 0
|
||||
|
||||
if debug:
|
||||
return 0
|
||||
|
||||
event_delay = config["variable"]["event-delay"]
|
||||
if event_delay:
|
||||
logger.info("loop delayed until database returns new matches")
|
||||
new_match = False
|
||||
while not new_match:
|
||||
time.sleep(1)
|
||||
new_match = client.check_new_database_matches()
|
||||
logger.info("database returned new matches")
|
||||
print("[OK] starting threads")
|
||||
cfg_max_threads = config["max-threads"]
|
||||
sys_max_threads = os.cpu_count()
|
||||
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
|
||||
alloc_processes = sys_max_threads + cfg_max_threads
|
||||
elif cfg_max_threads > 0 and cfg_max_threads < 1:
|
||||
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
|
||||
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
|
||||
alloc_processes = cfg_max_threads
|
||||
elif cfg_max_threads == 0:
|
||||
alloc_processes = sys_max_threads
|
||||
else:
|
||||
loop_delay = float(config["variable"]["loop-delay"])
|
||||
remaining_time = loop_delay - (time.time() - loop_start)
|
||||
if remaining_time > 0:
|
||||
logger.info("loop delayed by " + str(remaining_time) + " seconds")
|
||||
time.sleep(remaining_time)
|
||||
print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
|
||||
exit()
|
||||
exec_threads = Pool(processes = alloc_processes)
|
||||
print("[OK] " + str(alloc_processes) + " threads started")
|
||||
|
||||
apikey = config["key"]["database"]
|
||||
tbakey = config["key"]["tba"]
|
||||
print("[OK] loaded keys")
|
||||
|
||||
previous_time = get_previous_time(apikey)
|
||||
print("[OK] analysis backtimed to: " + str(previous_time))
|
||||
|
||||
print("[OK] loading data")
|
||||
start = time.time()
|
||||
match_data = load_match(apikey, competition)
|
||||
pit_data = load_pit(apikey, competition)
|
||||
print("[OK] loaded data in " + str(time.time() - start) + " seconds")
|
||||
|
||||
print("[OK] running match stats")
|
||||
start = time.time()
|
||||
matchloop(apikey, competition, match_data, match_tests)
|
||||
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
|
||||
|
||||
print("[OK] running team metrics")
|
||||
start = time.time()
|
||||
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
|
||||
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
|
||||
|
||||
print("[OK] running pit analysis")
|
||||
start = time.time()
|
||||
pitloop(apikey, competition, pit_data, pit_tests)
|
||||
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
|
||||
|
||||
set_current_time(apikey, current_time)
|
||||
print("[OK] finished all tests, looping")
|
||||
|
||||
print_hrule()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
close_all()
|
||||
logger.info("detected KeyboardInterrupt, exiting")
|
||||
return 0
|
||||
print("\n[OK] caught KeyboardInterrupt, killing processes")
|
||||
exec_threads.terminate()
|
||||
print("[OK] processes killed, exiting")
|
||||
exit()
|
||||
|
||||
except ConfigurationError as e:
|
||||
str_e = "".join(traceback.format_exception(e))
|
||||
logger.error("encountered a configuration error: " + str(e))
|
||||
logger.error(str_e)
|
||||
close_all()
|
||||
return 1
|
||||
else:
|
||||
pass
|
||||
|
||||
except Exception as e:
|
||||
str_e = "".join(traceback.format_exception(e))
|
||||
logger.error("encountered an exception while running")
|
||||
logger.error(str_e)
|
||||
close_all()
|
||||
return 1
|
||||
#clear()
|
||||
|
||||
def start(verbose, profile, debug, config_path, log_path):
|
||||
def clear():
|
||||
|
||||
logger = Logger(verbose, profile, debug, file = log_path)
|
||||
# for windows
|
||||
if name == 'nt':
|
||||
_ = system('cls')
|
||||
|
||||
if profile:
|
||||
# for mac and linux(here, os.name is 'posix')
|
||||
else:
|
||||
_ = system('clear')
|
||||
|
||||
import cProfile, pstats, io
|
||||
profile = cProfile.Profile()
|
||||
profile.enable()
|
||||
exit_code = main(logger, verbose, profile, debug, config_path)
|
||||
profile.disable()
|
||||
f = open("profile.txt", "w+")
|
||||
ps = pstats.Stats(profile, stream = f).sort_stats("cumtime")
|
||||
ps.print_stats()
|
||||
sys.exit(exit_code)
|
||||
def print_hrule():
|
||||
|
||||
elif verbose:
|
||||
print("#"+38*"-"+"#")
|
||||
|
||||
exit_code = main(logger, verbose, profile, debug, config_path)
|
||||
sys.exit(exit_code)
|
||||
def print_box(s):
|
||||
|
||||
elif debug:
|
||||
temp = "|"
|
||||
temp += s
|
||||
temp += (40-len(s)-2)*" "
|
||||
temp += "|"
|
||||
print(temp)
|
||||
|
||||
exit_code = main(logger, verbose, profile, debug, config_path)
|
||||
sys.exit(exit_code)
|
||||
def splash():
|
||||
|
||||
print_hrule()
|
||||
print_box(" superscript version: " + __version__)
|
||||
print_box(" os: " + platform.system())
|
||||
print_box(" python: " + platform.python_version())
|
||||
print_hrule()
|
||||
|
||||
def load_config(file):
|
||||
|
||||
config_vector = {}
|
||||
|
||||
try:
|
||||
f = open(file)
|
||||
except:
|
||||
print("[ERROR] could not locate config.json, generating blank config.json and exiting")
|
||||
f = open(file, "w")
|
||||
f.write(sample_json)
|
||||
exit()
|
||||
|
||||
config_vector = json.load(f)
|
||||
|
||||
return config_vector
|
||||
|
||||
def save_config(file, config_vector):
|
||||
|
||||
with open(file, "w") as f:
|
||||
json.dump(config_vector, f)
|
||||
|
||||
def get_previous_time(apikey):
|
||||
|
||||
previous_time = d.get_analysis_flags(apikey, "latest_update")
|
||||
|
||||
if previous_time == None:
|
||||
|
||||
d.set_analysis_flags(apikey, "latest_update", 0)
|
||||
previous_time = 0
|
||||
|
||||
else:
|
||||
|
||||
pass # must be verbose, debug or profile
|
||||
previous_time = previous_time["latest_update"]
|
||||
|
||||
return previous_time
|
||||
|
||||
def set_current_time(apikey, current_time):
|
||||
|
||||
d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
|
||||
|
||||
def load_match(apikey, competition):
|
||||
|
||||
return d.get_match_data_formatted(apikey, competition)
|
||||
|
||||
def simplestats(data_test):
|
||||
|
||||
data = np.array(data_test[0])
|
||||
data = data[np.isfinite(data)]
|
||||
ranges = list(range(len(data)))
|
||||
|
||||
test = data_test[1]
|
||||
|
||||
if test == "basic_stats":
|
||||
return an.basic_stats(data)
|
||||
|
||||
if test == "historical_analysis":
|
||||
return an.histo_analysis([ranges, data])
|
||||
|
||||
if test == "regression_linear":
|
||||
return an.regression(ranges, data, ['lin'])
|
||||
|
||||
if test == "regression_logarithmic":
|
||||
return an.regression(ranges, data, ['log'])
|
||||
|
||||
if test == "regression_exponential":
|
||||
return an.regression(ranges, data, ['exp'])
|
||||
|
||||
if test == "regression_polynomial":
|
||||
return an.regression(ranges, data, ['ply'])
|
||||
|
||||
if test == "regression_sigmoidal":
|
||||
return an.regression(ranges, data, ['sig'])
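# Illustrative sketch of how simplestats is driven below: each work item is a
# (series, test_name) tuple and the process pool maps the worker over them. The
# stand-in worker and data here are hypothetical; the real worker dispatches to
# tra_analysis based on the test name.
from multiprocessing import Pool

def _demo_worker(data_test):
	series, test = data_test
	return (test, sum(series) / len(series))	# stand-in for an.basic_stats etc.

if __name__ == "__main__":
	work = [([1, 2, 3], "basic_stats"), ([4, 5, 6], "basic_stats")]
	with Pool(processes = 2) as pool:
		print(pool.map(_demo_worker, work))	# [('basic_stats', 2.0), ('basic_stats', 5.0)]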
|
||||
|
||||
def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
|
||||
|
||||
global exec_threads
|
||||
|
||||
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
|
||||
|
||||
class AutoVivification(dict):
|
||||
def __getitem__(self, item):
|
||||
try:
|
||||
return dict.__getitem__(self, item)
|
||||
except KeyError:
|
||||
value = self[item] = type(self)()
|
||||
return value
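# Example (illustrative): AutoVivification lets the nested assignments below be
# written without first creating the intermediate dicts by hand.
demo = AutoVivification()
demo["team"]["variable"]["test"] = 1.0	# no KeyError; intermediate dicts are created on access
print(demo)	# {'team': {'variable': {'test': 1.0}}}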
|
||||
|
||||
return_vector = {}
|
||||
|
||||
team_filtered = []
|
||||
variable_filtered = []
|
||||
variable_data = []
|
||||
test_filtered = []
|
||||
result_filtered = []
|
||||
return_vector = AutoVivification()
|
||||
|
||||
for team in data:
|
||||
|
||||
for variable in data[team]:
|
||||
|
||||
if variable in tests:
|
||||
|
||||
for test in tests[variable]:
|
||||
|
||||
team_filtered.append(team)
|
||||
variable_filtered.append(variable)
|
||||
variable_data.append((data[team][variable], test))
|
||||
test_filtered.append(test)
|
||||
|
||||
result_filtered = exec_threads.map(simplestats, variable_data)
|
||||
i = 0
|
||||
|
||||
result_filtered = list(result_filtered)
|
||||
|
||||
for result in result_filtered:
|
||||
|
||||
filtered = test_filtered[i]
|
||||
|
||||
try:
|
||||
short = short_mapping[filtered]
|
||||
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
|
||||
except KeyError: # not in mapping
|
||||
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
|
||||
i += 1
|
||||
|
||||
push_match(apikey, competition, return_vector)
|
||||
|
||||
def load_metric(apikey, competition, match, group_name, metrics):
|
||||
|
||||
group = {}
|
||||
|
||||
for team in match[group_name]:
|
||||
|
||||
db_data = d.get_team_metrics_data(apikey, competition, team)
|
||||
|
||||
if d.get_team_metrics_data(apikey, competition, team) == None:
|
||||
|
||||
elo = {"score": metrics["elo"]["score"]}
|
||||
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
|
||||
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
|
||||
|
||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
||||
|
||||
else:
|
||||
|
||||
metrics = db_data["metrics"]
|
||||
|
||||
elo = metrics["elo"]
|
||||
gl2 = metrics["gl2"]
|
||||
ts = metrics["ts"]
|
||||
|
||||
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
|
||||
|
||||
return group
|
||||
|
||||
def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
|
||||
|
||||
elo_N = metrics["elo"]["N"]
|
||||
elo_K = metrics["elo"]["K"]
|
||||
|
||||
matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
|
||||
|
||||
red = {}
|
||||
blu = {}
|
||||
|
||||
for match in matches:
|
||||
|
||||
red = load_metric(apikey, competition, match, "red", metrics)
|
||||
blu = load_metric(apikey, competition, match, "blue", metrics)
|
||||
|
||||
elo_red_total = 0
|
||||
elo_blu_total = 0
|
||||
|
||||
gl2_red_score_total = 0
|
||||
gl2_blu_score_total = 0
|
||||
|
||||
gl2_red_rd_total = 0
|
||||
gl2_blu_rd_total = 0
|
||||
|
||||
gl2_red_vol_total = 0
|
||||
gl2_blu_vol_total = 0
|
||||
|
||||
for team in red:
|
||||
|
||||
elo_red_total += red[team]["elo"]["score"]
|
||||
|
||||
gl2_red_score_total += red[team]["gl2"]["score"]
|
||||
gl2_red_rd_total += red[team]["gl2"]["rd"]
|
||||
gl2_red_vol_total += red[team]["gl2"]["vol"]
|
||||
|
||||
for team in blu:
|
||||
|
||||
elo_blu_total += blu[team]["elo"]["score"]
|
||||
|
||||
gl2_blu_score_total += blu[team]["gl2"]["score"]
|
||||
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
|
||||
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
|
||||
|
||||
red_elo = {"score": elo_red_total / len(red)}
|
||||
blu_elo = {"score": elo_blu_total / len(blu)}
|
||||
|
||||
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
|
||||
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
|
||||
|
||||
|
||||
if match["winner"] == "red":
|
||||
|
||||
observations = {"red": 1, "blu": 0}
|
||||
|
||||
elif match["winner"] == "blue":
|
||||
|
||||
observations = {"red": 0, "blu": 1}
|
||||
|
||||
else:
|
||||
|
||||
observations = {"red": 0.5, "blu": 0.5}
|
||||
|
||||
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
|
||||
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
|
||||
|
||||
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
|
||||
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
|
||||
|
||||
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
|
||||
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
|
||||
|
||||
for team in red:
|
||||
|
||||
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
|
||||
|
||||
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
|
||||
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
|
||||
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
|
||||
|
||||
for team in blu:
|
||||
|
||||
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
|
||||
|
||||
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
|
||||
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
|
||||
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
|
||||
|
||||
temp_vector = {}
|
||||
temp_vector.update(red)
|
||||
temp_vector.update(blu)
|
||||
|
||||
push_metric(apikey, competition, temp_vector)
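# Illustrative sketch (an assumption, not taken from tra_analysis): the
# conventional Elo update that the an.Metric().elo(...) calls above are expected
# to perform, with N as the rating scale and K as the update weight.
def elo_delta(own, opp, observation, N = 400, K = 24):
	expected = 1 / (1 + 10 ** ((opp - own) / N))	# expected score against opp
	return K * (observation - expected)

# A 1500-rated alliance beating an equally rated one gains K/2 = 12 points:
print(elo_delta(1500, 1500, 1))	# 12.0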
|
||||
|
||||
def load_pit(apikey, competition):
|
||||
|
||||
return d.get_pit_data_formatted(apikey, competition)
|
||||
|
||||
def pitloop(apikey, competition, pit, tests):
|
||||
|
||||
return_vector = {}
|
||||
for team in pit:
|
||||
for variable in pit[team]:
|
||||
if variable in tests:
|
||||
if not variable in return_vector:
|
||||
return_vector[variable] = []
|
||||
return_vector[variable].append(pit[team][variable])
|
||||
|
||||
push_pit(apikey, competition, return_vector)
|
||||
|
||||
def push_match(apikey, competition, results):
|
||||
|
||||
for team in results:
|
||||
|
||||
d.push_team_tests_data(apikey, competition, team, results[team])
|
||||
|
||||
def push_metric(apikey, competition, metric):
|
||||
|
||||
for team in metric:
|
||||
|
||||
d.push_team_metrics_data(apikey, competition, team, metric[team])
|
||||
|
||||
def push_pit(apikey, competition, pit):
|
||||
|
||||
for variable in pit:
|
||||
|
||||
d.push_team_pit_data(apikey, competition, variable, pit[variable])
|
||||
|
||||
def get_team_metrics(apikey, tbakey, competition):
|
||||
|
||||
metrics = d.get_metrics_data_formatted(apikey, competition)
|
||||
|
||||
elo = {}
|
||||
gl2 = {}
|
||||
|
||||
for team in metrics:
|
||||
|
||||
elo[team] = metrics[team]["metrics"]["elo"]["score"]
|
||||
gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
|
||||
|
||||
elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
|
||||
gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
|
||||
|
||||
elo_ranked = []
|
||||
|
||||
for team in elo:
|
||||
|
||||
elo_ranked.append({"team": str(team), "elo": str(elo[team])})
|
||||
|
||||
gl2_ranked = []
|
||||
|
||||
for team in gl2:
|
||||
|
||||
gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
|
||||
|
||||
return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
|
||||
|
||||
sample_json = """{
|
||||
"max-threads": 0.5,
|
||||
"team": "",
|
||||
"competition": "2020ilch",
|
||||
"key":{
|
||||
"database":"",
|
||||
"tba":""
|
||||
},
|
||||
"statistics":{
|
||||
"match":{
|
||||
"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
|
||||
"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
|
||||
|
||||
},
|
||||
"metric":{
|
||||
"elo":{
|
||||
"score":1500,
|
||||
"N":400,
|
||||
"K":24
|
||||
},
|
||||
"gl2":{
|
||||
"score":1500,
|
||||
"rd":250,
|
||||
"vol":0.06
|
||||
},
|
||||
"ts":{
|
||||
"mu":25,
|
||||
"sigma":8.33
|
||||
}
|
||||
},
|
||||
"pit":{
|
||||
"wheel-mechanism":true,
|
||||
"low-balls":true,
|
||||
"high-balls":true,
|
||||
"wheel-success":true,
|
||||
"strategic-focus":true,
|
||||
"climb-mechanism":true,
|
||||
"attitude":true
|
||||
}
|
||||
}
|
||||
}"""
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
parser = argparse.ArgumentParser(description = "TRA data processing application.")
|
||||
parser.add_argument("mode", metavar = "MODE", type = str, nargs = 1, choices = ["verbose", "profile", "debug"], help = "verbose, debug, profile")
|
||||
parser.add_argument("--config", dest = "config", default = "config.json", type = str, help = "path to config file")
|
||||
parser.add_argument("--logfile", dest = "logfile", default = "logfile.log", type = str, help = "path to log file")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
mode = args.mode[0]
|
||||
config_path = args.config
|
||||
log_path = args.logfile
|
||||
if mode == "verbose":
|
||||
start(True, False, False, config_path = config_path, log_path = log_path)
|
||||
elif mode == "profile":
|
||||
start(False, True, False, config_path = config_path, log_path = log_path)
|
||||
elif mode == "debug":
|
||||
start(False, False, True, config_path = config_path, log_path = log_path)
|
||||
|
||||
exit(0)
|
||||
if sys.platform.startswith('win'):
|
||||
multiprocessing.freeze_support()
|
||||
main()
|
37
src/superscript.spec
Normal file
@@ -0,0 +1,37 @@
# -*- mode: python ; coding: utf-8 -*-

block_cipher = None


a = Analysis(['superscript.py'],
	pathex=['/workspaces/tra-data-analysis/src'],
	binaries=[],
	datas=[],
	hiddenimports=[
		"dnspython",
		"sklearn.utils._weight_vector",
		"requests",
	],
	hookspath=[],
	runtime_hooks=[],
	excludes=[],
	win_no_prefer_redirects=False,
	win_private_assemblies=False,
	cipher=block_cipher,
	noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
	cipher=block_cipher)
exe = EXE(pyz,
	a.scripts,
	a.binaries,
	a.zipfiles,
	a.datas,
	[('W ignore', None, 'OPTION')],
	name='superscript',
	debug=False,
	bootloader_ignore_signals=False,
	strip=False,
	upx=True,
	upx_exclude=[],
	runtime_tmpdir=None,
	console=True )
12
submit-debug.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long

cd competition
python superscript.py debug
12
submit-prod.sh
Normal file
@@ -0,0 +1,12 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=PROD_slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long

cd competition
python superscript.py verbose
@@ -1,14 +0,0 @@
import signal
import zmq

signal.signal(signal.SIGINT, signal.SIG_DFL)

context = zmq.Context()

socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')

while True:
	message = socket.recv_multipart()
	print(f'Received: {message}')