18 Commits

Author SHA1 Message Date
Arthur Lu
93091b6bd2 appeased pylint in config.py attr lookup 2022-03-31 02:18:30 +00:00
Arthur Lu
0024a94f4e added file logging with default,
added basic progress bars for each module
2022-03-30 04:53:40 +00:00
Arthur Lu
5885224231 removed match printing,
CLI args use argparse

Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 23:09:37 +00:00
Arthur Lu
64ea7c227c removed commented code
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 22:23:24 +00:00
Arthur Lu
ddf6faeecf fixed metrics processing ordering,
added metrics logging
2022-03-29 21:15:24 +00:00
Arthur Lu
b4766d1b3e fixed Module template __init__ definition
Signed-off-by: Arthur Lu <learthurgo@gmail.com>
2022-03-29 16:49:38 +00:00
Arthur Lu
e04245952a merged data and pull functions into Client class,
removed pull.py dep.py,
modified existing code to work with new Client class
2022-03-29 05:48:39 +00:00
Arthur Lu
2ebaddb92c updated usage 2022-03-29 04:44:59 +00:00
Arthur Lu
8b09e155dc updated changelog 2022-03-29 04:42:26 +00:00
Arthur Lu
5ca474d158 finished removing socket functionality 2022-03-29 04:39:52 +00:00
Arthur Lu
e7a8a259fc finished removing daemon functionality 2022-03-29 04:35:01 +00:00
Arthur Lu
5553e3dddf fixed CLI options,
implemented better config attr search member,
fixed imports
2022-03-29 04:28:09 +00:00
Arthur Lu
0212e6b2ca pylint now uses tab indent 2022-03-29 04:15:47 +00:00
Arthur Lu
14f8901803 removed unnecessary imports 2022-03-28 23:22:42 +00:00
Arthur Lu
a5f9e55cf4 fixed build scripts 2022-03-28 23:15:13 +00:00
Arthur Lu
34f0b3f10c removed: daemonization,
socket messaging

added: CLI option to specify config file

Not working, requires data.py changes in competition branch
2022-03-28 22:42:04 +00:00
Arthur Lu
6b070c7b08 fixed merge changes 2022-03-15 05:31:51 +00:00
Arthur Lu
9279311664 Merge branch 'master' into superscript-v1 2022-03-15 05:27:11 +00:00
28 changed files with 679 additions and 1509 deletions


@@ -1,7 +1,6 @@
-FROM ubuntu:20.04
+FROM python:slim
 WORKDIR /
-RUN apt-get -y update
-RUN DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata
-RUN apt-get install -y python3 python3-dev git python3-pip python3-kivy python-is-python3 libgl1-mesa-dev build-essential
-RUN ln -s $(which pip3) /usr/bin/pip
-RUN pip install pymongo pandas numpy scipy scikit-learn matplotlib pylint kivy
+RUN apt-get -y update; apt-get -y upgrade
+RUN apt-get -y install git binutils
+COPY requirements.txt .
+RUN pip install -r requirements.txt


@@ -1,2 +0,0 @@
FROM titanscout2022/tra-analysis-base:latest
WORKDIR /


@@ -1,7 +1,7 @@
 {
 	"name": "TRA Analysis Development Environment",
 	"build": {
-		"dockerfile": "dev-dockerfile",
+		"dockerfile": "Dockerfile",
 	},
 	"settings": {
 		"terminal.integrated.shell.linux": "/bin/bash",
@@ -9,14 +9,15 @@
 		"python.linting.enabled": true,
 		"python.linting.pylintEnabled": true,
 		"python.linting.pylintPath": "/usr/local/bin/pylint",
+		"python.linting.pylintArgs": ["--indent-string", "\t"],
 		"python.testing.pytestPath": "/usr/local/bin/pytest",
 		"editor.tabSize": 4,
 		"editor.insertSpaces": false
 	},
 	"extensions": [
 		"mhutchie.git-graph",
 		"ms-python.python",
 		"waderyan.gitblame"
 	],
-	"postCreateCommand": "/usr/bin/pip3 install -r ${containerWorkspaceFolder}/src/requirements.txt && /usr/bin/pip3 install --no-cache-dir pylint && /usr/bin/pip3 install pytest"
+	"postCreateCommand": ""
 }


@@ -1,14 +1,11 @@
 cerberus
 dnspython
 numpy
-pandas
 pyinstaller
 pylint
 pymongo
 pyparsing
 pytest
-python-daemon
-pyzmq
 requests
 scikit-learn
 scipy

.github/ISSUE_TEMPLATE/bug_report.md

@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]
**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]
**Additional context**
Add any other context about the problem here.

.github/ISSUE_TEMPLATE/feature_request.md

@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.


@@ -1,7 +1,7 @@
 # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
 # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-name: Superscript Unit Tests
+name: Build Superscript Linux
 on:
   release:
@@ -11,7 +11,25 @@ jobs:
   generate:
     name: Build Linux
     runs-on: ubuntu-latest
     steps:
       - name: Checkout master
         uses: actions/checkout@master
+      - name: Install Dependencies
+        run: pip install -r requirements.txt
+        working-directory: src/
+      - name: Give Execute Permission
+        run: chmod +x build-CLI.sh
+        working-directory: build/
+      - name: Build Binary
+        run: ./build-CLI.sh
+        working-directory: build/
+      - name: Copy Binary to Root Dir
+        run: cp superscript ..
+        working-directory: dist/
+      - name: Upload Release Asset
+        uses: svenstaro/upload-release-action@v2
+        with:
+          repo_token: ${{ secrets.GITHUB_TOKEN }}
+          file: superscript
+          asset_name: superscript
+          tag: ${{ github.ref }}

.gitignore

@@ -9,9 +9,6 @@
 **/tra_analysis/
 **/temp/*
-**/errorlog.txt
-/dist/superscript.*
-/dist/superscript
 **/*.pid
 **/profile.*
@@ -19,6 +16,3 @@
 **/*.log
 **/errorlog.txt
 /dist/*
-slurm-tra-superscript.out
-config*.json

README.md

@@ -1,4 +1,4 @@
-# Red Alliance Analysis &middot; ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscouting/tra-superscript)
+# Red Alliance Analysis &middot; ![GitHub release (latest by date)](https://img.shields.io/github/v/release/titanscout2022/red-alliance-analysis)
 Titan Robotics 2022 Strategy Team Repository for Data Analysis Tools. Included with these tools are the backend data analysis engine formatted as a python package, associated binaries for the analysis package, and premade scripts that can be pulled directly from this repository and will integrate with other Red Alliance applications to quickly deploy FRC scouting tools.

build/build-CLI.bat

@@ -1,5 +1,5 @@
-set pathtospec="../src/superscript.spec"
+set pathtospec="superscript.spec"
 set pathtodist="../dist/"
 set pathtowork="temp/"
-pyinstaller --onefile --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%
+pyinstaller --clean --distpath %pathtodist% --workpath %pathtowork% %pathtospec%

build/build-CLI.sh

@@ -1,5 +1,5 @@
-pathtospec="../src/superscript.spec"
+pathtospec="superscript.spec"
 pathtodist="../dist/"
 pathtowork="temp/"
-pyinstaller --onefile --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
+pyinstaller --clean --distpath ${pathtodist} --workpath ${pathtowork} ${pathtospec}
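A note on the dropped --onefile flag: PyInstaller does not accept makespec options such as --onefile when it is given a .spec file, because those choices are encoded in the spec itself (the new spec's EXE block receives a.binaries and a.zipfiles, which is the one-file layout). A minimal sketch of the same build driven through PyInstaller's Python API, assuming it runs from the build/ directory like the scripts above:

	# sketch: spec-driven build via the PyInstaller API (paths as in build-CLI.sh)
	import PyInstaller.__main__

	PyInstaller.__main__.run([
		"--clean",
		"--distpath", "../dist/",
		"--workpath", "temp/",
		"superscript.spec",
	])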

build/superscript.spec

@@ -0,0 +1,35 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(
['../src/superscript.py'],
pathex=[],
binaries=[],
datas=[],
hiddenimports=['dnspython', 'sklearn.utils._weight_vector', 'sklearn.utils._typedefs', 'sklearn.neighbors._partition_nodes', 'requests'],
hookspath=[],
hooksconfig={},
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False
)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[('W ignore', None, 'OPTION')],
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )
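The hiddenimports list above exists because PyInstaller's static analysis only follows literal import statements; sklearn loads several compiled submodules dynamically, so they must be declared by hand or the frozen binary fails at runtime with ModuleNotFoundError. A rough sketch of the command-line equivalent of that list, again through the Python API (the entry script path is taken from the spec):

	# sketch: --hidden-import flags mirroring the spec's hiddenimports list
	import PyInstaller.__main__

	PyInstaller.__main__.run([
		"--onefile", "--clean",
		"--hidden-import", "dnspython",
		"--hidden-import", "sklearn.utils._weight_vector",
		"--hidden-import", "sklearn.utils._typedefs",
		"--hidden-import", "sklearn.neighbors._partition_nodes",
		"--hidden-import", "requests",
		"../src/superscript.py",
	])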


@@ -1,218 +0,0 @@
from calendar import c
import requests
import pull
import pandas as pd
import json
def pull_new_tba_matches(apikey, competition, last_match):
api_key= apikey
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
json = x.json()
out = []
for i in json:
if i["actual_time"] != None and i["comp_level"] == "qm" and i["match_number"] > last_match :
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
out.sort(key=lambda x: x['match'])
return out
def pull_new_tba_matches_manual(apikey, competition, cutoff):
filename = competition+"-wins.json"
with open(filename, 'r') as f:
data = json.load(f)
return data
def get_team_match_data(client, competition, team_num):
db = client.data_scouting
mdata = db.matchdata
out = {}
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
out[i['match']] = i['data']
return pd.DataFrame(out)
def clear_metrics(client, competition):
db = client.data_processing
data = db.team_metrics
data.delete_many({competition: competition})
return True
def get_team_pit_data(client, competition, team_num):
db = client.data_scouting
mdata = db.pitdata
out = {}
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def get_team_metrics_data(client, competition, team_num):
db = client.data_processing
mdata = db.team_metrics
temp = mdata.find_one({"team": team_num})
if temp != None:
if competition in temp['metrics'].keys():
temp = temp['metrics'][competition]
else :
temp = None
else:
temp = None
return temp
def get_match_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = unkeyify_2l(get_team_match_data(client, competition, team).transpose().to_dict())
except:
pass
return out
def get_metrics_data_formatted(client, competition):
teams_at_comp = pull.get_teams_at_competition(competition)
out = {}
for team in teams_at_comp:
try:
out[int(team)] = get_team_metrics_data(client, competition, int(team))
except:
pass
return out
def get_pit_data_formatted(client, competition):
x=requests.get("https://scouting.titanrobotics2022.com/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
out = {}
for i in x:
try:
out[int(i)] = get_team_pit_data(client, competition, int(i))
except:
pass
return out
def get_pit_variable_data(client, competition):
db = client.data_processing
mdata = db.team_pit
out = {}
return mdata.find()
def get_pit_variable_formatted(client, competition):
temp = get_pit_variable_data(client, competition)
out = {}
for i in temp:
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_tests"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def push_team_metrics_data(client, competition, team_num, data, dbname = "data_processing", colname = "team_metrics"):
db = client[dbname]
mdata = db[colname]
mdata.update_one({"team": team_num}, {"$set": {"metrics.{}".format(competition): data}}, upsert=True)
def push_team_pit_data(client, competition, variable, data, dbname = "data_processing", colname = "team_pit"):
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
def get_analysis_flags(client, flag):
db = client.data_processing
mdata = db.flags
return mdata.find_one({"_id": "2022"})
def set_analysis_flags(client, flag, data):
db = client.data_processing
mdata = db.flags
return mdata.update_one({"_id": "2022"}, {"$set": data})
def unkeyify_2l(layered_dict):
out = {}
for i in layered_dict.keys():
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_previous_time(client):
previous_time = get_analysis_flags(client, "latest_update")
if previous_time == None:
set_analysis_flags(client, "latest_update", 0)
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(client, current_time):
set_analysis_flags(client, "latest_update", {"latest_update":current_time})
def get_database_config(client):
remote_config = get_analysis_flags(client, "config")
return remote_config["config"] if remote_config != None else None
def set_database_config(client, config):
set_analysis_flags(client, "config", {"config": config})
def load_match(client, competition):
return get_match_data_formatted(client, competition)
def load_metric(client, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = get_team_metrics_data(client, competition, team)
if db_data == None:
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
group[team] = {"gl2": gl2}
else:
metrics = db_data
gl2 = metrics["gl2"]
group[team] = {"gl2": gl2}
return group
def load_pit(client, competition):
return get_pit_data_formatted(client, competition)
def push_match(client, competition, results):
for team in results:
push_team_tests_data(client, competition, team, results[team])
def push_metric(client, competition, metric):
for team in metric:
push_team_metrics_data(client, competition, team, metric[team])
def push_pit(client, competition, pit):
for variable in pit:
push_team_pit_data(client, competition, variable, pit[variable])
def check_new_database_matches(client, competition):
return True

src/dep.py

@@ -1,132 +0,0 @@
# contains deprecated functions, not to be used unless necessary!
import json
sample_json = """
{
"persistent":{
"key":{
"database":"",
"tba":"",
"tra":{
"CLIENT_ID":"",
"CLIENT_SECRET":"",
"url": ""
}
},
"config-preference":"local",
"synchronize-config":false
},
"variable":{
"max-threads":0.5,
"team":"",
"event-delay":false,
"loop-delay":0,
"reportable":true,
"teams":[
],
"modules":{
"match":{
"tests":{
"balls-blocked":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-collected":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-lower-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-started":[
"basic_stats",
"historical_analyss",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-teleop":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
],
"balls-upper-auto":[
"basic_stats",
"historical_analysis",
"regression_linear",
"regression_logarithmic",
"regression_exponential",
"regression_polynomial",
"regression_sigmoidal"
]
}
},
"metric":{
"tests":{
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
}
},
"pit":{
"tests":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}
}
}
"""
def load_config(path, config_vector):
try:
f = open(path, "r")
config_vector.update(json.load(f))
f.close()
return 0
except:
f = open(path, "w")
f.write(sample_json)
f.close()
return 1

src/pull.py

@@ -1,63 +0,0 @@
import requests
from exceptions import APIError
from dep import load_config
url = "https://scouting.titanrobotics2022.com"
config_tra = {}
load_config("config.json", config_tra)
trakey = config_tra['persistent']['key']['tra']
def get_team_competition():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json)
def get_team():
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json)
def get_team_match_data(competition, team_num):
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json)
def get_teams_at_competition(competition):
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json)


@@ -1,15 +0,0 @@
cerberus
dnspython
numpy
pandas
pyinstaller
pylint
pymongo
pyparsing
python-daemon
pyzmq
requests
scikit-learn
scipy
six
tra-analysis


@@ -1,402 +0,0 @@
# Titan Robotics Team 2022: Superscript Script
# Written by Arthur Lu, Jacob Levine, and Dev Singh
# Notes:
# setup:
__version__ = "1.0.0"
# changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog:
1.0.0:
- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
- linux superscript daemon has integrated websocket output to monitor progress/status remotely
- linux daemon now sends stderr to errorlog.log
- added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script
- reduced memory usage slightly by consolidating the unwrapped input data
- added debug option, which performs one loop of analysis and dumps results to local files
- added event and time delay options to config
- event delay pauses loop until event listener receives an update
- time delay pauses loop until the time specified has elapsed since the BEGINNING of previous loop
- added options to pull config information from database (retains option to use local config file)
- config-preference option selects between prioritizing local config and prioritizing database config
- synchronize-config option selects whether to update the non prioritized config with the prioritized one
- divided config options between persistent ones (keys), and variable ones (everything else)
- generalized behavior of various core components by collecting loose functions in several dependencies into classes
- module.py contains classes, each one represents a single data analysis routine
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
0.9.3:
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
- passed singular instance of PyMongo client as standin for apikey parameter in all data.py functions
0.9.2:
- removed unnecessary imports from data
- minor changes to interface
0.9.1:
- fixed bugs in configuration item loading exception handling
0.9.0:
- moved printing and logging related functions to interface.py (changelog will stay in this file)
- changed function return files for load_config and save_config to standard C values (0 for success, 1 for error)
- added local variables for config location
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
0.8.6:
- added proper main function
0.8.5:
- added more graceful KeyboardInterrupt exiting
- redirected stderr to errorlog.txt
0.8.4:
- added better error message for missing config.json
- added automatic config.json creation
- added splash text with version and system info
0.8.3:
- updated matchloop with new regression format (requires tra_analysis 3.x)
0.8.2:
- readded while true to main function
- added more thread config options
0.8.1:
- optimized matchloop further by bypassing GIL
0.8.0:
- added multithreading to matchloop
- tweaked user log
0.7.0:
- finished implementing main function
0.6.2:
- integrated get_team_rankings.py as get_team_metrics() function
- integrated visualize_pit.py as graph_pit_histogram() function
0.6.1:
- bug fixes with analysis.Metric() calls
- modified metric functions to use config.json defined default values
0.6.0:
- removed main function
- changed load_config function
- added save_config function
- added load_match function
- renamed simpleloop to matchloop
- moved simplestats function inside matchloop
- renamed load_metrics to load_metric
- renamed metricsloop to metricloop
- split push to database functions among push_match, push_metric, push_pit
- moved
0.5.2:
- made changes due to refactoring of analysis
0.5.1:
- text fixes
- removed matplotlib requirement
0.5.0:
- improved user interface
0.4.2:
- removed unnecessary code
0.4.1:
- fixed bug where X range for regression was determined before sanitization
- better sanitized data
0.4.0:
- fixed spelling issue in __changelog__
- addressed nan bug in regression
- fixed errors on line 335 with metrics calling incorrect key "glicko2"
- fixed errors in metrics computing
0.3.0:
- added analysis to pit data
0.2.1:
- minor stability patches
- implemented db syncing for timestamps
- fixed bugs
0.2.0:
- finalized testing and small fixes
0.1.4:
- finished metrics implement, trueskill is bugged
0.1.3:
- working
0.1.2:
- started implement of metrics
0.1.1:
- cleaned up imports
0.1.0:
- tested working, can push to database
0.0.9:
- tested working
- prints out stats for the time being, will push to database later
0.0.8:
- added data import
- removed tba import
- finished main method
0.0.7:
- added load_config
- optimized simpleloop for readability
- added __all__ entries
- added simplestats engine
- pending testing
0.0.6:
- fixes
0.0.5:
- imported pickle
- created custom database object
0.0.4:
- fixed simpleloop to actually return a vector
0.0.3:
- added metricsloop which is unfinished
0.0.2:
- added simpleloop which is untested until data is provided
0.0.1:
- created script
- added analysis, numba, numpy imports
"""
__author__ = (
"Arthur Lu <learthurgo@gmail.com>",
"Jacob Levine <jlevine@imsa.edu>",
)
# imports:
import os, sys, time
import pymongo # soon to be deprecated
import traceback
import warnings
from config import Configuration, ConfigurationError
from data import get_previous_time, set_current_time, check_new_database_matches, clear_metrics
from interface import Logger
from module import Match, Metric, Pit
import zmq
config_path = "config.json"
def main(logger, verbose, profile, debug, socket_send = None):
def close_all():
if "client" in locals():
client.close()
warnings.filterwarnings("ignore")
logger.splash(__version__)
modules = {"match": Match, "metric": Metric, "pit": Pit}
while True:
try:
loop_start = time.time()
logger.info("current time: " + str(loop_start))
socket_send("current time: " + str(loop_start))
config = Configuration(config_path)
logger.info("found and loaded config at <" + config_path + ">")
socket_send("found and loaded config at <" + config_path + ">")
apikey, tbakey = config.database, config.tba
logger.info("found and loaded database and tba keys")
socket_send("found and loaded database and tba keys")
client = pymongo.MongoClient(apikey)
logger.info("established connection to database")
socket_send("established connection to database")
previous_time = get_previous_time(client)
logger.info("analysis backtimed to: " + str(previous_time))
socket_send("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition
for m in config_modules:
if m in modules:
start = time.time()
current_module = modules[m](config_modules[m], client, tbakey, previous_time, competition)
valid = current_module.validate_config()
if not valid:
continue
current_module.run()
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
socket_send(m + " module finished in " + str(time.time() - start) + " seconds")
if debug:
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
set_current_time(client, loop_start)
close_all()
logger.info("closed threads and database client")
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
socket_send("closed threads and database client")
socket_send("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile:
return 0
if debug:
return 0
event_delay = config["variable"]["event-delay"]
if event_delay:
logger.info("loop delayed until database returns new matches")
socket_send("loop delayed until database returns new matches")
new_match = False
while not new_match:
time.sleep(1)
new_match = check_new_database_matches(client, competition)
logger.info("database returned new matches")
socket_send("database returned new matches")
else:
loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0:
logger.info("loop delayed by " + str(remaining_time) + " seconds")
socket_send("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time)
except KeyboardInterrupt:
close_all()
logger.info("detected KeyboardInterrupt, exiting")
socket_send("detected KeyboardInterrupt, exiting")
return 0
except ConfigurationError as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered a configuration error: " + str(e))
logger.error(str_e)
socket_send("encountered a configuration error: " + str(e))
socket_send(str_e)
close_all()
return 1
except Exception as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered an exception while running")
logger.error(str_e)
socket_send("encountered an exception while running")
socket_send(str_e)
close_all()
return 1
def start(pid_path, verbose, profile, debug):
if profile:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
import cProfile, pstats, io
profile = cProfile.Profile()
profile.enable()
exit_code = main(logger, verbose, profile, debug, socket_send = send)
profile.disable()
f = open("profile.txt", 'w+')
ps = pstats.Stats(profile, stream = f).sort_stats('cumtime')
ps.print_stats()
sys.exit(exit_code)
elif verbose:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code)
elif debug:
def send(msg):
pass
logger = Logger(verbose, profile, debug)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
sys.exit(exit_code)
else:
logfile = "logfile.log"
f = open(logfile, 'w+')
f.close()
e = open('errorlog.log', 'w+')
with daemon.DaemonContext(
working_directory = os.getcwd(),
pidfile = pidfile.TimeoutPIDLockFile(pid_path),
stderr = e
):
context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind("tcp://*:5678")
socket.send(b'status')
def send(msg):
socket.send(bytes("status: " + msg, "utf-8"))
logger = Logger(verbose, profile, debug, file = logfile)
exit_code = main(logger, verbose, profile, debug, socket_send = send)
socket.close()
f.close()
sys.exit(exit_code)
def stop(pid_path):
try:
pf = open(pid_path, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
sys.stderr.write("pidfile at <" + pid_path + "> does not exist. Daemon not running?\n")
return
try:
while True:
os.kill(pid, SIGTERM)
time.sleep(0.01)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(pid_path):
os.remove(pid_path)
else:
traceback.print_exc(file = sys.stderr)
sys.exit(1)
def restart(pid_path):
stop(pid_path)
start(pid_path, False, False, False)
if __name__ == "__main__":
if sys.platform.startswith("win"):
start(None, verbose = True)
else:
import daemon
from daemon import pidfile
from signal import SIGTERM
pid_path = "tra-daemon.pid"
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
start(pid_path, False, False, False)
elif 'stop' == sys.argv[1]:
stop(pid_path)
elif 'restart' == sys.argv[1]:
restart(pid_path)
elif 'verbose' == sys.argv[1]:
start(None, True, False, False)
elif 'profile' == sys.argv[1]:
start(None, False, True, False)
elif 'debug' == sys.argv[1]:
start(None, False, False, True)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)
sys.exit(0)
else:
print("usage: %s start|stop|restart|verbose|profile|debug" % sys.argv[0])
sys.exit(2)

src/config.py

@@ -2,8 +2,6 @@ import json
 from exceptions import ConfigurationError
 from cerberus import Validator
-from data import set_database_config, get_database_config
 class Configuration:
 	path = None
@@ -185,33 +183,24 @@ class Configuration:
 		if not isValidated:
 			raise ConfigurationError("config validation error: " + v.errors)
-	def __getattr__(self, name): # simple linear lookup method for common multikey-value paths, TYPE UNSAFE
-		if name == "persistent":
-			return self.config["persistent"]
-		elif name == "key":
-			return self.config["persistent"]["key"]
-		elif name == "database":
-			# soon to be deprecated
-			return self.config["persistent"]["key"]["database"]
-		elif name == "tba":
-			return self.config["persistent"]["key"]["tba"]
-		elif name == "tra":
-			return self.config["persistent"]["key"]["tra"]
-		elif name == "priority":
-			return self.config["persistent"]["config-preference"]
-		elif name == "sync":
-			return self.config["persistent"]["synchronize-config"]
-		elif name == "variable":
-			return self.config["variable"]
-		elif name == "event_delay":
-			return self.config["variable"]["event-delay"]
-		elif name == "loop_delay":
-			return self.config["variable"]["loop-delay"]
-		elif name == "competition":
-			return self.config["variable"]["competition"]
-		elif name == "modules":
-			return self.config["variable"]["modules"]
-		else:
-			return None
+	def __getattr__(self, name): # better hashed lookup method for common multikey-value paths, TYPE UNSAFE
+		attr_lookup = {
+			"persistent": self.config["persistent"],
+			"key": self.config["persistent"]["key"],
+			"database": self.config["persistent"]["key"]["database"],
+			"tba": self.config["persistent"]["key"]["tba"],
+			"tra": self.config["persistent"]["key"]["tra"],
+			"priority": self.config["persistent"]["config-preference"],
+			"sync": self.config["persistent"]["synchronize-config"],
+			"variable": self.config["variable"],
+			"event_delay": self.config["variable"]["event-delay"],
+			"loop_delay": self.config["variable"]["loop-delay"],
+			"competition": self.config["variable"]["competition"],
+			"modules": self.config["variable"]["modules"]
+		}
+		try:
+			return attr_lookup[name]
+		except KeyError:
+			return None
 	def __getitem__(self, key):
@@ -224,14 +213,14 @@ class Configuration:
 		if sync:
 			if priority == "local" or priority == "client":
 				logger.info("config-preference set to local/client, loading local config information")
-				remote_config = get_database_config(client)
+				remote_config = client.get_database_config()
 				if remote_config != self.config["variable"]:
-					set_database_config(client, self.config["variable"])
+					client.set_database_config(self.config["variable"])
 					logger.info("database config was different and was updated")
 				# no change to config
 			elif priority == "remote" or priority == "database":
 				logger.info("config-preference set to remote/database, loading remote config information")
-				remote_config = get_database_config(client)
+				remote_config = client.get_database_config()
 				if remote_config != self.config["variable"]:
 					self.config["variable"] = remote_config
 					self.save_config()
@@ -245,7 +234,7 @@ class Configuration:
 			# no change to config
 			elif priority == "remote" or priority == "database":
 				logger.info("config-preference set to remote/database, loading database config information")
-				self.config["variable"] = get_database_config(client)
+				self.config["variable"] = client.get_database_config()
 				# change variable to match remote without updating local version
 			else:
 				raise ConfigurationError("persistent/config-preference field must be \"local\"/\"client\" or \"remote\"/\"database\"")

src/data.py

@@ -0,0 +1,298 @@
import requests
import pandas as pd
import pymongo
from exceptions import APIError
class Client:
def __init__(self, config):
self.competition = config.competition
self.tbakey = config.tba
self.mongoclient = pymongo.MongoClient(config.database)
self.trakey = config.tra
def close(self):
self.mongoclient.close()
def pull_new_tba_matches(self, cutoff):
competition = self.competition
api_key= self.tbakey
x=requests.get("https://www.thebluealliance.com/api/v3/event/"+competition+"/matches/simple", headers={"X-TBA-Auth-Key":api_key})
json = x.json()
out = []
for i in json:
if i["actual_time"] != None and i["comp_level"] == "qm":
out.append({"match" : i['match_number'], "blue" : list(map(lambda x: int(x[3:]), i['alliances']['blue']['team_keys'])), "red" : list(map(lambda x: int(x[3:]), i['alliances']['red']['team_keys'])), "winner": i["winning_alliance"]})
out.sort(key=lambda x: x['match'])
return out
def get_team_match_data(self, team_num):
client = self.mongoclient
competition = self.competition
db = client.data_scouting
mdata = db.matchdata
out = {}
for i in mdata.find({"competition" : competition, "team_scouted": str(team_num)}):
out[i['match']] = i['data']
return pd.DataFrame(out)
def get_team_metrics_data(self, team_num):
client = self.mongoclient
competition = self.competition
db = client.data_processing
mdata = db.team_metrics
return mdata.find_one({"competition" : competition, "team": team_num})
def get_team_pit_data(self, team_num):
client = self.mongoclient
competition = self.competition
db = client.data_scouting
mdata = db.pitdata
return mdata.find_one({"competition" : competition, "team_scouted": str(team_num)})["data"]
def unkeyify_2l(self, layered_dict):
out = {}
for i in layered_dict.keys():
add = []
sortkey = []
for j in layered_dict[i].keys():
add.append([j,layered_dict[i][j]])
add.sort(key = lambda x: x[0])
out[i] = list(map(lambda x: x[1], add))
return out
def get_match_data_formatted(self):
teams_at_comp = self.get_teams_at_competition()
out = {}
for team in teams_at_comp:
try:
out[int(team)] = self.unkeyify_2l(self.get_team_match_data(team).transpose().to_dict())
except:
pass
return out
def get_metrics_data_formatted(self):
competition = self.competition
teams_at_comp = self.get_teams_at_competition()
out = {}
for team in teams_at_comp:
try:
out[int(team)] = self.get_team_metrics_data(int(team))
except:
pass
return out
def get_pit_data_formatted(self):
client = self.mongoclient
competition = self.competition
x=requests.get("https://titanscouting.epochml.org/api/fetchAllTeamNicknamesAtCompetition?competition="+competition)
x = x.json()
x = x['data']
x = x.keys()
out = {}
for i in x:
try:
out[int(i)] = self.get_team_pit_data(int(i))
except:
pass
return out
def get_pit_variable_data(self):
client = self.mongoclient
db = client.data_processing
mdata = db.team_pit
return mdata.find()
def get_pit_variable_formatted(self):
temp = self.get_pit_variable_data()
out = {}
for i in temp:
out[i["variable"]] = i["data"]
return out
def push_team_tests_data(self, team_num, data, dbname = "data_processing", colname = "team_tests"):
client = self.mongoclient
competition = self.competition
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "data" : data}, True)
def push_team_metrics_data(self, team_num, data, dbname = "data_processing", colname = "team_metrics"):
client = self.mongoclient
competition = self.competition
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "team": team_num}, {"_id": competition+str(team_num)+"am", "competition" : competition, "team" : team_num, "metrics" : data}, True)
def push_team_pit_data(self, variable, data, dbname = "data_processing", colname = "team_pit"):
client = self.mongoclient
competition = self.competition
db = client[dbname]
mdata = db[colname]
mdata.replace_one({"competition" : competition, "variable": variable}, {"competition" : competition, "variable" : variable, "data" : data}, True)
def get_analysis_flags(self, flag):
client = self.mongoclient
db = client.data_processing
mdata = db.flags
return mdata.find_one({flag:{"$exists":True}})
def set_analysis_flags(self, flag, data):
client = self.mongoclient
db = client.data_processing
mdata = db.flags
return mdata.replace_one({flag:{"$exists":True}}, data, True)
def get_previous_time(self):
previous_time = self.get_analysis_flags("latest_update")
if previous_time == None:
self.set_analysis_flags("latest_update", 0)
previous_time = 0
else:
previous_time = previous_time["latest_update"]
return previous_time
def set_current_time(self, current_time):
self.set_analysis_flags("latest_update", {"latest_update":current_time})
def get_database_config(self):
remote_config = self.get_analysis_flags("config")
return remote_config["config"] if remote_config != None else None
def set_database_config(self, config):
self.set_analysis_flags("config", {"config": config})
def load_match(self):
return self.get_match_data_formatted()
def load_metric(self, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = self.get_team_metrics_data(team)
if db_data == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def load_pit(self):
return self.get_pit_data_formatted()
def push_match(self, results):
for team in results:
self.push_team_tests_data(team, results[team])
def push_metric(self, metric):
for team in metric:
self.push_team_metrics_data(team, metric[team])
def push_pit(self, pit):
for variable in pit:
self.push_team_pit_data(variable, pit[variable])
def check_new_database_matches(self):
return True
#----- API implementations below -----#
def get_team_competition(self):
trakey = self.trakey
url = self.trakey['url']
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['competition']
else:
raise APIError(json)
def get_team(self):
trakey = self.trakey
url = self.trakey['url']
endpoint = '/api/fetchTeamCompetition'
params = {
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['team']
else:
raise APIError(json)
""" doesn't seem to be functional:
def get_team_match_data(self, team_num):
trakey = self.trakey
url = self.trakey['url']
competition = self.competition
endpoint = '/api/fetchAllTeamMatchData'
params = {
"competition": competition,
"teamScouted": team_num,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return json['data'][team_num]
else:
raise APIError(json)"""
def get_teams_at_competition(self):
trakey = self.trakey
url = self.trakey['url']
competition = self.competition
endpoint = '/api/fetchAllTeamNicknamesAtCompetition'
params = {
"competition": competition,
"CLIENT_ID": trakey['CLIENT_ID'],
"CLIENT_SECRET": trakey['CLIENT_SECRET']
}
response = requests.request("GET", url + endpoint, params=params)
json = response.json()
if json['success']:
return list(json['data'].keys())
else:
raise APIError(json)
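Taken together, the new Client means one PyMongo connection per run and no apikey/competition arguments threaded through every call. A rough usage sketch, assuming a valid config.json as described by the Configuration class:

	# sketch: driving the new Client directly (config.json contents assumed valid)
	from config import Configuration
	from data import Client

	config = Configuration("config.json")        # local config load + validation
	client = Client(config)                      # opens the single MongoClient

	previous_time = client.get_previous_time()   # "latest_update" flag, 0 on first run
	matches = client.load_match()                # {team: {variable: [values...]}}
	pit = client.load_pit()                      # {team: {variable: value}}

	client.set_current_time(previous_time)       # the loop start time in the real flow
	client.close()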

src/interface.py

@@ -23,7 +23,7 @@ class Logger(L):
 		self.file = file
-		if file != None:
+		if file is not None:
 			self.targets.append(self._send_file)
 		if profile:
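The != None to is not None change is more than style: != dispatches to __ne__, which a class can override, while is not is an identity check that cannot be fooled. A contrived illustration:

	# contrived example: an overridden __ne__ makes "!= None" unreliable
	class Weird:
		def __ne__(self, other):
			return False  # claims to equal everything, including None

	w = Weird()
	print(w != None)      # False, as if w were None
	print(w is not None)  # True, identity is override-proof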

src/module.py

@@ -1,5 +1,4 @@
 import abc
-import data as d
 import signal
 import numpy as np
 from tra_analysis import Analysis as an
@@ -17,7 +16,7 @@ class Module(metaclass = abc.ABCMeta):
 			callable(subclass.run)
 		)
 	@abc.abstractmethod
-	def __init__(self, config, apikey, tbakey, timestamp, competition, *args, **kwargs):
+	def __init__(self, *args, **kwargs):
 		raise NotImplementedError
 	@abc.abstractmethod
 	def validate_config(self, *args, **kwargs):
@@ -29,20 +28,16 @@ class Module(metaclass = abc.ABCMeta):
 class Match (Module):
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
 		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 	def validate_config(self):
 		return True, ""
@@ -53,7 +48,7 @@ class Match (Module):
 		self._push_results()
 	def _load_data(self):
-		self.data = d.load_match(self.apikey, self.competition)
+		self.data = self.client.load_match()
 	def _simplestats(self, data_test):
@@ -93,7 +88,7 @@ class Match (Module):
 		input_vector = []
-		for team in data:
+		for team in tqdm(data, desc = "Match Module ", unit = " team"):
 			for variable in data[team]:
@@ -141,25 +136,21 @@
 		self.results = return_vector
-		d.push_match(self.apikey, self.competition, self.results)
+		self.client.push_match(self.results)
 class Metric (Module):
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
 		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 	def validate_config(self):
 		return True, ""
@@ -170,21 +161,27 @@ class Metric (Module):
 		self._push_results()
 	def _load_data(self):
-		self.last_match = d.get_analysis_flags(self.apikey, 'metrics_last_match')['metrics_last_match']
-		print("Previous last match", self.last_match)
-		self.data = d.pull_new_tba_matches(self.tbakey, self.competition, self.last_match)
+		self.data = self.client.pull_new_tba_matches(self.timestamp)
 	def _process_data(self):
 		self.results = {}
-		self.match = self.last_match
-		elo_N = self.config["tests"]["elo"]["N"]
-		elo_K = self.config["tests"]["elo"]["K"]
 		matches = self.data
 		red = {}
 		blu = {}
-		for match in tqdm(matches, desc="Metrics"):
-			self.match = max(self.match, int(match['match']))
-			red = d.load_metric(self.apikey, self.competition, match, "red", self.config["tests"]) # get the current ratings for red
-			blu = d.load_metric(self.apikey, self.competition, match, "blue", self.config["tests"]) # get the current ratings for blue
+		# grab matches and loop through each one
+		for match in tqdm(matches, desc = "Metric Module ", unit = " match"):
+			# get the current ratings for red
+			red = self.client.load_metric(match, "red", self.config["tests"])
+			blu = self.client.load_metric(match, "blue", self.config["tests"])
-			elo_red_total = 0
-			elo_blu_total = 0
 			gl2_red_score_total = 0
 			gl2_blu_score_total = 0
@@ -195,83 +192,90 @@ class Metric (Module):
 			gl2_red_vol_total = 0
 			gl2_blu_vol_total = 0
-			for team in red: # for each team in red, add up gl2 score components
+			for team in red:
-				elo_red_total += red[team]["elo"]["score"]
 				gl2_red_score_total += red[team]["gl2"]["score"]
 				gl2_red_rd_total += red[team]["gl2"]["rd"]
 				gl2_red_vol_total += red[team]["gl2"]["vol"]
-			for team in blu: # for each team in blue, add up gl2 score components
+			for team in blu:
-				elo_blu_total += blu[team]["elo"]["score"]
 				gl2_blu_score_total += blu[team]["gl2"]["score"]
 				gl2_blu_rd_total += blu[team]["gl2"]["rd"]
 				gl2_blu_vol_total += blu[team]["gl2"]["vol"]
-			red_elo = {"score": elo_red_total / len(red)}
-			blu_elo = {"score": elo_blu_total / len(blu)}
-			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)} # average the scores by dividing by 3
-			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)} # average the scores by dividing by 3
+			red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
+			blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
-			if match["winner"] == "red": # if red won, set observations to {"red": 1, "blu": 0}
+			if match["winner"] == "red":
 				observations = {"red": 1, "blu": 0}
-			elif match["winner"] == "blue": # if blue won, set observations to {"red": 0, "blu": 1}
+			elif match["winner"] == "blue":
 				observations = {"red": 0, "blu": 1}
-			else: # otherwise it was a tie and observations is {"red": 0.5, "blu": 0.5}
+			else:
 				observations = {"red": 0.5, "blu": 0.5}
-			red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
-			blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
-			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]]) # calculate new scores for gl2 for red
-			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]]) # calculate new scores for gl2 for blue
+			new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
+			new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
-			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]} # calculate gl2 deltas for red
-			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]} # calculate gl2 deltas for blue
+			red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
+			blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
-			for team in red: # for each team on red, add the previous score with the delta to find the new score
+			for team in red:
-				red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
 				red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
 				red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
 				red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
-			for team in blu: # for each team on blue, add the previous score with the delta to find the new score
+			for team in blu:
-				blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
 				blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
 				blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
 				blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
 			temp_vector = {}
-			temp_vector.update(red) # update the team's score with the temporay vector
+			temp_vector.update(red)
 			temp_vector.update(blu)
 			self.results[match['match']] = temp_vector
-			d.push_metric(self.apikey, self.competition, temp_vector) # push new scores to db
+			self.client.push_metric(temp_vector)
-		print("New last match", self.match)
-		d.set_analysis_flags(self.apikey, 'metrics_last_match', {'metrics_last_match': self.match})
 	def _push_results(self):
 		pass
 class Pit (Module):
 	config = None
-	apikey = None
-	tbakey = None
 	timestamp = None
-	competition = None
+	client = None
 	data = None
 	results = None
-	def __init__(self, config, apikey, tbakey, timestamp, competition):
+	def __init__(self, config, timestamp, client):
 		self.config = config
-		self.apikey = apikey
-		self.tbakey = tbakey
 		self.timestamp = timestamp
-		self.competition = competition
+		self.client = client
 	def validate_config(self):
 		return True, ""
@@ -282,12 +286,12 @@ class Pit (Module):
 		self._push_results()
 	def _load_data(self):
-		self.data = d.load_pit(self.apikey, self.competition)
+		self.data = self.client.load_pit()
 	def _process_data(self):
 		tests = self.config["tests"]
 		return_vector = {}
-		for team in self.data:
+		for team in tqdm(self.data, desc = "Pit Module ", unit = " team"):
 			for variable in self.data[team]:
 				if variable in tests:
 					if not variable in return_vector:
@@ -297,7 +301,7 @@ class Pit (Module):
 		self.results = return_vector
 	def _push_results(self):
-		d.push_pit(self.apikey, self.competition, self.results)
+		self.client.push_pit(self.results)
 class Rating (Module):
 	pass
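With apikey, tbakey, and competition gone, every module follows the same injected-Client lifecycle. A sketch of how the caller is expected to drive them under the new signatures (run() internals per the hunks above; the surrounding loop is an assumption, not the project's exact code):

	# sketch: running modules with the new (config, timestamp, client) signature
	from module import Match, Metric, Pit

	def run_modules(config, client, previous_time):
		modules = {"match": Match, "metric": Metric, "pit": Pit}
		for name, cls in modules.items():
			module = cls(config.modules[name], previous_time, client)  # per-module test config
			valid, reason = module.validate_config()                   # currently always (True, "")
			if not valid:
				continue
			module.run()  # _load_data -> _process_data -> _push_results, with tqdm progress bars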


@@ -1,19 +0,0 @@
requests
pymongo
pandas
tra-analysis
dnspython
pyinstaller
requests
pymongo
numpy
scipy
scikit-learn
six
pyparsing
pandas
kivy==2.0.0rc2
setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability

src/superscript.py

@@ -3,10 +3,42 @@
# Notes: # Notes:
# setup: # setup:
__version__ = "0.8.6" __version__ = "1.0.0"
# changelog should be viewed using print(analysis.__changelog__) # changelog should be viewed using print(analysis.__changelog__)
__changelog__ = """changelog: __changelog__ = """changelog:
1.0.0:
- superscript now runs in PEP 3143 compliant well behaved daemon on Linux systems
- removed daemon and socket functionality, user can implement using external software
- added verbose option to linux superscript to allow for interactive output
- moved pymongo import to superscript.py
- added profile option to linux superscript to profile runtime of script
- reduced memory usage slightly by consolidating the unwrapped input data
- added debug option, which performs one loop of analysis and dumps results to local files
- added event and time delay options to config
- event delay pauses loop until the event listener receives an update
- time delay pauses loop until the time specified has elapsed since the BEGINNING of the previous loop
- added options to pull config information from database (retains option to use local config file)
- config-preference option selects between prioritizing local config and prioritizing database config
- synchronize-config option selects whether to update the non-prioritized config with the prioritized one
- divided config options between persistent ones (keys) and variable ones (everything else)
- generalized behavior of various core components by collecting loose functions from several dependencies into classes
- module.py contains classes, each of which represents a single data analysis routine
- config.py contains the Configuration class, which stores the configuration information and abstracts the getter methods
0.9.3:
- improved data loading performance by removing redundant PyMongo client creation (120s to 14s)
- passed a singular instance of the PyMongo client as a stand-in for the apikey parameter in all data.py functions
0.9.2:
- removed unnecessary imports from data
- minor changes to interface
0.9.1:
- fixed bugs in configuration item loading exception handling
0.9.0:
- moved printing and logging related functions to interface.py (changelog will stay in this file)
- changed function return values for load_config and save_config to standard C values (0 for success, 1 for error)
- added local variables for config location
- moved dataset getting and setting functions to dataset.py (changelog will stay in this file)
- moved matchloop, metricloop, pitloop and helper functions (simplestats) to processing.py
0.8.6: 0.8.6:
- added proper main function - added proper main function
0.8.5: 0.8.5:
@@ -114,514 +146,157 @@ __author__ = (
"Jacob Levine <jlevine@imsa.edu>", "Jacob Levine <jlevine@imsa.edu>",
) )
__all__ = [
"load_config",
"save_config",
"get_previous_time",
"load_match",
"matchloop",
"load_metric",
"metricloop",
"load_pit",
"pitloop",
"push_match",
"push_metric",
"push_pit",
]
# imports: # imports:
from tra_analysis import analysis as an import argparse, sys, time, traceback, warnings
import data as d from config import Configuration, ConfigurationError
from collections import defaultdict from data import Client
import json from interface import Logger
import math from module import Match, Metric, Pit
import numpy as np
import os
from os import system, name
from pathlib import Path
from multiprocessing import Pool
import platform
import sys
import time
import warnings
global exec_threads def main(logger, verbose, profile, debug, config_path):
def main(): def close_all():
if "client" in locals():
global exec_threads client.close()
sys.stderr = open("errorlog.txt", "w")
warnings.filterwarnings("ignore") warnings.filterwarnings("ignore")
splash() logger.splash(__version__)
while (True): modules = {"match": Match, "metric": Metric, "pit": Pit}
while True:
try: try:
current_time = time.time() loop_start = time.time()
print("[OK] time: " + str(current_time))
config = load_config("config.json") logger.info("current time: " + str(loop_start))
competition = config["competition"]
match_tests = config["statistics"]["match"]
pit_tests = config["statistics"]["pit"]
metrics_tests = config["statistics"]["metric"]
print("[OK] configs loaded")
print("[OK] starting threads") config = Configuration(config_path)
cfg_max_threads = config["max-threads"]
sys_max_threads = os.cpu_count()
if cfg_max_threads > -sys_max_threads and cfg_max_threads < 0 :
alloc_processes = sys_max_threads + cfg_max_threads
elif cfg_max_threads > 0 and cfg_max_threads < 1:
alloc_processes = math.floor(cfg_max_threads * sys_max_threads)
elif cfg_max_threads > 1 and cfg_max_threads <= sys_max_threads:
alloc_processes = cfg_max_threads
elif cfg_max_threads == 0:
alloc_processes = sys_max_threads
else:
print("[ERROR] Invalid number of processes, must be between -" + str(sys_max_threads) + " and " + str(sys_max_threads))
exit()
exec_threads = Pool(processes = alloc_processes)
print("[OK] " + str(alloc_processes) + " threads started")
apikey = config["key"]["database"]
tbakey = config["key"]["tba"]
print("[OK] loaded keys")
previous_time = get_previous_time(apikey)
print("[OK] analysis backtimed to: " + str(previous_time))
print("[OK] loading data")
start = time.time()
match_data = load_match(apikey, competition)
pit_data = load_pit(apikey, competition)
print("[OK] loaded data in " + str(time.time() - start) + " seconds")
print("[OK] running match stats")
start = time.time()
matchloop(apikey, competition, match_data, match_tests)
print("[OK] finished match stats in " + str(time.time() - start) + " seconds")
print("[OK] running team metrics")
start = time.time()
metricloop(tbakey, apikey, competition, previous_time, metrics_tests)
print("[OK] finished team metrics in " + str(time.time() - start) + " seconds")
print("[OK] running pit analysis")
start = time.time()
pitloop(apikey, competition, pit_data, pit_tests)
print("[OK] finished pit analysis in " + str(time.time() - start) + " seconds")
set_current_time(apikey, current_time) logger.info("found and loaded config at <" + config_path + ">")
print("[OK] finished all tests, looping")
client = Client(config)
logger.info("established connection to database")
previous_time = client.get_previous_time()
logger.info("analysis backtimed to: " + str(previous_time))
config.resolve_config_conflicts(logger, client)
config_modules, competition = config.modules, config.competition
client.competition = competition
for m in config_modules:
if m in modules:
start = time.time()
current_module = modules[m](config_modules[m], previous_time, client)
valid, reason = current_module.validate_config()
if not valid:
continue
current_module.run()
logger.info(m + " module finished in " + str(time.time() - start) + " seconds")
if debug:
logger.save_module_to_file(m, current_module.data, current_module.results) # logging flag check done in logger
client.set_current_time(loop_start)
close_all()
logger.info("closed threads and database client")
logger.info("finished all tasks in " + str(time.time() - loop_start) + " seconds, looping")
if profile:
return 0
if debug:
return 0
event_delay = config["variable"]["event-delay"]
if event_delay:
logger.info("loop delayed until database returns new matches")
new_match = False
while not new_match:
time.sleep(1)
new_match = client.check_new_database_matches()
logger.info("database returned new matches")
else:
loop_delay = float(config["variable"]["loop-delay"])
remaining_time = loop_delay - (time.time() - loop_start)
if remaining_time > 0:
logger.info("loop delayed by " + str(remaining_time) + " seconds")
time.sleep(remaining_time)
print_hrule()
except KeyboardInterrupt: except KeyboardInterrupt:
print("\n[OK] caught KeyboardInterrupt, killing processes") close_all()
exec_threads.terminate() logger.info("detected KeyboardInterrupt, exiting")
print("[OK] processes killed, exiting") return 0
exit()
else: except ConfigurationError as e:
pass str_e = "".join(traceback.format_exception(e))
logger.error("encountered a configuration error: " + str(e))
logger.error(str_e)
close_all()
return 1
#clear() except Exception as e:
str_e = "".join(traceback.format_exception(e))
logger.error("encountered an exception while running")
logger.error(str_e)
close_all()
return 1
def clear(): def start(verbose, profile, debug, config_path, log_path):
# for windows
if name == 'nt':
_ = system('cls')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
def print_hrule(): logger = Logger(verbose, profile, debug, file = log_path)
print("#"+38*"-"+"#") if profile:
def print_box(s): import cProfile, pstats, io
profile = cProfile.Profile()
profile.enable()
exit_code = main(logger, verbose, profile, debug, config_path)
profile.disable()
f = open("profile.txt", "w+")
ps = pstats.Stats(profile, stream = f).sort_stats("cumtime")
ps.print_stats()
sys.exit(exit_code)
temp = "|" elif verbose:
temp += s
temp += (40-len(s)-2)*" "
temp += "|"
print(temp)
def splash(): exit_code = main(logger, verbose, profile, debug, config_path)
sys.exit(exit_code)
print_hrule() elif debug:
print_box(" superscript version: " + __version__)
print_box(" os: " + platform.system())
print_box(" python: " + platform.python_version())
print_hrule()
def load_config(file): exit_code = main(logger, verbose, profile, debug, config_path)
sys.exit(exit_code)
config_vector = {}
try:
f = open(file)
except:
print("[ERROR] could not locate config.json, generating blank config.json and exiting")
f = open(file, "w")
f.write(sample_json)
exit()
config_vector = json.load(f)
return config_vector
def save_config(file, config_vector):
with open(file) as f:
json.dump(config_vector, f)
def get_previous_time(apikey):
previous_time = d.get_analysis_flags(apikey, "latest_update")
if previous_time == None:
d.set_analysis_flags(apikey, "latest_update", 0)
previous_time = 0
else: else:
previous_time = previous_time["latest_update"] pass # must be verbose, debug or profile
return previous_time
def set_current_time(apikey, current_time):
d.set_analysis_flags(apikey, "latest_update", {"latest_update":current_time})
def load_match(apikey, competition):
return d.get_match_data_formatted(apikey, competition)
def simplestats(data_test):
data = np.array(data_test[0])
data = data[np.isfinite(data)]
ranges = list(range(len(data)))
test = data_test[1]
if test == "basic_stats":
return an.basic_stats(data)
if test == "historical_analysis":
return an.histo_analysis([ranges, data])
if test == "regression_linear":
return an.regression(ranges, data, ['lin'])
if test == "regression_logarithmic":
return an.regression(ranges, data, ['log'])
if test == "regression_exponential":
return an.regression(ranges, data, ['exp'])
if test == "regression_polynomial":
return an.regression(ranges, data, ['ply'])
if test == "regression_sigmoidal":
return an.regression(ranges, data, ['sig'])
def matchloop(apikey, competition, data, tests): # expects 3D array with [Team][Variable][Match]
global exec_threads
short_mapping = {"regression_linear": "lin", "regression_logarithmic": "log", "regression_exponential": "exp", "regression_polynomial": "ply", "regression_sigmoidal": "sig"}
class AutoVivification(dict):
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
return_vector = {}
team_filtered = []
variable_filtered = []
variable_data = []
test_filtered = []
result_filtered = []
return_vector = AutoVivification()
for team in data:
for variable in data[team]:
if variable in tests:
for test in tests[variable]:
team_filtered.append(team)
variable_filtered.append(variable)
variable_data.append((data[team][variable], test))
test_filtered.append(test)
result_filtered = exec_threads.map(simplestats, variable_data)
i = 0
result_filtered = list(result_filtered)
for result in result_filtered:
filtered = test_filtered[i]
try:
short = short_mapping[filtered]
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result[short]
except KeyError: # not in mapping
return_vector[team_filtered[i]][variable_filtered[i]][test_filtered[i]] = result
i += 1
push_match(apikey, competition, return_vector)
def load_metric(apikey, competition, match, group_name, metrics):
group = {}
for team in match[group_name]:
db_data = d.get_team_metrics_data(apikey, competition, team)
if d.get_team_metrics_data(apikey, competition, team) == None:
elo = {"score": metrics["elo"]["score"]}
gl2 = {"score": metrics["gl2"]["score"], "rd": metrics["gl2"]["rd"], "vol": metrics["gl2"]["vol"]}
ts = {"mu": metrics["ts"]["mu"], "sigma": metrics["ts"]["sigma"]}
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
else:
metrics = db_data["metrics"]
elo = metrics["elo"]
gl2 = metrics["gl2"]
ts = metrics["ts"]
group[team] = {"elo": elo, "gl2": gl2, "ts": ts}
return group
def metricloop(tbakey, apikey, competition, timestamp, metrics): # listener based metrics update
elo_N = metrics["elo"]["N"]
elo_K = metrics["elo"]["K"]
matches = d.pull_new_tba_matches(tbakey, competition, timestamp)
red = {}
blu = {}
for match in matches:
red = load_metric(apikey, competition, match, "red", metrics)
blu = load_metric(apikey, competition, match, "blue", metrics)
elo_red_total = 0
elo_blu_total = 0
gl2_red_score_total = 0
gl2_blu_score_total = 0
gl2_red_rd_total = 0
gl2_blu_rd_total = 0
gl2_red_vol_total = 0
gl2_blu_vol_total = 0
for team in red:
elo_red_total += red[team]["elo"]["score"]
gl2_red_score_total += red[team]["gl2"]["score"]
gl2_red_rd_total += red[team]["gl2"]["rd"]
gl2_red_vol_total += red[team]["gl2"]["vol"]
for team in blu:
elo_blu_total += blu[team]["elo"]["score"]
gl2_blu_score_total += blu[team]["gl2"]["score"]
gl2_blu_rd_total += blu[team]["gl2"]["rd"]
gl2_blu_vol_total += blu[team]["gl2"]["vol"]
red_elo = {"score": elo_red_total / len(red)}
blu_elo = {"score": elo_blu_total / len(blu)}
red_gl2 = {"score": gl2_red_score_total / len(red), "rd": gl2_red_rd_total / len(red), "vol": gl2_red_vol_total / len(red)}
blu_gl2 = {"score": gl2_blu_score_total / len(blu), "rd": gl2_blu_rd_total / len(blu), "vol": gl2_blu_vol_total / len(blu)}
if match["winner"] == "red":
observations = {"red": 1, "blu": 0}
elif match["winner"] == "blue":
observations = {"red": 0, "blu": 1}
else:
observations = {"red": 0.5, "blu": 0.5}
red_elo_delta = an.Metric().elo(red_elo["score"], blu_elo["score"], observations["red"], elo_N, elo_K) - red_elo["score"]
blu_elo_delta = an.Metric().elo(blu_elo["score"], red_elo["score"], observations["blu"], elo_N, elo_K) - blu_elo["score"]
new_red_gl2_score, new_red_gl2_rd, new_red_gl2_vol = an.Metric().glicko2(red_gl2["score"], red_gl2["rd"], red_gl2["vol"], [blu_gl2["score"]], [blu_gl2["rd"]], [observations["red"], observations["blu"]])
new_blu_gl2_score, new_blu_gl2_rd, new_blu_gl2_vol = an.Metric().glicko2(blu_gl2["score"], blu_gl2["rd"], blu_gl2["vol"], [red_gl2["score"]], [red_gl2["rd"]], [observations["blu"], observations["red"]])
red_gl2_delta = {"score": new_red_gl2_score - red_gl2["score"], "rd": new_red_gl2_rd - red_gl2["rd"], "vol": new_red_gl2_vol - red_gl2["vol"]}
blu_gl2_delta = {"score": new_blu_gl2_score - blu_gl2["score"], "rd": new_blu_gl2_rd - blu_gl2["rd"], "vol": new_blu_gl2_vol - blu_gl2["vol"]}
for team in red:
red[team]["elo"]["score"] = red[team]["elo"]["score"] + red_elo_delta
red[team]["gl2"]["score"] = red[team]["gl2"]["score"] + red_gl2_delta["score"]
red[team]["gl2"]["rd"] = red[team]["gl2"]["rd"] + red_gl2_delta["rd"]
red[team]["gl2"]["vol"] = red[team]["gl2"]["vol"] + red_gl2_delta["vol"]
for team in blu:
blu[team]["elo"]["score"] = blu[team]["elo"]["score"] + blu_elo_delta
blu[team]["gl2"]["score"] = blu[team]["gl2"]["score"] + blu_gl2_delta["score"]
blu[team]["gl2"]["rd"] = blu[team]["gl2"]["rd"] + blu_gl2_delta["rd"]
blu[team]["gl2"]["vol"] = blu[team]["gl2"]["vol"] + blu_gl2_delta["vol"]
temp_vector = {}
temp_vector.update(red)
temp_vector.update(blu)
push_metric(apikey, competition, temp_vector)
def load_pit(apikey, competition):
return d.get_pit_data_formatted(apikey, competition)
def pitloop(apikey, competition, pit, tests):
return_vector = {}
for team in pit:
for variable in pit[team]:
if variable in tests:
if not variable in return_vector:
return_vector[variable] = []
return_vector[variable].append(pit[team][variable])
push_pit(apikey, competition, return_vector)
def push_match(apikey, competition, results):
for team in results:
d.push_team_tests_data(apikey, competition, team, results[team])
def push_metric(apikey, competition, metric):
for team in metric:
d.push_team_metrics_data(apikey, competition, team, metric[team])
def push_pit(apikey, competition, pit):
for variable in pit:
d.push_team_pit_data(apikey, competition, variable, pit[variable])
def get_team_metrics(apikey, tbakey, competition):
metrics = d.get_metrics_data_formatted(apikey, competition)
elo = {}
gl2 = {}
for team in metrics:
elo[team] = metrics[team]["metrics"]["elo"]["score"]
gl2[team] = metrics[team]["metrics"]["gl2"]["score"]
elo = {k: v for k, v in sorted(elo.items(), key=lambda item: item[1])}
gl2 = {k: v for k, v in sorted(gl2.items(), key=lambda item: item[1])}
elo_ranked = []
for team in elo:
elo_ranked.append({"team": str(team), "elo": str(elo[team])})
gl2_ranked = []
for team in gl2:
gl2_ranked.append({"team": str(team), "gl2": str(gl2[team])})
return {"elo-ranks": elo_ranked, "glicko2-ranks": gl2_ranked}
sample_json = """{
"max-threads": 0.5,
"team": "",
"competition": "2020ilch",
"key":{
"database":"",
"tba":""
},
"statistics":{
"match":{
"balls-blocked":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-collected":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-lower-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-started":["basic_stats","historical_analyss","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-teleop":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"],
"balls-upper-auto":["basic_stats","historical_analysis","regression_linear","regression_logarithmic","regression_exponential","regression_polynomial","regression_sigmoidal"]
},
"metric":{
"elo":{
"score":1500,
"N":400,
"K":24
},
"gl2":{
"score":1500,
"rd":250,
"vol":0.06
},
"ts":{
"mu":25,
"sigma":8.33
}
},
"pit":{
"wheel-mechanism":true,
"low-balls":true,
"high-balls":true,
"wheel-success":true,
"strategic-focus":true,
"climb-mechanism":true,
"attitude":true
}
}
}"""
if __name__ == "__main__": if __name__ == "__main__":
if sys.platform.startswith('win'):
multiprocessing.freeze_support() parser = argparse.ArgumentParser(description = "TRA data processing application.")
main() parser.add_argument("mode", metavar = "MODE", type = str, nargs = 1, choices = ["verbose", "profile", "debug"], help = "verbose, debug, profile")
parser.add_argument("--config", dest = "config", default = "config.json", type = str, help = "path to config file")
parser.add_argument("--logfile", dest = "logfile", default = "logfile.log", type = str, help = "path to log file")
args = parser.parse_args()
mode = args.mode[0]
config_path = args.config
log_path = args.logfile
if mode == "verbose":
start(True, False, False, config_path = config_path, log_path = log_path)
elif mode == "profile":
start(False, True, False, config_path = config_path, log_path = log_path)
elif mode == "debug":
start(False, False, True, config_path = config_path, log_path = log_path)
exit(0)
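The rewritten entry point reads its loop pacing from the "variable" section of the config: config["variable"]["event-delay"] and config["variable"]["loop-delay"]. A hedged sketch of that fragment, written as a Python dict; only the two delay keys are confirmed by this diff, and the "persistent" section holding the keys (implied by the changelog) is an assumption.

# hypothetical config fragment; everything outside "variable" is assumed
config = {
	"persistent": {
		"key": {"database": "<mongodb-uri>", "tba": "<tba-key>"}
	},
	"variable": {
		"event-delay": False,  # if truthy, block until client.check_new_database_matches() reports new matches
		"loop-delay": 60       # seconds between loop starts when event-delay is off
	}
}

Note that loop-delay is measured from the beginning of the previous loop: the code sleeps only for loop_delay - (time.time() - loop_start), so an iteration that already ran longer than the delay restarts immediately. Invocation follows the argparse definition above, e.g. python superscript.py verbose --config config.json --logfile logfile.log.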


@@ -1,37 +0,0 @@
# -*- mode: python ; coding: utf-8 -*-
block_cipher = None
a = Analysis(['superscript.py'],
pathex=['/workspaces/tra-data-analysis/src'],
binaries=[],
datas=[],
hiddenimports=[
"dnspython",
"sklearn.utils._weight_vector",
"requests",
],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[('W ignore', None, 'OPTION')],
name='superscript',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
upx_exclude=[],
runtime_tmpdir=None,
console=True )


@@ -1,12 +0,0 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long
cd competition
python superscript.py debug


@@ -1,12 +0,0 @@
#!/bin/bash
#
#SBATCH --job-name=tra-superscript
#SBATCH --output=PROD_slurm-tra-superscript.out
#SBATCH --ntasks=8
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=256
#SBATCH --mail-user=dsingh@imsa.edu
#SBATCH -p cpu-long
cd competition
python superscript.py verbose

test/test_zmq.py (new file, +14 lines)

@@ -0,0 +1,14 @@
import signal
import zmq
signal.signal(signal.SIGINT, signal.SIG_DFL)
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect('tcp://localhost:5678')
socket.setsockopt(zmq.SUBSCRIBE, b'status')
while True:
message = socket.recv_multipart()
print(f'Received: {message}')
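test_zmq.py is only the subscriber half; a minimal publisher to exercise it might look like the following. The 'status' topic and port 5678 come from the test itself; the payload and send rate are assumptions.

import time
import zmq

context = zmq.Context()
socket = context.socket(zmq.PUB)
socket.bind('tcp://*:5678')  # matches the subscriber's connect address

while True:
	# topic frame first so the subscriber's SUBSCRIBE(b'status') filter matches
	socket.send_multipart([b'status', b'ok'])
	time.sleep(1)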